repo_name : string (6 to 130 chars)
hexsha : list
file_path : list
code : list
apis : list
wq13552463699/UCD_UR5E
[ "513acb7e235ab940fd03c3038208678e285690f3" ]
[ "Simulation/reaching_gym/DQN_train.py" ]
[ "from collections import deque\nimport tensorflow as tf\nfrom DQN_agent import DQNAgent\nimport numpy as np\nimport random\nfrom rl_symbol_env import symbol_env\n\n# setting seeds for result reproducibility. This is not super important\nrandom.seed(2212)\nnp.random.seed(2212)\ntf.set_random_seed(2212)\n\n# Hyperparameters / Constants\nEPISODES = 500000\n\nREPLAY_MEMORY_SIZE = 1000000\n#Increased\n\nMINIMUM_REPLAY_MEMORY = 1000\nMINIBATCH_SIZE = 128\n\nEPSILON = 0\nEPSILON_DECAY = 0.999\nMINIMUM_EPSILON = 0.05\nMINIMUM_EPSILON_stage_1 = 0.95\nMINIMUM_EPSILON_stage_2 = 0.05\nDISCOUNT = 0.8\nAWARD_RATIO = 1000\nPUNISH_RATIO = -30\n\n\n\n# Environment details\nenv = symbol_env()\n# env = gym.make(ENV_NAME)\naction_dim = 10\nobservation_dim = (12,)\n\n# creating own session to use across all the Keras/Tensorflow models we are using\nsess = tf.Session()\n\n# Replay memory to store experiances of the model with the environment\nreplay_memory = deque(maxlen=REPLAY_MEMORY_SIZE)\n\n# Our models to solve the mountaincar problem.\nagent = DQNAgent(sess, action_dim, observation_dim)\nagent.model.load_weights('/home/qiang/Pro/10000_agent_.h5')\n\n\ndef train_dqn_agent():\n minibatch = random.sample(replay_memory, MINIBATCH_SIZE)\n X_cur_states = []\n X_next_states = []\n for index, sample in enumerate(minibatch):\n cur_state, action, reward, next_state, done = sample\n X_cur_states.append(cur_state)\n X_next_states.append(next_state)\n\n X_cur_states = np.array(X_cur_states)\n X_next_states = np.array(X_next_states)\n\n # action values for the current_states\n cur_action_values = agent.model.predict(X_cur_states)\n # action values for the next_states taken from our agent (Q network)\n next_action_values = agent.model.predict(X_next_states)\n for index, sample in enumerate(minibatch):\n cur_state, action, reward, next_state, done = sample\n if not done:\n # Q(st, at) = rt + DISCOUNT * max(Q(s(t+1), a(t+1)))\n cur_action_values[index][action] = reward + DISCOUNT * np.amax(next_action_values[index])\n else:\n # Q(st, at) = rt\n cur_action_values[index][action] = reward\n # train the agent with new Q values for the states and the actions\n agent.model.fit(X_cur_states, cur_action_values, verbose=0)\n\nglobal_success_time = 0\nsuccess_time = 0\nmax_reward = -999999\nstep = 0\ndone = 1\nhistory = {\"Episode\":[],\"Success_rate\":[]}\n\nhistory[\"Episode\"].append(1)\nfor episode in range(EPISODES):\n if done == 1:\n cur_state = env.reset()\n if done == 2:\n cur_state = env.reset_o()\n # print(\"Target pos:\", env.target_pos)\n done = 0\n episode_reward = 0\n episode_length = 0\n step += 1\n while not done:\n episode_length += 1\n # step += 1\n # set VISUALIZATION = True if want to see agent while training. But makes training a bit slower.\n # if VISUALIZATION:\n # env.render()\n\n if (np.random.uniform(0, 1) < EPSILON):\n # Take random action\n action = np.random.randint(0, action_dim)\n # print(action)\n else:\n # Take action that maximizes the total reward\n action = np.argmax(agent.model.predict(np.expand_dims(cur_state, axis=0))[0])\n\n # print(\"Action I take\", action)\n next_state, reward, done = env.step(action)\n # if episode_length > 2000:\n print(cur_state,action, reward)\n # print(\"Current state\", next_state)\n # print(\"Reward I got\", reward)\n # print(\"done\", done)\n # print(done)\n\n # print(done)\n # if reward == 0:\n # done = 2\n # print(reward)\n if done == 1:\n # If episode is ended the we have won the game. 
So, give some large positive reward\n reward = AWARD_RATIO\n success_time += 1\n global_success_time += 1\n if done == 2:\n reward = PUNISH_RATIO\n\n episode_reward += reward\n # # save the model if we are getting maximum score this time\n # if (episode_reward > max_reward):\n # agent.model.save_weights(str(episode_reward) + \"_agent_.h5\")\n #\n # elif done == 2:\n # reward = episode_reward - 250\n # if (episode_reward > max_reward):\n # agent.model.save_weights(str(episode_reward) + \"_agent_.h5\")\n\n # else:\n # # In oher cases reward will be proportional to the distance that car has travelled\n # # from it's previous location + velocity of the car\n # reward = 5 * abs(next_state[0] - cur_state[0]) + 3 * abs(cur_state[1])\n\n # Add experience to replay memory buffer\n replay_memory.append((cur_state, action, reward, next_state, done))\n cur_state = next_state\n \n \n if (len(replay_memory) > MINIMUM_REPLAY_MEMORY):\n train_dqn_agent()\n \n\n if (len(replay_memory) < MINIMUM_REPLAY_MEMORY):\n continue\n \n \n\n \n # print(\"Training\")\n\n if (EPSILON > MINIMUM_EPSILON and len(replay_memory) > MINIMUM_REPLAY_MEMORY):\n EPSILON *= EPSILON_DECAY\n \n # if (episode_length > 100000 and EPSILON > MINIMUM_EPSILON_stage_2):\n # EPSILON *= EPSILON_DECAY\n \n # some bookkeeping.\n avg_reward = episode_reward / episode_length\n \n # if (episode % 10) == 0 and episode != 0:\n # train_dqn_agent()\n \n if (episode % 500) == 0 and episode != 0:\n agent.model.save_weights(str(episode) + \"_agent_.h5\")\n \n # if (avg_reward > max_reward):\n # agent.model.save_weights(str(episode_reward) + \"_agent_.h5\")\n max_reward = max(avg_reward, max_reward)\n\n print('Episode', episode,'| ', 'Episodic Reward', episode_reward,'| ', 'Maximum Reward', max_reward, '| ','End_step',episode_length, '| ','EPSILON', EPSILON)\n print('-------------------------------------------------------------------------------------------------------' )\n\n if episode % 500 == 0:\n print(\"For the last 500 episodes, I successfully reach the target \",success_time,\" times\")\n print(\"For the last \",episode,\" episodes\", \", I successfully reach the target \",global_success_time,\"times\")\n print(\"Success rate: \",global_success_time / step)\n success_time = 0" ]
[ [ "numpy.amax", "numpy.expand_dims", "numpy.random.seed", "tensorflow.Session", "tensorflow.set_random_seed", "numpy.random.uniform", "numpy.array", "numpy.random.randint" ] ]
lindehesse/M3d-Cam
[ "c5e709ff7e9a9805333bc1131bb54f252ac0bbd0" ]
[ "medcam/backends/guided_backpropagation.py" ]
[ "import torch\nfrom torch import nn\nfrom medcam.backends.base import _BaseWrapper\n\n\nclass GuidedBackPropagation(_BaseWrapper):\n\n def __init__(self, model, postprocessor=None, retain_graph=False):\n \"\"\"\n \"Striving for Simplicity: the All Convolutional Net\"\n https://arxiv.org/pdf/1412.6806.pdf\n Look at Figure 1 on page 8.\n \"\"\"\n # equivalent to super().__init__(........), so enheriting from _BaseWrapper class\n super(GuidedBackPropagation, self).__init__(model, postprocessor=postprocessor, retain_graph=retain_graph)\n\n def _register_hooks(self):\n \"\"\"Registers the backward hooks to the layers.\"\"\"\n def backward_hook(module, grad_in, grad_out):\n # Cut off negative gradients\n if isinstance(module, nn.ReLU):\n new = torch.clamp(grad_in[0], min=0.0)\n return (new,)\n \n self.remove_hook(forward=True, backward=True)\n for name, module in self.model.named_modules():\n self.registered_hooks[name] = [True, True]\n # Changed from register_backward_hook to register_full_backward_hook\n self.backward_handlers.append(module.register_full_backward_hook(backward_hook))\n \n def get_registered_hooks(self):\n \"\"\"Returns every hook that was able to register to a layer.\"\"\"\n registered_hooks = []\n for layer in self.registered_hooks.keys():\n if self.registered_hooks[layer][0] and self.registered_hooks[layer][1]:\n registered_hooks.append(layer)\n self.remove_hook(forward=True, backward=True)\n return registered_hooks\n\n def forward(self, data):\n \"\"\"Calls the forward() of the base.\"\"\"\n self._register_hooks()\n self.data = data.requires_grad_()\n return super(GuidedBackPropagation, self).forward(self.data)\n\n def generate(self):\n \"\"\"Generates an attention map.\"\"\"\n attention_map = self.data.grad.clone()\n self.data.grad.zero_()\n B, _, *data_shape = attention_map.shape\n #attention_map = attention_map.view(B, self.channels, -1, *data_shape)\n attention_map = attention_map.view(B, 1, -1, *data_shape)\n attention_map = torch.mean(attention_map, dim=2) # TODO: mean or sum?\n attention_map = attention_map.repeat(1, self.output_channels, *[1 for _ in range(self.input_dim)])\n attention_map = self._normalize_per_channel(attention_map)\n attention_map = attention_map.cpu().numpy()\n attention_maps = {}\n attention_maps[\"\"] = attention_map\n return attention_maps\n" ]
[ [ "torch.mean", "torch.clamp" ] ]
isVoid/cudf
[ "1a3b3f217be93a55b47af3a9d0da29f0fcb7c7e9", "1a3b3f217be93a55b47af3a9d0da29f0fcb7c7e9" ]
[ "python/dask_cudf/dask_cudf/core.py", "python/dask_cudf/dask_cudf/tests/test_join.py" ]
[ "# Copyright (c) 2018-2020, NVIDIA CORPORATION.\nimport warnings\nfrom distutils.version import LooseVersion\n\nimport numpy as np\nimport pandas as pd\nfrom tlz import partition_all\n\nimport dask\nfrom dask import dataframe as dd\nfrom dask.base import normalize_token, tokenize\nfrom dask.compatibility import apply\nfrom dask.context import _globals\nfrom dask.core import flatten\nfrom dask.dataframe.core import Scalar, handle_out, map_partitions\nfrom dask.dataframe.utils import raise_on_meta_error\nfrom dask.highlevelgraph import HighLevelGraph\nfrom dask.optimization import cull, fuse\nfrom dask.utils import M, OperatorMethodMixin, derived_from, funcname\n\nimport cudf\nfrom cudf import _lib as libcudf\n\nfrom dask_cudf import sorting\n\nDASK_VERSION = LooseVersion(dask.__version__)\n\n\ndef optimize(dsk, keys, **kwargs):\n flatkeys = list(flatten(keys)) if isinstance(keys, list) else [keys]\n dsk, dependencies = cull(dsk, flatkeys)\n dsk, dependencies = fuse(\n dsk,\n keys,\n dependencies=dependencies,\n ave_width=_globals.get(\"fuse_ave_width\", 1),\n )\n dsk, _ = cull(dsk, keys)\n return dsk\n\n\ndef finalize(results):\n if results and isinstance(\n results[0], (cudf.DataFrame, cudf.Series, cudf.Index, cudf.MultiIndex)\n ):\n return cudf.concat(results)\n return results\n\n\nclass _Frame(dd.core._Frame, OperatorMethodMixin):\n \"\"\" Superclass for DataFrame and Series\n\n Parameters\n ----------\n dsk : dict\n The dask graph to compute this DataFrame\n name : str\n The key prefix that specifies which keys in the dask comprise this\n particular DataFrame / Series\n meta : cudf.DataFrame, cudf.Series, or cudf.Index\n An empty cudf object with names, dtypes, and indices matching the\n expected output.\n divisions : tuple of index values\n Values along which we partition our blocks on the index\n \"\"\"\n\n __dask_scheduler__ = staticmethod(dask.get)\n __dask_optimize__ = staticmethod(optimize)\n\n def __dask_postcompute__(self):\n return finalize, ()\n\n def __dask_postpersist__(self):\n return type(self), (self._name, self._meta, self.divisions)\n\n def __init__(self, dsk, name, meta, divisions):\n if not isinstance(dsk, HighLevelGraph):\n dsk = HighLevelGraph.from_collections(name, dsk, dependencies=[])\n self.dask = dsk\n self._name = name\n meta = dd.core.make_meta(meta)\n if not isinstance(meta, self._partition_type):\n raise TypeError(\n \"Expected meta to specify type {0}, got type \"\n \"{1}\".format(\n self._partition_type.__name__, type(meta).__name__\n )\n )\n self._meta = meta\n self.divisions = tuple(divisions)\n\n def __getstate__(self):\n return (self.dask, self._name, self._meta, self.divisions)\n\n def __setstate__(self, state):\n self.dask, self._name, self._meta, self.divisions = state\n\n def __repr__(self):\n s = \"<dask_cudf.%s | %d tasks | %d npartitions>\"\n return s % (type(self).__name__, len(self.dask), self.npartitions)\n\n def to_dask_dataframe(self, **kwargs):\n \"\"\"Create a dask.dataframe object from a dask_cudf object\"\"\"\n nullable_pd_dtype = kwargs.get(\"nullable_pd_dtype\", False)\n return self.map_partitions(\n M.to_pandas, nullable_pd_dtype=nullable_pd_dtype\n )\n\n\nconcat = dd.concat\n\n\nnormalize_token.register(_Frame, lambda a: a._name)\n\n\nclass DataFrame(_Frame, dd.core.DataFrame):\n _partition_type = cudf.DataFrame\n\n def _assign_column(self, k, v):\n def assigner(df, k, v):\n out = df.copy()\n out[k] = v\n return out\n\n meta = assigner(self._meta, k, dd.core.make_meta(v))\n return self.map_partitions(assigner, k, v, 
meta=meta)\n\n def apply_rows(self, func, incols, outcols, kwargs={}, cache_key=None):\n import uuid\n\n if cache_key is None:\n cache_key = uuid.uuid4()\n\n def do_apply_rows(df, func, incols, outcols, kwargs):\n return df.apply_rows(\n func, incols, outcols, kwargs, cache_key=cache_key\n )\n\n meta = do_apply_rows(self._meta, func, incols, outcols, kwargs)\n return self.map_partitions(\n do_apply_rows, func, incols, outcols, kwargs, meta=meta\n )\n\n def merge(self, other, **kwargs):\n if kwargs.pop(\"shuffle\", \"tasks\") != \"tasks\":\n raise ValueError(\n \"Dask-cudf only supports task based shuffling, got %s\"\n % kwargs[\"shuffle\"]\n )\n on = kwargs.pop(\"on\", None)\n if isinstance(on, tuple):\n on = list(on)\n return super().merge(other, on=on, shuffle=\"tasks\", **kwargs)\n\n def join(self, other, **kwargs):\n if kwargs.pop(\"shuffle\", \"tasks\") != \"tasks\":\n raise ValueError(\n \"Dask-cudf only supports task based shuffling, got %s\"\n % kwargs[\"shuffle\"]\n )\n\n # CuDF doesn't support \"right\" join yet\n how = kwargs.pop(\"how\", \"left\")\n if how == \"right\":\n return other.join(other=self, how=\"left\", **kwargs)\n\n on = kwargs.pop(\"on\", None)\n if isinstance(on, tuple):\n on = list(on)\n return super().join(other, how=how, on=on, shuffle=\"tasks\", **kwargs)\n\n def set_index(self, other, sorted=False, divisions=None, **kwargs):\n if kwargs.pop(\"shuffle\", \"tasks\") != \"tasks\":\n raise ValueError(\n \"Dask-cudf only supports task based shuffling, got %s\"\n % kwargs[\"shuffle\"]\n )\n pre_sorted = sorted\n del sorted\n\n if (\n divisions == \"quantile\"\n or isinstance(divisions, (cudf.DataFrame, cudf.Series))\n or (\n isinstance(other, str)\n and cudf.utils.dtypes.is_string_dtype(self[other].dtype)\n )\n ):\n\n # Let upstream-dask handle \"pre-sorted\" case\n if pre_sorted:\n return dd.shuffle.set_sorted_index(\n self, other, divisions=divisions, **kwargs\n )\n\n by = other\n if not isinstance(other, list):\n by = [by]\n if len(by) > 1:\n raise ValueError(\"Dask does not support MultiIndex (yet).\")\n if divisions == \"quantile\":\n divisions = None\n\n # Use dask_cudf's sort_values\n # TODO: Handle `sorted=True`\n df = self.sort_values(\n by,\n max_branch=kwargs.get(\"max_branch\", None),\n divisions=divisions,\n set_divisions=True,\n ignore_index=True,\n )\n\n # Ignore divisions if its a dataframe\n if isinstance(divisions, cudf.DataFrame):\n divisions = None\n\n # Set index and repartition\n df2 = df.map_partitions(\n sorting.set_index_post,\n index_name=other,\n drop=kwargs.get(\"drop\", True),\n column_dtype=df.columns.dtype,\n )\n npartitions = kwargs.get(\"npartitions\", self.npartitions)\n partition_size = kwargs.get(\"partition_size\", None)\n if partition_size:\n return df2.repartition(partition_size=partition_size)\n if not divisions and df2.npartitions != npartitions:\n return df2.repartition(npartitions=npartitions)\n if divisions and df2.npartitions != len(divisions) - 1:\n return df2.repartition(divisions=divisions)\n return df2\n\n return super().set_index(\n other,\n sorted=pre_sorted,\n shuffle=\"tasks\",\n divisions=divisions,\n **kwargs,\n )\n\n def sort_values(\n self,\n by,\n ignore_index=False,\n max_branch=None,\n divisions=None,\n set_divisions=False,\n **kwargs,\n ):\n if self.npartitions == 1:\n df = self.map_partitions(M.sort_values, by)\n else:\n df = sorting.sort_values(\n self,\n by,\n max_branch=max_branch,\n divisions=divisions,\n set_divisions=set_divisions,\n ignore_index=ignore_index,\n )\n\n if ignore_index:\n return 
df.reset_index(drop=True)\n return df\n\n def to_parquet(self, path, *args, **kwargs):\n \"\"\" Calls dask.dataframe.io.to_parquet with CudfEngine backend \"\"\"\n from dask_cudf.io import to_parquet\n\n return to_parquet(self, path, *args, **kwargs)\n\n def to_orc(self, path, **kwargs):\n \"\"\" Calls dask_cudf.io.to_orc \"\"\"\n from dask_cudf.io import to_orc\n\n return to_orc(self, path, **kwargs)\n\n @derived_from(pd.DataFrame)\n def var(\n self,\n axis=None,\n skipna=True,\n ddof=1,\n split_every=False,\n dtype=None,\n out=None,\n ):\n axis = self._validate_axis(axis)\n meta = self._meta_nonempty.var(axis=axis, skipna=skipna)\n if axis == 1:\n result = map_partitions(\n M.var,\n self,\n meta=meta,\n token=self._token_prefix + \"var\",\n axis=axis,\n skipna=skipna,\n ddof=ddof,\n )\n return handle_out(out, result)\n\n else:\n num = self._get_numeric_data()\n x = 1.0 * num.sum(skipna=skipna, split_every=split_every)\n x2 = 1.0 * (num ** 2).sum(skipna=skipna, split_every=split_every)\n n = num.count(split_every=split_every)\n name = self._token_prefix + \"var\"\n result = map_partitions(\n var_aggregate, x2, x, n, token=name, meta=meta, ddof=ddof\n )\n if isinstance(self, DataFrame):\n result.divisions = (min(self.columns), max(self.columns))\n return handle_out(out, result)\n\n def repartition(self, *args, **kwargs):\n \"\"\" Wraps dask.dataframe DataFrame.repartition method.\n Uses DataFrame.shuffle if `columns=` is specified.\n \"\"\"\n columns = kwargs.pop(\"columns\", None)\n if columns:\n warnings.warn(\n \"The column argument will be removed from repartition in \"\n \" future versions of dask_cudf. Use DataFrame.shuffle().\",\n DeprecationWarning,\n )\n warnings.warn(\n \"Rearranging data by column hash. Divisions will lost. \"\n \"Set ignore_index=False to preserve Index values.\"\n )\n ignore_index = kwargs.pop(\"ignore_index\", True)\n return self.shuffle(\n on=columns, ignore_index=ignore_index, **kwargs\n )\n return super().repartition(*args, **kwargs)\n\n def shuffle(self, *args, **kwargs):\n \"\"\" Wraps dask.dataframe DataFrame.shuffle method\n \"\"\"\n shuffle_arg = kwargs.pop(\"shuffle\", None)\n if shuffle_arg and shuffle_arg != \"tasks\":\n raise ValueError(\"dask_cudf does not support disk-based shuffle.\")\n return super().shuffle(*args, shuffle=\"tasks\", **kwargs)\n\n\ndef sum_of_squares(x):\n x = x.astype(\"f8\")._column\n outcol = libcudf.reduce.reduce(\"sum_of_squares\", x)\n return cudf.Series(outcol)\n\n\ndef var_aggregate(x2, x, n, ddof):\n try:\n with warnings.catch_warnings(record=True):\n warnings.simplefilter(\"always\")\n result = (x2 / n) - (x / n) ** 2\n if ddof != 0:\n result = result * n / (n - ddof)\n return result\n except ZeroDivisionError:\n return np.float64(np.nan)\n\n\ndef nlargest_agg(x, **kwargs):\n return cudf.concat(x).nlargest(**kwargs)\n\n\ndef nsmallest_agg(x, **kwargs):\n return cudf.concat(x).nsmallest(**kwargs)\n\n\nclass Series(_Frame, dd.core.Series):\n _partition_type = cudf.Series\n\n def count(self, split_every=False):\n return reduction(\n self,\n chunk=M.count,\n aggregate=np.sum,\n split_every=split_every,\n meta=\"i8\",\n )\n\n def mean(self, split_every=False):\n sum = self.sum(split_every=split_every)\n n = self.count(split_every=split_every)\n return sum / n\n\n @derived_from(pd.DataFrame)\n def var(\n self,\n axis=None,\n skipna=True,\n ddof=1,\n split_every=False,\n dtype=None,\n out=None,\n ):\n axis = self._validate_axis(axis)\n meta = self._meta_nonempty.var(axis=axis, skipna=skipna)\n if axis == 1:\n result = 
map_partitions(\n M.var,\n self,\n meta=meta,\n token=self._token_prefix + \"var\",\n axis=axis,\n skipna=skipna,\n ddof=ddof,\n )\n return handle_out(out, result)\n\n else:\n num = self._get_numeric_data()\n x = 1.0 * num.sum(skipna=skipna, split_every=split_every)\n x2 = 1.0 * (num ** 2).sum(skipna=skipna, split_every=split_every)\n n = num.count(split_every=split_every)\n name = self._token_prefix + \"var\"\n result = map_partitions(\n var_aggregate, x2, x, n, token=name, meta=meta, ddof=ddof\n )\n if isinstance(self, DataFrame):\n result.divisions = (min(self.columns), max(self.columns))\n return handle_out(out, result)\n\n\nclass Index(Series, dd.core.Index):\n _partition_type = cudf.Index\n\n\ndef _extract_meta(x):\n \"\"\"\n Extract internal cache data (``_meta``) from dask_cudf objects\n \"\"\"\n if isinstance(x, (Scalar, _Frame)):\n return x._meta\n elif isinstance(x, list):\n return [_extract_meta(_x) for _x in x]\n elif isinstance(x, tuple):\n return tuple([_extract_meta(_x) for _x in x])\n elif isinstance(x, dict):\n return {k: _extract_meta(v) for k, v in x.items()}\n return x\n\n\ndef _emulate(func, *args, **kwargs):\n \"\"\"\n Apply a function using args / kwargs. If arguments contain dd.DataFrame /\n dd.Series, using internal cache (``_meta``) for calculation\n \"\"\"\n with raise_on_meta_error(funcname(func)):\n return func(*_extract_meta(args), **_extract_meta(kwargs))\n\n\ndef align_partitions(args):\n \"\"\"Align partitions between dask_cudf objects.\n\n Note that if all divisions are unknown, but have equal npartitions, then\n they will be passed through unchanged.\"\"\"\n dfs = [df for df in args if isinstance(df, _Frame)]\n if not dfs:\n return args\n\n divisions = dfs[0].divisions\n if not all(df.divisions == divisions for df in dfs):\n raise NotImplementedError(\"Aligning mismatched partitions\")\n return args\n\n\ndef reduction(\n args,\n chunk=None,\n aggregate=None,\n combine=None,\n meta=None,\n token=None,\n chunk_kwargs=None,\n aggregate_kwargs=None,\n combine_kwargs=None,\n split_every=None,\n **kwargs,\n):\n \"\"\"Generic tree reduction operation.\n\n Parameters\n ----------\n args :\n Positional arguments for the `chunk` function. All `dask.dataframe`\n objects should be partitioned and indexed equivalently.\n chunk : function [block-per-arg] -> block\n Function to operate on each block of data\n aggregate : function list-of-blocks -> block\n Function to operate on the list of results of chunk\n combine : function list-of-blocks -> block, optional\n Function to operate on intermediate lists of results of chunk\n in a tree-reduction. If not provided, defaults to aggregate.\n $META\n token : str, optional\n The name to use for the output keys.\n chunk_kwargs : dict, optional\n Keywords for the chunk function only.\n aggregate_kwargs : dict, optional\n Keywords for the aggregate function only.\n combine_kwargs : dict, optional\n Keywords for the combine function only.\n split_every : int, optional\n Group partitions into groups of this size while performing a\n tree-reduction. 
If set to False, no tree-reduction will be used,\n and all intermediates will be concatenated and passed to ``aggregate``.\n Default is 8.\n kwargs :\n All remaining keywords will be passed to ``chunk``, ``aggregate``, and\n ``combine``.\n \"\"\"\n if chunk_kwargs is None:\n chunk_kwargs = dict()\n if aggregate_kwargs is None:\n aggregate_kwargs = dict()\n chunk_kwargs.update(kwargs)\n aggregate_kwargs.update(kwargs)\n\n if combine is None:\n if combine_kwargs:\n raise ValueError(\"`combine_kwargs` provided with no `combine`\")\n combine = aggregate\n combine_kwargs = aggregate_kwargs\n else:\n if combine_kwargs is None:\n combine_kwargs = dict()\n combine_kwargs.update(kwargs)\n\n if not isinstance(args, (tuple, list)):\n args = [args]\n\n npartitions = set(\n arg.npartitions for arg in args if isinstance(arg, _Frame)\n )\n if len(npartitions) > 1:\n raise ValueError(\"All arguments must have same number of partitions\")\n npartitions = npartitions.pop()\n\n if split_every is None:\n split_every = 8\n elif split_every is False:\n split_every = npartitions\n elif split_every < 2 or not isinstance(split_every, int):\n raise ValueError(\"split_every must be an integer >= 2\")\n\n token_key = tokenize(\n token or (chunk, aggregate),\n meta,\n args,\n chunk_kwargs,\n aggregate_kwargs,\n combine_kwargs,\n split_every,\n )\n\n # Chunk\n a = \"{0}-chunk-{1}\".format(token or funcname(chunk), token_key)\n if len(args) == 1 and isinstance(args[0], _Frame) and not chunk_kwargs:\n dsk = {\n (a, 0, i): (chunk, key)\n for i, key in enumerate(args[0].__dask_keys__())\n }\n else:\n dsk = {\n (a, 0, i): (\n apply,\n chunk,\n [(x._name, i) if isinstance(x, _Frame) else x for x in args],\n chunk_kwargs,\n )\n for i in range(args[0].npartitions)\n }\n\n # Combine\n b = \"{0}-combine-{1}\".format(token or funcname(combine), token_key)\n k = npartitions\n depth = 0\n while k > split_every:\n for part_i, inds in enumerate(partition_all(split_every, range(k))):\n conc = (list, [(a, depth, i) for i in inds])\n dsk[(b, depth + 1, part_i)] = (\n (apply, combine, [conc], combine_kwargs)\n if combine_kwargs\n else (combine, conc)\n )\n k = part_i + 1\n a = b\n depth += 1\n\n # Aggregate\n b = \"{0}-agg-{1}\".format(token or funcname(aggregate), token_key)\n conc = (list, [(a, depth, i) for i in range(k)])\n if aggregate_kwargs:\n dsk[(b, 0)] = (apply, aggregate, [conc], aggregate_kwargs)\n else:\n dsk[(b, 0)] = (aggregate, conc)\n\n if meta is None:\n meta_chunk = _emulate(apply, chunk, args, chunk_kwargs)\n meta = _emulate(apply, aggregate, [[meta_chunk]], aggregate_kwargs)\n meta = dd.core.make_meta(meta)\n\n for arg in args:\n if isinstance(arg, _Frame):\n dsk.update(arg.dask)\n\n return dd.core.new_dd_object(dsk, b, meta, (None, None))\n\n\ndef from_cudf(data, npartitions=None, chunksize=None, sort=True, name=None):\n if isinstance(getattr(data, \"index\", None), cudf.MultiIndex):\n raise NotImplementedError(\n \"dask_cudf does not support MultiIndex Dataframes.\"\n )\n\n name = name or (\"from_cudf-\" + tokenize(data, npartitions or chunksize))\n return dd.from_pandas(\n data,\n npartitions=npartitions,\n chunksize=chunksize,\n sort=sort,\n name=name,\n )\n\n\nfrom_cudf.__doc__ = (\n \"Wraps main-line Dask from_pandas...\\n\" + dd.from_pandas.__doc__\n)\n\n\ndef from_dask_dataframe(df):\n return df.map_partitions(cudf.from_pandas)\n\n\nfor name in [\n \"add\",\n \"sub\",\n \"mul\",\n \"truediv\",\n \"floordiv\",\n \"mod\",\n \"pow\",\n \"radd\",\n \"rsub\",\n \"rmul\",\n \"rtruediv\",\n \"rfloordiv\",\n 
\"rmod\",\n \"rpow\",\n]:\n meth = getattr(cudf.DataFrame, name)\n kwargs = {\"original\": cudf.DataFrame} if DASK_VERSION >= \"2.11.1\" else {}\n DataFrame._bind_operator_method(name, meth, **kwargs)\n\n meth = getattr(cudf.Series, name)\n kwargs = {\"original\": cudf.Series} if DASK_VERSION >= \"2.11.1\" else {}\n Series._bind_operator_method(name, meth, **kwargs)\n\nfor name in [\"lt\", \"gt\", \"le\", \"ge\", \"ne\", \"eq\"]:\n meth = getattr(cudf.Series, name)\n kwargs = {\"original\": cudf.Series} if DASK_VERSION >= \"2.11.1\" else {}\n Series._bind_comparison_method(name, meth, **kwargs)\n", "from functools import partial\n\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nfrom dask import dataframe as dd\n\nimport dask_cudf as dgd\n\nimport cudf\n\nparam_nrows = [5, 10, 50, 100]\n\n\n@pytest.mark.parametrize(\"left_nrows\", param_nrows)\n@pytest.mark.parametrize(\"right_nrows\", param_nrows)\n@pytest.mark.parametrize(\"left_nkeys\", [4, 5])\n@pytest.mark.parametrize(\"right_nkeys\", [4, 5])\ndef test_join_inner(left_nrows, right_nrows, left_nkeys, right_nkeys):\n chunksize = 50\n\n np.random.seed(0)\n\n # cuDF\n left = cudf.DataFrame(\n {\n \"x\": np.random.randint(0, left_nkeys, size=left_nrows),\n \"a\": np.arange(left_nrows),\n }\n )\n right = cudf.DataFrame(\n {\n \"x\": np.random.randint(0, right_nkeys, size=right_nrows),\n \"a\": 1000 * np.arange(right_nrows),\n }\n )\n\n expect = left.set_index(\"x\").join(\n right.set_index(\"x\"), how=\"inner\", sort=True, lsuffix=\"l\", rsuffix=\"r\"\n )\n expect = expect.to_pandas()\n\n # dask_cudf\n left = dgd.from_cudf(left, chunksize=chunksize)\n right = dgd.from_cudf(right, chunksize=chunksize)\n\n joined = left.set_index(\"x\").join(\n right.set_index(\"x\"), how=\"inner\", lsuffix=\"l\", rsuffix=\"r\"\n )\n got = joined.compute().to_pandas()\n\n if len(got.columns):\n got = got.sort_values(list(got.columns))\n expect = expect.sort_values(list(expect.columns))\n\n # Check index\n np.testing.assert_array_equal(expect.index.values, got.index.values)\n\n # Check rows in each groups\n expect_rows = {}\n got_rows = {}\n\n def gather(df, grows):\n grows[df[\"x\"].values[0]] = (set(df.al), set(df.ar))\n\n expect.reset_index().groupby(\"x\").apply(partial(gather, grows=expect_rows))\n expect.reset_index().groupby(\"x\").apply(partial(gather, grows=got_rows))\n\n assert got_rows == expect_rows\n\n\n@pytest.mark.parametrize(\"left_nrows\", param_nrows)\n@pytest.mark.parametrize(\"right_nrows\", param_nrows)\n@pytest.mark.parametrize(\"left_nkeys\", [4, 5])\n@pytest.mark.parametrize(\"right_nkeys\", [4, 5])\n@pytest.mark.parametrize(\"how\", [\"left\", \"right\"])\ndef test_join_left(left_nrows, right_nrows, left_nkeys, right_nkeys, how):\n chunksize = 50\n\n np.random.seed(0)\n\n # cuDF\n left = cudf.DataFrame(\n {\n \"x\": np.random.randint(0, left_nkeys, size=left_nrows),\n \"a\": np.arange(left_nrows, dtype=np.float64),\n }\n )\n right = cudf.DataFrame(\n {\n \"x\": np.random.randint(0, right_nkeys, size=right_nrows),\n \"a\": 1000 * np.arange(right_nrows, dtype=np.float64),\n }\n )\n\n expect = left.set_index(\"x\").join(\n right.set_index(\"x\"), how=how, sort=True, lsuffix=\"l\", rsuffix=\"r\"\n )\n expect = expect.to_pandas()\n\n # dask_cudf\n left = dgd.from_cudf(left, chunksize=chunksize)\n right = dgd.from_cudf(right, chunksize=chunksize)\n\n joined = left.set_index(\"x\").join(\n right.set_index(\"x\"), how=how, lsuffix=\"l\", rsuffix=\"r\"\n )\n got = joined.compute().to_pandas()\n\n if len(got.columns):\n got = 
got.sort_values(list(got.columns))\n expect = expect.sort_values(list(expect.columns))\n\n # Check index\n np.testing.assert_array_equal(expect.index.values, got.index.values)\n\n # Check rows in each groups\n expect_rows = {}\n got_rows = {}\n\n def gather(df, grows):\n cola = np.sort(np.asarray(df.al))\n colb = np.sort(np.asarray(df.ar))\n\n grows[df[\"x\"].values[0]] = (cola, colb)\n\n expect.reset_index().groupby(\"x\").apply(partial(gather, grows=expect_rows))\n\n expect.reset_index().groupby(\"x\").apply(partial(gather, grows=got_rows))\n\n for k in expect_rows:\n np.testing.assert_array_equal(expect_rows[k][0], got_rows[k][0])\n np.testing.assert_array_equal(expect_rows[k][1], got_rows[k][1])\n\n\n@pytest.mark.parametrize(\"left_nrows\", param_nrows)\n@pytest.mark.parametrize(\"right_nrows\", param_nrows)\n@pytest.mark.parametrize(\"left_nkeys\", [4, 5])\n@pytest.mark.parametrize(\"right_nkeys\", [4, 5])\ndef test_merge_left(\n left_nrows, right_nrows, left_nkeys, right_nkeys, how=\"left\"\n):\n chunksize = 3\n\n np.random.seed(0)\n\n # cuDF\n left = cudf.DataFrame(\n {\n \"x\": np.random.randint(0, left_nkeys, size=left_nrows),\n \"y\": np.random.randint(0, left_nkeys, size=left_nrows),\n \"a\": np.arange(left_nrows, dtype=np.float64),\n }\n )\n right = cudf.DataFrame(\n {\n \"x\": np.random.randint(0, right_nkeys, size=right_nrows),\n \"y\": np.random.randint(0, right_nkeys, size=right_nrows),\n \"a\": 1000 * np.arange(right_nrows, dtype=np.float64),\n }\n )\n\n expect = left.merge(right, on=(\"x\", \"y\"), how=how)\n\n def normalize(df):\n return (\n df.to_pandas()\n .sort_values([\"x\", \"y\", \"a_x\", \"a_y\"])\n .reset_index(drop=True)\n )\n\n # dask_cudf\n left = dgd.from_cudf(left, chunksize=chunksize)\n right = dgd.from_cudf(right, chunksize=chunksize)\n\n result = left.merge(right, on=(\"x\", \"y\"), how=how).compute(\n scheduler=\"single-threaded\"\n )\n\n dd.assert_eq(normalize(expect), normalize(result))\n\n\n@pytest.mark.parametrize(\"left_nrows\", [2, 5])\n@pytest.mark.parametrize(\"right_nrows\", [5, 10])\n@pytest.mark.parametrize(\"left_nkeys\", [4])\n@pytest.mark.parametrize(\"right_nkeys\", [4])\ndef test_merge_1col_left(\n left_nrows, right_nrows, left_nkeys, right_nkeys, how=\"left\"\n):\n chunksize = 3\n\n np.random.seed(0)\n\n # cuDF\n left = cudf.DataFrame(\n {\n \"x\": np.random.randint(0, left_nkeys, size=left_nrows),\n \"a\": np.arange(left_nrows, dtype=np.float64),\n }\n )\n right = cudf.DataFrame(\n {\n \"x\": np.random.randint(0, right_nkeys, size=right_nrows),\n \"a\": 1000 * np.arange(right_nrows, dtype=np.float64),\n }\n )\n\n expect = left.merge(right, on=[\"x\"], how=how)\n expect = (\n expect.to_pandas()\n .sort_values([\"x\", \"a_x\", \"a_y\"])\n .reset_index(drop=True)\n )\n\n # dask_cudf\n left = dgd.from_cudf(left, chunksize=chunksize)\n right = dgd.from_cudf(right, chunksize=chunksize)\n\n joined = left.merge(right, on=[\"x\"], how=how)\n\n got = joined.compute().to_pandas()\n\n got = got.sort_values([\"x\", \"a_x\", \"a_y\"]).reset_index(drop=True)\n\n dd.assert_eq(expect, got)\n\n\ndef test_merge_should_fail():\n # Expected failure cases described in #2694\n df1 = cudf.DataFrame()\n df1[\"a\"] = [1, 2, 3, 4, 5, 6] * 2\n df1[\"b\"] = np.random.randint(0, 12, 12)\n\n df2 = cudf.DataFrame()\n df2[\"a\"] = [7, 2, 3, 8, 5, 9] * 2\n df2[\"c\"] = np.random.randint(0, 12, 12)\n\n left = dgd.from_cudf(df1, 1).groupby(\"a\").b.min().to_frame()\n right = dgd.from_cudf(df2, 1).groupby(\"a\").c.min().to_frame()\n\n with pytest.raises(KeyError):\n 
left.merge(right, how=\"left\", on=[\"nonCol\"])\n with pytest.raises(KeyError):\n left.merge(right, how=\"left\", on=[\"b\"])\n with pytest.raises(KeyError):\n left.merge(right, how=\"left\", on=[\"c\"])\n with pytest.raises(KeyError):\n left.merge(right, how=\"left\", on=[\"a\"])\n\n # Same column names\n df2[\"b\"] = np.random.randint(0, 12, 12)\n right = dgd.from_cudf(df2, 1).groupby(\"a\").b.min().to_frame()\n\n with pytest.raises(KeyError):\n left.merge(right, how=\"left\", on=\"NonCol\")\n with pytest.raises(KeyError):\n left.merge(right, how=\"left\", on=\"a\")\n\n\n@pytest.mark.parametrize(\"how\", [\"inner\", \"left\"])\ndef test_indexed_join(how):\n p_left = pd.DataFrame({\"x\": np.arange(10)}, index=np.arange(10) * 2)\n p_right = pd.DataFrame({\"y\": 1}, index=np.arange(15))\n\n g_left = cudf.from_pandas(p_left)\n g_right = cudf.from_pandas(p_right)\n\n dg_left = dd.from_pandas(g_left, npartitions=4)\n dg_right = dd.from_pandas(g_right, npartitions=5)\n\n d = g_left.merge(g_right, left_index=True, right_index=True, how=how)\n dg = dg_left.merge(dg_right, left_index=True, right_index=True, how=how)\n\n # occassionally order is not correct (possibly do to hashing in the merge)\n d = d.sort_values(\"x\") # index is preserved\n dg = dg.sort_values(\n \"x\"\n ) # index is reset -- sort_values will slow test down\n\n dd.assert_eq(d, dg, check_index=False)\n\n\n@pytest.mark.parametrize(\"how\", [\"left\", \"inner\"])\ndef test_how(how):\n left = cudf.DataFrame(\n {\"x\": [1, 2, 3, 4, None], \"y\": [1.0, 2.0, 3.0, 4.0, 0.0]}\n )\n right = cudf.DataFrame({\"x\": [2, 3, None, 2], \"y\": [20, 30, 0, 20]})\n\n dleft = dd.from_pandas(left, npartitions=2)\n dright = dd.from_pandas(right, npartitions=3)\n\n expected = left.merge(right, how=how, on=\"x\")\n result = dleft.merge(dright, how=how, on=\"x\")\n\n dd.assert_eq(\n result.compute().to_pandas().sort_values(\"x\"),\n expected.to_pandas().sort_values(\"x\"),\n check_index=False,\n )\n\n\n@pytest.mark.parametrize(\"daskify\", [True, False])\ndef test_single_dataframe_merge(daskify):\n right = cudf.DataFrame({\"x\": [1, 2, 1, 2], \"y\": [1, 2, 3, 4]})\n left = cudf.DataFrame({\"x\": np.arange(100) % 10, \"z\": np.arange(100)})\n\n dleft = dd.from_pandas(left, npartitions=10)\n\n if daskify:\n dright = dd.from_pandas(right, npartitions=1)\n else:\n dright = right\n\n expected = left.merge(right, how=\"inner\")\n result = dd.merge(dleft, dright, how=\"inner\")\n assert len(result.dask) < 25\n\n dd.assert_eq(\n result.compute().to_pandas().sort_values([\"z\", \"y\"]),\n expected.to_pandas().sort_values([\"z\", \"y\"]),\n check_index=False,\n )\n\n\n@pytest.mark.parametrize(\"how\", [\"inner\", \"left\"])\n@pytest.mark.parametrize(\"on\", [\"id_1\", [\"id_1\"], [\"id_1\", \"id_2\"]])\ndef test_on(how, on):\n left = cudf.DataFrame(\n {\"id_1\": [1, 2, 3, 4, 5], \"id_2\": [1.0, 2.0, 3.0, 4.0, 0.0]}\n )\n right = cudf.DataFrame(\n {\"id_1\": [2, 3, None, 2], \"id_2\": [2.0, 3.0, 4.0, 20]}\n )\n\n dleft = dd.from_pandas(left, npartitions=2)\n dright = dd.from_pandas(right, npartitions=3)\n\n expected = left.merge(right, how=how, on=on)\n result = dleft.merge(dright, how=how, on=on)\n\n dd.assert_eq(\n result.compute().to_pandas().sort_values(on),\n expected.to_pandas().sort_values(on),\n check_index=False,\n )\n\n\ndef test_single_partition():\n left = cudf.DataFrame({\"x\": range(200), \"y\": range(200)})\n right = cudf.DataFrame({\"x\": range(100), \"z\": range(100)})\n\n dleft = dd.from_pandas(left, npartitions=1)\n dright = 
dd.from_pandas(right, npartitions=10)\n\n m = dleft.merge(dright, how=\"inner\")\n assert len(m.dask) < len(dleft.dask) + len(dright.dask) * 3\n\n dleft = dd.from_pandas(left, npartitions=5)\n m2 = dleft.merge(right, how=\"inner\")\n assert len(m2.dask) < len(dleft.dask) * 3\n assert len(m2) == 100\n" ]
[ [ "numpy.float64" ], [ "numpy.random.seed", "numpy.asarray", "numpy.arange", "numpy.testing.assert_array_equal", "numpy.random.randint" ] ]
Daniel-Vital/magicroot
[ "5dba2fa23d8933946af1c2e4244f0e18e455721f" ]
[ "src/magicroot/df/create.py" ]
[ "import numpy as np\nimport pandas as pd\n\n\ndef empty(shape, *args, **kwargs):\n canvas = np.empty(shape)\n canvas[:] = np.NaN\n return pd.DataFrame(canvas, *args, **kwargs)\n\n\ndef const_col(df, const, *args, **kwargs):\n s = pd.Series(np.ones(len(df)) * const, *args, **kwargs)\n s.index = df.index\n return s\n" ]
[ [ "numpy.empty", "pandas.DataFrame" ] ]
G-Wang/Pytorch-Voice-Loop
[ "9d2d5fdebc2e5be06e8a2809e731a8ea378330ea" ]
[ "test.py" ]
[ "import torch\nfrom model import Loop\nfrom hparams import Hparams\nfrom utils import VCTKDataSet, my_collate_fn\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\n\ndef no_test_train():\n hp = Hparams()\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n model = Loop(hp, device)\n optim = torch.optim.Adam(model.parameters(), lr=1e-4)\n print(\"model has {} million parameters\".format(model.count_parameters()))\n\n\n dataset = VCTKDataSet(\"data/vctk/numpy_features_valid/\")\n\n loader = DataLoader(dataset, shuffle=False, batch_size=10, drop_last=False, collate_fn = my_collate_fn)\n\n for data in tqdm(loader):\n text, text_list, target, target_list, spkr = data\n # compute loss\n loss = model.compute_loss_batch((text, text_list), spkr, (target, target_list), teacher_forcing=True)\n print(loss.detach().cpu().numpy())\n\n\ndef test_model():\n hp = Hparams()\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n model = Loop(hp, device)\n optim = torch.optim.Adam(model.parameters(), lr=1e-4)\n print(\"model has {} million parameters\".format(model.count_parameters()))\n dataset = VCTKDataSet(\"data/vctk/numpy_features_valid/\")\n loader = DataLoader(dataset, shuffle=False, batch_size=10, drop_last=False, collate_fn = my_collate_fn)\n\n for data in tqdm(loader):\n text, text_list, target, target_list, spkr = data\n loss = model.compute_loss_batch((text, text_list), spkr, (target, target_list))\n print(loss.detach().cpu().numpy())\n # forward pass through encoding\n #p_out, s_out = model.encoder.forward((text, text_list), spkr)\n #print(p_out.shape, s_out.shape)\n \n" ]
[ [ "torch.utils.data.DataLoader", "torch.cuda.is_available" ] ]
moinnadeem/apex
[ "f5bdd8e4e2a8d4c31c3b2bfe4662953d6903da2a" ]
[ "tests/L0/run_transformer/test_transformer_module.py" ]
[ "from typing import Tuple\nimport os\nimport subprocess\nimport sys\nimport unittest\n\n\nSEVERALGPU_TEST = [\n \"bert_minimal_test\",\n \"gpt_minimal_test\",\n \"dynamic_batchsize_test\",\n]\n\n\ndef get_multigpu_launch_option(min_gpu):\n should_skip = False\n import torch\n\n num_devices = torch.cuda.device_count()\n if num_devices < min_gpu:\n should_skip = True\n distributed_run_options = f\"-m torch.distributed.run --nproc_per_node={num_devices}\"\n return should_skip, distributed_run_options\n\n\ndef get_launch_option(test_filename) -> Tuple[bool, str]:\n should_skip = False\n for severalgpu_test in SEVERALGPU_TEST:\n if severalgpu_test in test_filename:\n return get_multigpu_launch_option(3)\n return should_skip, \"\"\n\n\ndef run_transformer_tests():\n python_executable_path = sys.executable\n # repository_root = os.path.join(os.path.dirname(__file__), \"../../../\")\n # directory = os.path.abspath(os.path.join(repository_root, \"tests/mpu\"))\n directory = os.path.dirname(__file__)\n files = [\n os.path.join(directory, f)\n for f in os.listdir(directory)\n if f.startswith(\"run_\") and os.path.isfile(os.path.join(directory, f))\n ]\n print(\"#######################################################\")\n print(f\"# Python executable path: {python_executable_path}\")\n print(f\"# {len(files)} tests: {files}\")\n print(\"#######################################################\")\n errors = []\n for i, test_file in enumerate(files, 1):\n is_denied = False\n should_skip, launch_option = get_launch_option(test_file)\n if should_skip:\n print(\n f\"### {i} / {len(files)}: {test_file} skipped. Requires multiple GPUs.\"\n )\n continue\n test_run_cmd = (\n f\"{python_executable_path} {launch_option} {test_file} \"\n \"--micro-batch-size 2 --num-layers 16 --hidden-size 256 --num-attention-heads 8 --max-position-embeddings \"\n \"512 --seq-length 512 --global-batch-size 128\"\n )\n if \"bert\" in test_file or \"gpt\" in test_file:\n import torch\n\n num_devices = torch.cuda.device_count()\n tensor_model_parallel_size = 1 + (1 - (num_devices % 2 and num_devices > 4))\n pipeline_model_parallel_size = num_devices // tensor_model_parallel_size\n test_run_cmd += f\" --pipeline-model-parallel-size {pipeline_model_parallel_size} --tensor-model-parallel-size {tensor_model_parallel_size}\"\n else:\n test_run_cmd += f\" --use-cpu-initialization\"\n print(f\"### {i} / {len(files)}: cmd: {test_run_cmd}\")\n try:\n output = (\n subprocess.check_output(test_run_cmd, shell=True)\n .decode(sys.stdout.encoding)\n .strip()\n )\n except Exception as e:\n errors.append((test_file, str(e)))\n else:\n if \">> passed the test :-)\" not in output:\n errors.append((test_file, output))\n else:\n if not errors:\n print(\"### PASSED\")\n else:\n print(\"### FAILED\")\n short_msg = f\"{len(errors)} out of {len(files)} tests failed\"\n print(short_msg)\n for (filename, log) in errors:\n print(f\"File: {filename}\\nLog: {log}\")\n raise RuntimeError(short_msg)\n\n\nclass TestTransformer(unittest.TestCase):\n def test_transformer(self):\n run_transformer_tests()\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "torch.cuda.device_count" ] ]
Crazyalltnt/RL-Alogorithms-Implement
[ "27905f1c1890b1aff907564230b4ec0c22e60ba0" ]
[ "Independent/DDPG/env.py" ]
[ "#!/usr/bin/env python\n# coding=utf-8\n'''\n@Author: John\n@Email: johnjim0816@gmail.com\n@Date: 2020-06-10 15:28:30\n@LastEditor: John\nLastEditTime: 2021-09-16 00:52:30\n@Discription: \n@Environment: python 3.7.7\n'''\n\nimport gym\nimport numpy as np\n\n\nclass NormalizedActions(gym.ActionWrapper):\n ''' 将 action 范围重新映射\n '''\n\n def action(self, action):\n '''将输入给 env.step(action) 的 action 进行重新修改,从 tanh 的 [-1,1] 映射到环境 [-2,2]\n '''\n low_bound = self.action_space.low\n upper_bound = self.action_space.high\n action = low_bound + (action + 1.0) * 0.5 * (upper_bound - low_bound)\n action = np.clip(action, low_bound, upper_bound)\n return action\n\n def reverse_action(self, action):\n '''将 action 进行重新修改,从 环境 [-2,2] 映射到 [-1,1] (暂未用到)\n '''\n low_bound = self.action_space.low\n upper_bound = self.action_space.high\n action = 2 * (action - low_bound) / (upper_bound - low_bound) - 1\n action = np.clip(action, low_bound, upper_bound)\n return action\n\n\nclass OUNoise(object):\n '''Ornstein–Uhlenbeck噪声\n '''\n\n def __init__(self, action_space, mu=0.0, theta=0.15, max_sigma=0.3, min_sigma=0.3, decay_period=100000):\n self.mu = mu # OU噪声的参数\n self.theta = theta # OU噪声的参数\n self.sigma = max_sigma # OU噪声的参数\n self.max_sigma = max_sigma\n self.min_sigma = min_sigma\n self.decay_period = decay_period\n self.action_dim = action_space.shape[0]\n self.low = action_space.low\n self.high = action_space.high\n self.reset()\n\n def reset(self):\n self.obs = np.ones(self.action_dim) * self.mu\n\n def evolve_obs(self):\n x = self.obs\n dx = self.theta * (self.mu - x) + self.sigma * \\\n np.random.randn(self.action_dim)\n self.obs = x + dx\n return self.obs\n\n def get_action(self, action, t=0):\n ou_obs = self.evolve_obs()\n self.sigma = self.max_sigma - \\\n (self.max_sigma - self.min_sigma) * \\\n min(1.0, t / self.decay_period) # sigma会逐渐衰减\n return np.clip(action + ou_obs, self.low, self.high) # 动作加上噪声后进行剪切\n" ]
[ [ "numpy.ones", "numpy.random.randn", "numpy.clip" ] ]
Antrovirens/learn-surveying-software-designing
[ "96b492510b3a3ac970675ddffb25d2b4d2b9970c" ]
[ "读取rinex文件/MainWindow.py" ]
[ "# -*- coding: utf-8 -*-\n\n\"\"\"\nModule implementing MainWindow.\n\"\"\"\nimport sys\nimport numpy as np\n#from math import pi, atan, sqrt\n#from datetime import datetime\n\n#import matplotlib\n#matplotlib.use(\"Qt5Agg\") # 声明使用QT5\n#from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\n#from matplotlib.figure import Figure\n\nfrom PyQt5.QtCore import pyqtSlot\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import *\nfrom Ui_MainWindow import Ui_MainWindow\n\nnp.set_printoptions(suppress=True)\nephemeris = []\n\nlines = []\n\nclass Ephemeris:\n def __init__(self):\n e = []\n \n def print(self):\n str = str(e)\n return str\n\n\nendofhead = 'END OF HEADER'\n\nclass MainWindow(QMainWindow, Ui_MainWindow):\n \"\"\"\n Class documentation goes here.\n \"\"\"\n def __init__(self, parent=None):\n \"\"\"\n Constructor\n \n @param parent reference to the parent widget\n @type QWidget\n \"\"\"\n super(MainWindow, self).__init__(parent)\n self.setupUi(self)\n \n @pyqtSlot()\n def on_action_Open_triggered(self):\n \"\"\"\n Slot documentation goes here.\n \"\"\"\n # \n global ephemeris\n \n filename,_ = QFileDialog.getOpenFileName(self, '输入星历文件', './', 'All Files (*);;2020 RINEX N Files (*.20n)');\n if filename == '':\n return 0\n print(filename)\n f=open(filename,'r', encoding='utf-8')\n global lines \n for line in f.readlines():\n line=line.strip('\\n') #去掉换行符\\n\n lines.append(line)\n f.close()\n t = 0\n for line in lines:\n if t > 0:\n if line[1] != ' ':\n \n self.plainTextEdit.appendPlainText(line)\n elif line[1] == ' ' and line[4] != 0:\n \n self.plainTextEdit.appendPlainText(line)\n \n if line[60:73] == 'END OF HEADER':\n t = 1 \n \n \n \n @pyqtSlot()\n def on_action_Close_triggered(self):\n \"\"\"\n Slot documentation goes here.\n \"\"\"\n # TODO: not implemented yet\n raise NotImplementedError\n \n @pyqtSlot()\n def on_action_Save_triggered(self):\n \"\"\"\n Slot documentation goes here.\n \"\"\"\n # TODO: not implemented yet\n raise NotImplementedError\n \n @pyqtSlot()\n def on_action_Quit_triggered(self):\n self.close()\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n dlg = MainWindow()\n dlg.show()\n sys.exit(app.exec_())\n" ]
[ [ "numpy.set_printoptions" ] ]
yiyuezhuo/loss-landscape
[ "464dffdeba6cd45f0275836552e551addc7249a2" ]
[ "cifar10/model_loader.py" ]
[ "import os\nimport torch, torchvision\nimport cifar10.models.vgg as vgg\nimport cifar10.models.resnet as resnet\nimport cifar10.models.densenet as densenet\n\n# map between model name and function\nmodels = {\n 'vgg9' : vgg.VGG9,\n 'densenet121' : densenet.DenseNet121,\n 'resnet18' : resnet.ResNet18,\n 'resnet18_noshort' : resnet.ResNet18_noshort,\n 'resnet34' : resnet.ResNet34,\n 'resnet34_noshort' : resnet.ResNet34_noshort,\n 'resnet50' : resnet.ResNet50,\n 'resnet50_noshort' : resnet.ResNet50_noshort,\n 'resnet101' : resnet.ResNet101,\n 'resnet101_noshort' : resnet.ResNet101_noshort,\n 'resnet152' : resnet.ResNet152,\n 'resnet152_noshort' : resnet.ResNet152_noshort,\n 'resnet20' : resnet.ResNet20, \n 'resnet20_noshort' : resnet.ResNet20_noshort,\n 'resnet32_noshort' : resnet.ResNet32_noshort,\n 'resnet44_noshort' : resnet.ResNet44_noshort,\n 'resnet50_16_noshort' : resnet.ResNet50_16_noshort,\n 'resnet56' : resnet.ResNet56,\n 'resnet56_noshort' : resnet.ResNet56_noshort,\n 'resnet110' : resnet.ResNet110,\n 'resnet110_noshort' : resnet.ResNet110_noshort,\n 'wrn56_2' : resnet.WRN56_2,\n 'wrn56_2_noshort' : resnet.WRN56_2_noshort,\n 'wrn56_4' : resnet.WRN56_4,\n 'wrn56_4_noshort' : resnet.WRN56_4_noshort,\n 'wrn56_8' : resnet.WRN56_8,\n 'wrn56_8_noshort' : resnet.WRN56_8_noshort,\n 'wrn110_2_noshort' : resnet.WRN110_2_noshort,\n 'wrn110_4_noshort' : resnet.WRN110_4_noshort,\n}\n\ndef load(model_name, model_file=None, data_parallel=False):\n net = models[model_name]()\n if data_parallel: # the model is saved in data paralle mode\n net = torch.nn.DataParallel(net)\n\n if model_file:\n assert os.path.exists(model_file), model_file + \" does not exist.\"\n stored = torch.load(model_file, map_location=lambda storage, loc: storage)\n if 'state_dict' in stored.keys():\n net.load_state_dict(stored['state_dict'])\n else:\n net.load_state_dict(stored)\n\n if data_parallel: # convert the model back to the single GPU version\n net = net.module\n\n net.eval()\n return net\n" ]
[ [ "torch.nn.DataParallel", "torch.load" ] ]
akrajewska/cost-prophet
[ "f31b462b144c76ccfd56153e156d08d0795b579d" ]
[ "cost_prophet/utils/experiment.py" ]
[ "import os\n\nimport pandas as pd\nfrom dask import delayed, compute\nfrom dotenv import dotenv_values\n\nfrom cost_prophet.utils.evaluation import test_error\nfrom cost_prophet.utils.linear_alebra import get_known_indices, split_tests_sets\n\nconfig = dotenv_values()\n\nOUTPUT_DIR = config.get(\"OUTPUT_DIR\")\n\n\ndef run_trial(solver, X, known_indices, trial):\n test_indices, train_indices, X_train = split_tests_sets(known_indices, X)\n X_soft_impute_results = solver.fit_transform(X_train)\n results = []\n for shrinkage_value, X_out in X_soft_impute_results:\n _test_error = test_error(X_out, X, test_indices)\n results.append([trial, shrinkage_value, _test_error])\n return results\n\n\ndef experiment(solver, trials, X):\n errors = []\n known_indices = get_known_indices(X)\n for trial in range(trials):\n _test_error_data = delayed(run_trial)(solver, X, known_indices, trial)\n errors += _test_error_data\n errors = compute(errors)\n # TODO dlaczego jest tuple?\n df = pd.DataFrame(data=errors[0], columns=['param', 'trial', 'shrinkage value', 'test error'])\n df.to_csv(os.path.join(OUTPUT_DIR, type(solver).__name__))\n return df\n" ]
[ [ "pandas.DataFrame" ] ]
rachel-1/transformers
[ "d7398397c8202b8aa4bceaff51e90c6d12c1dec1" ]
[ "transformers/modeling_bert.py" ]
[ "# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"PyTorch BERT model. \"\"\"\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport json\nimport logging\nimport math\nimport os\nimport sys\nfrom io import open\n\nimport torch\nfrom torch import nn\nfrom torch.nn import CrossEntropyLoss, MSELoss\n\nfrom .modeling_utils import PreTrainedModel, prune_linear_layer\nfrom .configuration_bert import BertConfig\nfrom .file_utils import add_start_docstrings\n\nlogger = logging.getLogger(__name__)\n\nBERT_PRETRAINED_MODEL_ARCHIVE_MAP = {\n 'bert-base-uncased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-pytorch_model.bin\",\n 'bert-large-uncased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-pytorch_model.bin\",\n 'bert-base-cased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-pytorch_model.bin\",\n 'bert-large-cased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-pytorch_model.bin\",\n 'bert-base-multilingual-uncased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-pytorch_model.bin\",\n 'bert-base-multilingual-cased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-pytorch_model.bin\",\n 'bert-base-chinese': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-pytorch_model.bin\",\n 'bert-base-german-cased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-cased-pytorch_model.bin\",\n 'bert-large-uncased-whole-word-masking': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-pytorch_model.bin\",\n 'bert-large-cased-whole-word-masking': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-pytorch_model.bin\",\n 'bert-large-uncased-whole-word-masking-finetuned-squad': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-pytorch_model.bin\",\n 'bert-large-cased-whole-word-masking-finetuned-squad': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-pytorch_model.bin\",\n 'bert-base-cased-finetuned-mrpc': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-pytorch_model.bin\",\n 'bert-base-german-dbmdz-cased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-dbmdz-cased-pytorch_model.bin\",\n 'bert-base-german-dbmdz-uncased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-dbmdz-uncased-pytorch_model.bin\",\n}\n\ndef load_tf_weights_in_bert(model, config, tf_checkpoint_path):\n \"\"\" Load tf checkpoints in a pytorch model.\n \"\"\"\n try:\n import re\n import numpy as np\n import tensorflow as tf\n except 
ImportError:\n logger.error(\"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see \"\n \"https://www.tensorflow.org/install/ for installation instructions.\")\n raise\n tf_path = os.path.abspath(tf_checkpoint_path)\n logger.info(\"Converting TensorFlow checkpoint from {}\".format(tf_path))\n # Load weights from TF model\n init_vars = tf.train.list_variables(tf_path)\n names = []\n arrays = []\n for name, shape in init_vars:\n logger.info(\"Loading TF weight {} with shape {}\".format(name, shape))\n array = tf.train.load_variable(tf_path, name)\n names.append(name)\n arrays.append(array)\n\n for name, array in zip(names, arrays):\n name = name.split('/')\n # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v\n # which are not required for using pretrained model\n if any(n in [\"adam_v\", \"adam_m\", \"global_step\"] for n in name):\n logger.info(\"Skipping {}\".format(\"/\".join(name)))\n continue\n pointer = model\n for m_name in name:\n if re.fullmatch(r'[A-Za-z]+_\\d+', m_name):\n l = re.split(r'_(\\d+)', m_name)\n else:\n l = [m_name]\n if l[0] == 'kernel' or l[0] == 'gamma':\n pointer = getattr(pointer, 'weight')\n elif l[0] == 'output_bias' or l[0] == 'beta':\n pointer = getattr(pointer, 'bias')\n elif l[0] == 'output_weights':\n pointer = getattr(pointer, 'weight')\n elif l[0] == 'squad':\n pointer = getattr(pointer, 'classifier')\n else:\n try:\n pointer = getattr(pointer, l[0])\n except AttributeError:\n logger.info(\"Skipping {}\".format(\"/\".join(name)))\n continue\n if len(l) >= 2:\n num = int(l[1])\n pointer = pointer[num]\n if m_name[-11:] == '_embeddings':\n pointer = getattr(pointer, 'weight')\n elif m_name == 'kernel':\n array = np.transpose(array)\n try:\n assert pointer.shape == array.shape\n except AssertionError as e:\n e.args += (pointer.shape, array.shape)\n raise\n logger.info(\"Initialize PyTorch weight {}\".format(name))\n pointer.data = torch.from_numpy(array)\n return model\n\n\ndef gelu(x):\n \"\"\" Original Implementation of the gelu activation function in Google Bert repo when initially created.\n For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):\n 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))\n Also see https://arxiv.org/abs/1606.08415\n \"\"\"\n return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))\n\ndef gelu_new(x):\n \"\"\" Implementation of the gelu activation function currently in Google Bert repo (identical to OpenAI GPT).\n Also see https://arxiv.org/abs/1606.08415\n \"\"\"\n return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))\n\ndef swish(x):\n return x * torch.sigmoid(x)\n\n\nACT2FN = {\"gelu\": gelu, \"relu\": torch.nn.functional.relu, \"swish\": swish, \"gelu_new\": gelu_new}\n\n\nBertLayerNorm = torch.nn.LayerNorm\n\nclass BertEmbeddings(nn.Module):\n \"\"\"Construct the embeddings from word, position and token_type embeddings.\n \"\"\"\n def __init__(self, config):\n super(BertEmbeddings, self).__init__()\n self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)\n self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)\n self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)\n\n # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load\n # any TensorFlow checkpoint file\n self.LayerNorm = 
BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, input_ids, token_type_ids=None, position_ids=None):\n seq_length = input_ids.size(1)\n if position_ids is None:\n position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)\n position_ids = position_ids.unsqueeze(0).expand_as(input_ids)\n if token_type_ids is None:\n token_type_ids = torch.zeros_like(input_ids)\n\n words_embeddings = self.word_embeddings(input_ids)\n position_embeddings = self.position_embeddings(position_ids)\n token_type_embeddings = self.token_type_embeddings(token_type_ids)\n\n embeddings = words_embeddings + position_embeddings + token_type_embeddings\n embeddings = self.LayerNorm(embeddings)\n embeddings = self.dropout(embeddings)\n return embeddings\n\n\nclass BertSelfAttention(nn.Module):\n def __init__(self, config):\n super(BertSelfAttention, self).__init__()\n if config.hidden_size % config.num_attention_heads != 0:\n raise ValueError(\n \"The hidden size (%d) is not a multiple of the number of attention \"\n \"heads (%d)\" % (config.hidden_size, config.num_attention_heads))\n self.output_attentions = config.output_attentions\n\n self.num_attention_heads = config.num_attention_heads\n self.attention_head_size = int(config.hidden_size / config.num_attention_heads)\n self.all_head_size = self.num_attention_heads * self.attention_head_size\n\n self.query = nn.Linear(config.hidden_size, self.all_head_size)\n self.key = nn.Linear(config.hidden_size, self.all_head_size)\n self.value = nn.Linear(config.hidden_size, self.all_head_size)\n\n self.dropout = nn.Dropout(config.attention_probs_dropout_prob)\n\n def transpose_for_scores(self, x):\n new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)\n x = x.view(*new_x_shape)\n return x.permute(0, 2, 1, 3)\n\n def forward(self, hidden_states, attention_mask=None, head_mask=None):\n mixed_query_layer = self.query(hidden_states)\n mixed_key_layer = self.key(hidden_states)\n mixed_value_layer = self.value(hidden_states)\n\n query_layer = self.transpose_for_scores(mixed_query_layer)\n key_layer = self.transpose_for_scores(mixed_key_layer)\n value_layer = self.transpose_for_scores(mixed_value_layer)\n\n # Take the dot product between \"query\" and \"key\" to get the raw attention scores.\n attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))\n attention_scores = attention_scores / math.sqrt(self.attention_head_size)\n if attention_mask is not None:\n # Apply the attention mask is (precomputed for all layers in BertModel forward() function)\n attention_scores = attention_scores + attention_mask\n\n # Normalize the attention scores to probabilities.\n attention_probs = nn.Softmax(dim=-1)(attention_scores)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs = self.dropout(attention_probs)\n\n # Mask heads if we want to\n if head_mask is not None:\n attention_probs = attention_probs * head_mask\n\n context_layer = torch.matmul(attention_probs, value_layer)\n\n context_layer = context_layer.permute(0, 2, 1, 3).contiguous()\n new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)\n context_layer = context_layer.view(*new_context_layer_shape)\n\n outputs = (context_layer, attention_probs) if self.output_attentions else (context_layer,)\n return outputs\n\n\nclass BertSelfOutput(nn.Module):\n 
def __init__(self, config):\n super(BertSelfOutput, self).__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states, input_tensor):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\nclass BertAttention(nn.Module):\n def __init__(self, config):\n super(BertAttention, self).__init__()\n self.self = BertSelfAttention(config)\n self.output = BertSelfOutput(config)\n self.pruned_heads = set()\n\n def prune_heads(self, heads):\n if len(heads) == 0:\n return\n mask = torch.ones(self.self.num_attention_heads, self.self.attention_head_size)\n heads = set(heads) - self.pruned_heads # Convert to set and emove already pruned heads\n for head in heads:\n # Compute how many pruned heads are before the head and move the index accordingly\n head = head - sum(1 if h < head else 0 for h in self.pruned_heads)\n mask[head] = 0\n mask = mask.view(-1).contiguous().eq(1)\n index = torch.arange(len(mask))[mask].long()\n\n # Prune linear layers\n self.self.query = prune_linear_layer(self.self.query, index)\n self.self.key = prune_linear_layer(self.self.key, index)\n self.self.value = prune_linear_layer(self.self.value, index)\n self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)\n\n # Update hyper params and store pruned heads\n self.self.num_attention_heads = self.self.num_attention_heads - len(heads)\n self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads\n self.pruned_heads = self.pruned_heads.union(heads)\n\n def forward(self, input_tensor, attention_mask=None, head_mask=None):\n self_outputs = self.self(input_tensor, attention_mask, head_mask)\n attention_output = self.output(self_outputs[0], input_tensor)\n outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them\n return outputs\n\n\nclass BertIntermediate(nn.Module):\n def __init__(self, config):\n super(BertIntermediate, self).__init__()\n self.dense = nn.Linear(config.hidden_size, config.intermediate_size)\n if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)):\n self.intermediate_act_fn = ACT2FN[config.hidden_act]\n else:\n self.intermediate_act_fn = config.hidden_act\n\n def forward(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.intermediate_act_fn(hidden_states)\n return hidden_states\n\n\nclass BertOutput(nn.Module):\n def __init__(self, config):\n super(BertOutput, self).__init__()\n self.dense = nn.Linear(config.intermediate_size, config.hidden_size)\n self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states, input_tensor):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\nclass BertLayer(nn.Module):\n def __init__(self, config):\n super(BertLayer, self).__init__()\n self.attention = BertAttention(config)\n self.intermediate = BertIntermediate(config)\n self.output = BertOutput(config)\n\n def forward(self, hidden_states, attention_mask=None, head_mask=None):\n attention_outputs = self.attention(hidden_states, 
attention_mask, head_mask)\n attention_output = attention_outputs[0]\n intermediate_output = self.intermediate(attention_output)\n layer_output = self.output(intermediate_output, attention_output)\n outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them\n return outputs\n\n\nclass BertEncoder(nn.Module):\n def __init__(self, config):\n super(BertEncoder, self).__init__()\n self.output_attentions = config.output_attentions\n self.output_hidden_states = config.output_hidden_states\n self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])\n\n def forward(self, hidden_states, attention_mask=None, head_mask=None):\n all_hidden_states = ()\n all_attentions = ()\n for i, layer_module in enumerate(self.layer):\n if self.output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n layer_outputs = layer_module(hidden_states, attention_mask, head_mask[i])\n hidden_states = layer_outputs[0]\n\n if self.output_attentions:\n all_attentions = all_attentions + (layer_outputs[1],)\n\n # Add last layer\n if self.output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n outputs = (hidden_states,)\n if self.output_hidden_states:\n outputs = outputs + (all_hidden_states,)\n if self.output_attentions:\n outputs = outputs + (all_attentions,)\n return outputs # last-layer hidden state, (all hidden states), (all attentions)\n\n\nclass BertPooler(nn.Module):\n def __init__(self, config):\n super(BertPooler, self).__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.activation = nn.Tanh()\n\n def forward(self, hidden_states):\n # We \"pool\" the model by simply taking the hidden state corresponding\n # to the first token.\n first_token_tensor = hidden_states[:, 0]\n pooled_output = self.dense(first_token_tensor)\n pooled_output = self.activation(pooled_output)\n return pooled_output\n\n\nclass BertPredictionHeadTransform(nn.Module):\n def __init__(self, config):\n super(BertPredictionHeadTransform, self).__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)):\n self.transform_act_fn = ACT2FN[config.hidden_act]\n else:\n self.transform_act_fn = config.hidden_act\n self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n\n def forward(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.transform_act_fn(hidden_states)\n hidden_states = self.LayerNorm(hidden_states)\n return hidden_states\n\n\nclass BertLMPredictionHead(nn.Module):\n def __init__(self, config):\n super(BertLMPredictionHead, self).__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size,\n config.vocab_size,\n bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states) + self.bias\n return hidden_states\n\n\nclass BertOnlyMLMHead(nn.Module):\n def __init__(self, config):\n super(BertOnlyMLMHead, self).__init__()\n self.predictions = BertLMPredictionHead(config)\n\n def forward(self, sequence_output):\n prediction_scores = self.predictions(sequence_output)\n return prediction_scores\n\n\nclass 
BertOnlyNSPHead(nn.Module):\n def __init__(self, config):\n super(BertOnlyNSPHead, self).__init__()\n self.seq_relationship = nn.Linear(config.hidden_size, 2)\n\n def forward(self, pooled_output):\n seq_relationship_score = self.seq_relationship(pooled_output)\n return seq_relationship_score\n\n\nclass BertPreTrainingHeads(nn.Module):\n def __init__(self, config):\n super(BertPreTrainingHeads, self).__init__()\n self.predictions = BertLMPredictionHead(config)\n self.seq_relationship = nn.Linear(config.hidden_size, 2)\n\n def forward(self, sequence_output, pooled_output):\n prediction_scores = self.predictions(sequence_output)\n seq_relationship_score = self.seq_relationship(pooled_output)\n return prediction_scores, seq_relationship_score\n\n\nclass BertPreTrainedModel(PreTrainedModel):\n \"\"\" An abstract class to handle weights initialization and\n a simple interface for dowloading and loading pretrained models.\n \"\"\"\n config_class = BertConfig\n pretrained_model_archive_map = BERT_PRETRAINED_MODEL_ARCHIVE_MAP\n load_tf_weights = load_tf_weights_in_bert\n base_model_prefix = \"bert\"\n\n def _init_weights(self, module):\n \"\"\" Initialize the weights \"\"\"\n if isinstance(module, (nn.Linear, nn.Embedding)):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n elif isinstance(module, BertLayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()\n\n\nBERT_START_DOCSTRING = r\"\"\" The BERT model was proposed in\n `BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`_\n by Jacob Devlin, Ming-Wei Chang, Kenton Lee and Kristina Toutanova. It's a bidirectional transformer\n pre-trained using a combination of masked language modeling objective and next sentence prediction\n on a large corpus comprising the Toronto Book Corpus and Wikipedia.\n\n This model is a PyTorch `torch.nn.Module`_ sub-class. Use it as a regular PyTorch Module and\n refer to the PyTorch documentation for all matter related to general usage and behavior.\n\n .. _`BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`:\n https://arxiv.org/abs/1810.04805\n\n .. _`torch.nn.Module`:\n https://pytorch.org/docs/stable/nn.html#module\n\n Parameters:\n config (:class:`~transformers.BertConfig`): Model configuration class with all the parameters of the model. \n Initializing with a config file does not load the weights associated with the model, only the configuration.\n Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.\n\"\"\"\n\nBERT_INPUTS_DOCSTRING = r\"\"\"\n Inputs:\n **input_ids**: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:\n Indices of input sequence tokens in the vocabulary.\n To match pre-training, BERT input sequence should be formatted with [CLS] and [SEP] tokens as follows:\n\n (a) For sequence pairs:\n\n ``tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]``\n \n ``token_type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1``\n\n (b) For single sequences:\n\n ``tokens: [CLS] the dog is hairy . 
[SEP]``\n \n ``token_type_ids: 0 0 0 0 0 0 0``\n\n Bert is a model with absolute position embeddings so it's usually advised to pad the inputs on\n the right rather than the left.\n\n Indices can be obtained using :class:`transformers.BertTokenizer`.\n See :func:`transformers.PreTrainedTokenizer.encode` and\n :func:`transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.\n **attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``:\n Mask to avoid performing attention on padding token indices.\n Mask values selected in ``[0, 1]``:\n ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.\n **token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:\n Segment token indices to indicate first and second portions of the inputs.\n Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``\n corresponds to a `sentence B` token\n (see `BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`_ for more details).\n **position_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:\n Indices of positions of each input sequence tokens in the position embeddings.\n Selected in the range ``[0, config.max_position_embeddings - 1]``.\n **head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:\n Mask to nullify selected heads of the self-attention modules.\n Mask values selected in ``[0, 1]``:\n ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.\n\"\"\"\n\n@add_start_docstrings(\"The bare Bert Model transformer outputting raw hidden-states without any specific head on top.\",\n BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)\nclass BertModel(BertPreTrainedModel):\n r\"\"\"\n Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:\n **last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)``\n Sequence of hidden-states at the output of the last layer of the model.\n **pooler_output**: ``torch.FloatTensor`` of shape ``(batch_size, hidden_size)``\n Last layer hidden-state of the first token of the sequence (classification token)\n further processed by a Linear layer and a Tanh activation function. The Linear\n layer weights are trained from the next sentence prediction (classification)\n objective during Bert pretraining. 
This output is usually *not* a good summary\n of the semantic content of the input, you're often better with averaging or pooling\n the sequence of hidden-states for the whole input sequence.\n **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)\n list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)\n of shape ``(batch_size, sequence_length, hidden_size)``:\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n **attentions**: (`optional`, returned when ``config.output_attentions=True``)\n list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.\n\n Examples::\n\n tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n model = BertModel.from_pretrained('bert-base-uncased')\n input_ids = torch.tensor(tokenizer.encode(\"Hello, my dog is cute\")).unsqueeze(0) # Batch size 1\n outputs = model(input_ids)\n last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple\n\n \"\"\"\n def __init__(self, config):\n super(BertModel, self).__init__(config)\n\n self.embeddings = BertEmbeddings(config)\n self.encoder = BertEncoder(config)\n self.pooler = BertPooler(config)\n\n self.init_weights()\n\n def _resize_token_embeddings(self, new_num_tokens):\n old_embeddings = self.embeddings.word_embeddings\n new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens)\n self.embeddings.word_embeddings = new_embeddings\n return self.embeddings.word_embeddings\n\n def _prune_heads(self, heads_to_prune):\n \"\"\" Prunes heads of the model.\n heads_to_prune: dict of {layer_num: list of heads to prune in this layer}\n See base class PreTrainedModel\n \"\"\"\n for layer, heads in heads_to_prune.items():\n self.encoder.layer[layer].attention.prune_heads(heads)\n\n def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None):\n if attention_mask is None:\n attention_mask = torch.ones_like(input_ids)\n if token_type_ids is None:\n token_type_ids = torch.zeros_like(input_ids)\n\n # We create a 3D attention mask from a 2D tensor mask.\n # Sizes are [batch_size, 1, 1, to_seq_length]\n # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]\n # this attention mask is more simple than the triangular masking of causal attention\n # used in OpenAI GPT, we just need to prepare the broadcast dimension here.\n extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)\n\n # Since attention_mask is 1.0 for positions we want to attend and 0.0 for\n # masked positions, this operation will create a tensor which is 0.0 for\n # positions we want to attend and -10000.0 for masked positions.\n # Since we are adding it to the raw scores before the softmax, this is\n # effectively the same as removing these entirely.\n extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility\n extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0\n\n # Prepare head mask if needed\n # 1.0 in head_mask indicate we keep the head\n # attention_probs has shape bsz x n_heads x N x N\n # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]\n # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]\n 
if head_mask is not None:\n if head_mask.dim() == 1:\n head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)\n head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)\n elif head_mask.dim() == 2:\n head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) # We can specify head_mask for each layer\n head_mask = head_mask.to(dtype=next(self.parameters()).dtype) # switch to fload if need + fp16 compatibility\n else:\n head_mask = [None] * self.config.num_hidden_layers\n\n embedding_output = self.embeddings(input_ids, position_ids=position_ids, token_type_ids=token_type_ids)\n encoder_outputs = self.encoder(embedding_output,\n extended_attention_mask,\n head_mask=head_mask)\n sequence_output = encoder_outputs[0]\n pooled_output = self.pooler(sequence_output)\n\n outputs = (sequence_output, pooled_output,) + encoder_outputs[1:] # add hidden_states and attentions if they are here\n return outputs # sequence_output, pooled_output, (hidden_states), (attentions)\n\n\n@add_start_docstrings(\"\"\"Bert Model with two heads on top as done during the pre-training:\n a `masked language modeling` head and a `next sentence prediction (classification)` head. \"\"\",\n BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)\nclass BertForPreTraining(BertPreTrainedModel):\n r\"\"\"\n **masked_lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:\n Labels for computing the masked language modeling loss.\n Indices should be in ``[-1, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)\n Tokens with indices set to ``-1`` are ignored (masked), the loss is only computed for the tokens with labels\n in ``[0, ..., config.vocab_size]``\n **next_sentence_label**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:\n Labels for computing the next sequence prediction (classification) loss. 
Input should be a sequence pair (see ``input_ids`` docstring)\n Indices should be in ``[0, 1]``.\n ``0`` indicates sequence B is a continuation of sequence A,\n ``1`` indicates sequence B is a random sequence.\n\n Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:\n **loss**: (`optional`, returned when both ``masked_lm_labels`` and ``next_sentence_label`` are provided) ``torch.FloatTensor`` of shape ``(1,)``:\n Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss.\n **prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``\n Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).\n **seq_relationship_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, 2)``\n Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).\n **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)\n list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)\n of shape ``(batch_size, sequence_length, hidden_size)``:\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n **attentions**: (`optional`, returned when ``config.output_attentions=True``)\n list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.\n\n Examples::\n\n tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n model = BertForPreTraining.from_pretrained('bert-base-uncased')\n input_ids = torch.tensor(tokenizer.encode(\"Hello, my dog is cute\")).unsqueeze(0) # Batch size 1\n outputs = model(input_ids)\n prediction_scores, seq_relationship_scores = outputs[:2]\n\n \"\"\"\n def __init__(self, config):\n super(BertForPreTraining, self).__init__(config)\n\n self.bert = BertModel(config)\n self.cls = BertPreTrainingHeads(config)\n\n self.init_weights()\n self.tie_weights()\n\n def tie_weights(self):\n \"\"\" Make sure we are sharing the input and output embeddings.\n Export to TorchScript can't handle parameter sharing so we are cloning them instead.\n \"\"\"\n self._tie_or_clone_weights(self.cls.predictions.decoder,\n self.bert.embeddings.word_embeddings)\n\n def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None,\n masked_lm_labels=None, next_sentence_label=None):\n\n outputs = self.bert(input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids, \n head_mask=head_mask)\n\n sequence_output, pooled_output = outputs[:2]\n prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)\n\n outputs = (prediction_scores, seq_relationship_score,) + outputs[2:] # add hidden states and attention if they are here\n\n if masked_lm_labels is not None and next_sentence_label is not None:\n loss_fct = CrossEntropyLoss(ignore_index=-1)\n masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))\n next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))\n total_loss = masked_lm_loss + next_sentence_loss\n outputs = (total_loss,) + outputs\n\n return outputs # (loss), prediction_scores, 
seq_relationship_score, (hidden_states), (attentions)\n\n\n@add_start_docstrings(\"\"\"Bert Model with a `language modeling` head on top. \"\"\",\n BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)\nclass BertForMaskedLM(BertPreTrainedModel):\n r\"\"\"\n **masked_lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:\n Labels for computing the masked language modeling loss.\n Indices should be in ``[-1, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)\n Tokens with indices set to ``-1`` are ignored (masked), the loss is only computed for the tokens with labels\n in ``[0, ..., config.vocab_size]``\n\n Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:\n **loss**: (`optional`, returned when ``masked_lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:\n Masked language modeling loss.\n **prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``\n Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).\n **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)\n list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)\n of shape ``(batch_size, sequence_length, hidden_size)``:\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n **attentions**: (`optional`, returned when ``config.output_attentions=True``)\n list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.\n\n Examples::\n\n tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n model = BertForMaskedLM.from_pretrained('bert-base-uncased')\n input_ids = torch.tensor(tokenizer.encode(\"Hello, my dog is cute\")).unsqueeze(0) # Batch size 1\n outputs = model(input_ids, masked_lm_labels=input_ids)\n loss, prediction_scores = outputs[:2]\n\n \"\"\"\n def __init__(self, config):\n super(BertForMaskedLM, self).__init__(config)\n\n self.bert = BertModel(config)\n self.cls = BertOnlyMLMHead(config)\n\n self.init_weights()\n self.tie_weights()\n\n def tie_weights(self):\n \"\"\" Make sure we are sharing the input and output embeddings.\n Export to TorchScript can't handle parameter sharing so we are cloning them instead.\n \"\"\"\n self._tie_or_clone_weights(self.cls.predictions.decoder,\n self.bert.embeddings.word_embeddings)\n\n def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None,\n masked_lm_labels=None):\n\n outputs = self.bert(input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids, \n head_mask=head_mask)\n\n sequence_output = outputs[0]\n prediction_scores = self.cls(sequence_output)\n\n outputs = (prediction_scores,) + outputs[2:] # Add hidden states and attention if they are here\n if masked_lm_labels is not None:\n loss_fct = CrossEntropyLoss(ignore_index=-1)\n masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))\n outputs = (masked_lm_loss,) + outputs\n\n return outputs # (masked_lm_loss), prediction_scores, (hidden_states), (attentions)\n\n\n@add_start_docstrings(\"\"\"Bert Model with a `next sentence prediction (classification)` head on top. 
\"\"\",\n BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)\nclass BertForNextSentencePrediction(BertPreTrainedModel):\n r\"\"\"\n **next_sentence_label**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:\n Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see ``input_ids`` docstring)\n Indices should be in ``[0, 1]``.\n ``0`` indicates sequence B is a continuation of sequence A,\n ``1`` indicates sequence B is a random sequence.\n\n Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:\n **loss**: (`optional`, returned when ``next_sentence_label`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:\n Next sequence prediction (classification) loss.\n **seq_relationship_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, 2)``\n Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).\n **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)\n list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)\n of shape ``(batch_size, sequence_length, hidden_size)``:\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n **attentions**: (`optional`, returned when ``config.output_attentions=True``)\n list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.\n\n Examples::\n\n tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n model = BertForNextSentencePrediction.from_pretrained('bert-base-uncased')\n input_ids = torch.tensor(tokenizer.encode(\"Hello, my dog is cute\")).unsqueeze(0) # Batch size 1\n outputs = model(input_ids)\n seq_relationship_scores = outputs[0]\n\n \"\"\"\n def __init__(self, config):\n super(BertForNextSentencePrediction, self).__init__(config)\n\n self.bert = BertModel(config)\n self.cls = BertOnlyNSPHead(config)\n\n self.init_weights()\n\n def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None,\n next_sentence_label=None):\n\n outputs = self.bert(input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids, \n head_mask=head_mask)\n\n pooled_output = outputs[1]\n\n seq_relationship_score = self.cls(pooled_output)\n\n outputs = (seq_relationship_score,) + outputs[2:] # add hidden states and attention if they are here\n if next_sentence_label is not None:\n loss_fct = CrossEntropyLoss(ignore_index=-1)\n next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))\n outputs = (next_sentence_loss,) + outputs\n\n return outputs # (next_sentence_loss), seq_relationship_score, (hidden_states), (attentions)\n\n\n@add_start_docstrings(\"\"\"Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. 
\"\"\",\n BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)\nclass BertForSequenceClassification(BertPreTrainedModel):\n r\"\"\"\n **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:\n Labels for computing the sequence classification/regression loss.\n Indices should be in ``[0, ..., config.num_labels - 1]``.\n If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss),\n If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy).\n\n Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:\n **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:\n Classification (or regression if config.num_labels==1) loss.\n **logits**: ``torch.FloatTensor`` of shape ``(batch_size, config.num_labels)``\n Classification (or regression if config.num_labels==1) scores (before SoftMax).\n **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)\n list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)\n of shape ``(batch_size, sequence_length, hidden_size)``:\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n **attentions**: (`optional`, returned when ``config.output_attentions=True``)\n list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.\n\n Examples::\n\n tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n model = BertForSequenceClassification.from_pretrained('bert-base-uncased')\n input_ids = torch.tensor(tokenizer.encode(\"Hello, my dog is cute\")).unsqueeze(0) # Batch size 1\n labels = torch.tensor([1]).unsqueeze(0) # Batch size 1\n outputs = model(input_ids, labels=labels)\n loss, logits = outputs[:2]\n\n \"\"\"\n def __init__(self, config):\n super(BertForSequenceClassification, self).__init__(config)\n self.num_labels = config.num_labels\n\n self.bert = BertModel(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)\n\n self.init_weights()\n\n def forward(self, input_ids, attention_mask=None, token_type_ids=None,\n position_ids=None, head_mask=None, labels=None):\n\n outputs = self.bert(input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids, \n head_mask=head_mask)\n\n pooled_output = outputs[1]\n\n pooled_output = self.dropout(pooled_output)\n logits = self.classifier(pooled_output)\n\n outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here\n\n if labels is not None:\n if self.num_labels == 1:\n # We are doing regression\n loss_fct = MSELoss()\n loss = loss_fct(logits.view(-1), labels.view(-1))\n else:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n outputs = (loss,) + outputs\n\n return outputs # (loss), logits, (hidden_states), (attentions)\n\n\n@add_start_docstrings(\"\"\"Bert Model with a multiple choice classification head on top (a linear layer on top of\n the pooled output and a softmax) e.g. for RocStories/SWAG tasks. 
\"\"\",\n BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)\nclass BertForMultipleChoice(BertPreTrainedModel):\n r\"\"\"\n **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:\n Labels for computing the multiple choice classification loss.\n Indices should be in ``[0, ..., num_choices]`` where `num_choices` is the size of the second dimension\n of the input tensors. (see `input_ids` above)\n\n Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:\n **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:\n Classification loss.\n **classification_scores**: ``torch.FloatTensor`` of shape ``(batch_size, num_choices)`` where `num_choices` is the size of the second dimension\n of the input tensors. (see `input_ids` above).\n Classification scores (before SoftMax).\n **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)\n list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)\n of shape ``(batch_size, sequence_length, hidden_size)``:\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n **attentions**: (`optional`, returned when ``config.output_attentions=True``)\n list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.\n\n Examples::\n\n tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n model = BertForMultipleChoice.from_pretrained('bert-base-uncased')\n choices = [\"Hello, my dog is cute\", \"Hello, my cat is amazing\"]\n input_ids = torch.tensor([tokenizer.encode(s) for s in choices]).unsqueeze(0) # Batch size 1, 2 choices\n labels = torch.tensor(1).unsqueeze(0) # Batch size 1\n outputs = model(input_ids, labels=labels)\n loss, classification_scores = outputs[:2]\n\n \"\"\"\n def __init__(self, config):\n super(BertForMultipleChoice, self).__init__(config)\n\n self.bert = BertModel(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, 1)\n\n self.init_weights()\n\n def forward(self, input_ids, attention_mask=None, token_type_ids=None,\n position_ids=None, head_mask=None, labels=None):\n num_choices = input_ids.shape[1]\n\n input_ids = input_ids.view(-1, input_ids.size(-1))\n attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None\n token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None\n position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None\n\n outputs = self.bert(input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask)\n\n pooled_output = outputs[1]\n\n pooled_output = self.dropout(pooled_output)\n logits = self.classifier(pooled_output)\n reshaped_logits = logits.view(-1, num_choices)\n\n outputs = (reshaped_logits,) + outputs[2:] # add hidden states and attention if they are here\n\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(reshaped_logits, labels)\n outputs = (loss,) + outputs\n\n return outputs # (loss), reshaped_logits, (hidden_states), (attentions)\n\n\n@add_start_docstrings(\"\"\"Bert Model with a token classification head on top (a linear layer on top of\n the hidden-states 
output) e.g. for Named-Entity-Recognition (NER) tasks. \"\"\",\n BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)\nclass BertForTokenClassification(BertPreTrainedModel):\n r\"\"\"\n **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:\n Labels for computing the token classification loss.\n Indices should be in ``[0, ..., config.num_labels - 1]``.\n\n Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:\n **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:\n Classification loss.\n **scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.num_labels)``\n Classification scores (before SoftMax).\n **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)\n list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)\n of shape ``(batch_size, sequence_length, hidden_size)``:\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n **attentions**: (`optional`, returned when ``config.output_attentions=True``)\n list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.\n\n Examples::\n\n tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n model = BertForTokenClassification.from_pretrained('bert-base-uncased')\n input_ids = torch.tensor(tokenizer.encode(\"Hello, my dog is cute\")).unsqueeze(0) # Batch size 1\n labels = torch.tensor([1] * input_ids.size(1)).unsqueeze(0) # Batch size 1\n outputs = model(input_ids, labels=labels)\n loss, scores = outputs[:2]\n\n \"\"\"\n def __init__(self, config):\n super(BertForTokenClassification, self).__init__(config)\n self.num_labels = config.num_labels\n\n self.bert = BertModel(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, config.num_labels)\n\n self.init_weights()\n\n def forward(self, input_ids, attention_mask=None, token_type_ids=None,\n position_ids=None, head_mask=None, labels=None):\n\n outputs = self.bert(input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids, \n head_mask=head_mask)\n\n sequence_output = outputs[0]\n\n sequence_output = self.dropout(sequence_output)\n logits = self.classifier(sequence_output)\n\n outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n # Only keep active parts of the loss\n if attention_mask is not None:\n active_loss = attention_mask.view(-1) == 1\n active_logits = logits.view(-1, self.num_labels)[active_loss]\n active_labels = labels.view(-1)[active_loss]\n loss = loss_fct(active_logits, active_labels)\n else:\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n outputs = (loss,) + outputs\n\n return outputs # (loss), scores, (hidden_states), (attentions)\n\n\n@add_start_docstrings(\"\"\"Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of\n the hidden-states output to compute `span start logits` and `span end logits`). 
\"\"\",\n BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)\nclass BertForQuestionAnswering(BertPreTrainedModel):\n r\"\"\"\n **start_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:\n Labels for position (index) of the start of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (`sequence_length`).\n Position outside of the sequence are not taken into account for computing the loss.\n **end_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:\n Labels for position (index) of the end of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (`sequence_length`).\n Position outside of the sequence are not taken into account for computing the loss.\n\n Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:\n **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:\n Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.\n **start_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)``\n Span-start scores (before SoftMax).\n **end_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)``\n Span-end scores (before SoftMax).\n **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)\n list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)\n of shape ``(batch_size, sequence_length, hidden_size)``:\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n **attentions**: (`optional`, returned when ``config.output_attentions=True``)\n list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.\n\n Examples::\n\n tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n model = BertForQuestionAnswering.from_pretrained('bert-large-uncased-whole-word-masking-finetuned-squad')\n question, text = \"Who was Jim Henson?\", \"Jim Henson was a nice puppet\"\n input_text = \"[CLS] \" + question + \" [SEP] \" + text + \" [SEP]\"\n input_ids = tokenizer.encode(input_text)\n token_type_ids = [0 if i <= input_ids.index(102) else 1 for i in range(len(input_ids))] \n start_scores, end_scores = model(torch.tensor([input_ids]), token_type_ids=torch.tensor([token_type_ids]))\n all_tokens = tokenizer.convert_ids_to_tokens(input_ids) \n print(' '.join(all_tokens[torch.argmax(start_scores) : torch.argmax(end_scores)+1]))\n # a nice puppet\n\n\n \"\"\"\n def __init__(self, config):\n super(BertForQuestionAnswering, self).__init__(config)\n self.num_labels = config.num_labels\n\n self.bert = BertModel(config)\n self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)\n\n self.init_weights()\n\n def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None,\n start_positions=None, end_positions=None):\n\n outputs = self.bert(input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids, \n head_mask=head_mask)\n\n sequence_output = outputs[0]\n\n logits = self.qa_outputs(sequence_output)\n start_logits, end_logits = logits.split(1, dim=-1)\n start_logits = start_logits.squeeze(-1)\n end_logits = 
end_logits.squeeze(-1)\n\n outputs = (start_logits, end_logits,) + outputs[2:]\n if start_positions is not None and end_positions is not None:\n # If we are on multi-GPU, split add a dimension\n if len(start_positions.size()) > 1:\n start_positions = start_positions.squeeze(-1)\n if len(end_positions.size()) > 1:\n end_positions = end_positions.squeeze(-1)\n # sometimes the start/end positions are outside our model inputs, we ignore these terms\n ignored_index = start_logits.size(1)\n start_positions.clamp_(0, ignored_index)\n end_positions.clamp_(0, ignored_index)\n\n loss_fct = CrossEntropyLoss(ignore_index=ignored_index)\n start_loss = loss_fct(start_logits, start_positions)\n end_loss = loss_fct(end_logits, end_positions)\n total_loss = (start_loss + end_loss) / 2\n outputs = (total_loss,) + outputs\n\n return outputs # (loss), start_logits, end_logits, (hidden_states), (attentions)\n\nclass BertForVQR(BertPreTrainedModel):\n def __init__(self, config, num_labels=2, q_relevance=True, r_relevance=True, answer_extraction=True, answer_verification=False):\n super(BertForVQR, self).__init__(config)\n self.num_labels = num_labels\n self.bert = BertModel(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n \n # set up which tasks the network will do\n self.q_relevance = q_relevance\n self.r_relevance = r_relevance\n self.answer_extraction = answer_extraction\n\n if self.q_relevance:\n self.q_relevance_classifier = nn.Linear(config.hidden_size, num_labels)\n if self.r_relevance:\n self.r_relevance_classifier = nn.Linear(config.hidden_size, num_labels)\n if self.answer_extraction:\n self.span_classifier = nn.Linear(config.hidden_size, 2)\n\n self.init_weights()\n\n def forward(self, input_ids, token_type_ids=None, attention_mask=None,\n q_relevance_ids=None, r_relevance_ids=None,\n start_positions=None, end_positions=None, original_examples=None):\n output = self.bert(input_ids, attention_mask, token_type_ids)\n encoded_layers, pooled_output = output\n\n def classify_confusion(input_type='r', weighting=[1,1], answer_span_loss=None):\n if input_type == 'r':\n logits = self.r_relevance_classifier(self.dropout(pooled_output))\n labels = r_relevance_ids\n elif input_type == 'q':\n logits = self.q_relevance_classifier(self.dropout(pooled_output))\n labels = q_relevance_ids\n\n loss = 0\n if labels is not None:\n weights = labels.new(weighting).float()\n if self.num_labels > 1:\n loss_fct = CrossEntropyLoss(weight=weights)\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n else:\n loss_fct = BCEWithLogitsLoss(weight=weights)\n loss = loss_fct(logits.view(-1), labels.view(-1).float())\n\n if self.num_labels == 1:\n logits = nn.functional.sigmoid(logits)\n else:\n logits = torch.nn.functional.softmax(logits, dim=1)\n if answer_span_loss is not None:\n logit_adjustment = self.loss_multiplier*torch.stack([torch.zeros_like(answer_span_loss), answer_span_loss], dim=1)\n logit_adjustment[:, 1] = logit_adjustment[:, 1] + self.loss_bias\n logit_adjustment = logit_adjustment.where(answer_span_loss.view(-1, 1) != -1, answer_span_loss.new([[0, 0]]))\n logits = logits + logit_adjustment\n logits = torch.nn.functional.softmax(logits, dim=1)\n return loss, logits\n\n def extract_answer(loss_mask=None):\n logits = self.span_classifier(encoded_layers)\n start_logits, end_logits = logits.split(1, dim=-1)\n start_logits = start_logits.squeeze(-1)\n end_logits = end_logits.squeeze(-1)\n total_loss = 0\n if start_positions is not None and end_positions is not None:\n # ignore start/end 
positions outside model inputs\n ignored_index = start_logits.size(1)\n # avoid modifying input\n tmp_start_positions = start_positions.clamp(0, ignored_index)\n tmp_end_positions = end_positions.clamp(0, ignored_index)\n\n loss_fct = CrossEntropyLoss(reduction='none', ignore_index=ignored_index)\n start_loss = loss_fct(start_logits, tmp_start_positions)\n end_loss = loss_fct(end_logits, tmp_end_positions)\n if loss_mask is not None:\n start_loss = torch.where(loss_mask == 1, torch.zeros_like(start_loss), start_loss)\n end_loss = torch.where(loss_mask == 1, torch.zeros_like(end_loss), end_loss)\n \n total_loss = torch.mean(start_loss + end_loss)\n return total_loss, start_logits, end_logits\n \n q_loss, r_loss, span_loss = 0, 0, 0\n retvals = {}\n if self.q_relevance:\n # it's 3x more likely that a question is valid\n q_loss, q_logits = classify_confusion('q', weighting=[1,3])\n retvals['q_logits'] = q_logits\n\n answer_span_losses = None\n if self.answer_extraction:\n span_loss, start_logits, end_logits = extract_answer(r_relevance_ids)\n retvals['span_logits'] = [start_logits, end_logits]\n\n if self.r_relevance:\n # it's 2x more likely that a response is valid\n r_loss, r_logits = classify_confusion('r', weighting=[1,2], answer_span_loss=answer_span_losses)\n retvals['r_logits'] = r_logits\n \n retvals['loss'] = q_loss + r_loss + span_loss\n return tuple([retvals[key] for key in sorted(retvals.keys())])\n\n" ]
[ [ "torch.nn.Softmax", "torch.nn.functional.softmax", "torch.mean", "torch.zeros", "torch.nn.Embedding", "torch.pow", "torch.nn.Dropout", "torch.nn.CrossEntropyLoss", "torch.ones", "torch.from_numpy", "torch.nn.functional.sigmoid", "torch.arange", "tensorflow.train.list_variables", "torch.ones_like", "torch.sigmoid", "torch.zeros_like", "tensorflow.train.load_variable", "torch.nn.Linear", "numpy.transpose", "torch.nn.Tanh", "torch.matmul", "torch.nn.MSELoss" ] ]
reenasheoran/Flight_Fare_MLOP
[ "6848893c253307b1bc76f50d00016a458cdad1df" ]
[ "src/split_data.py" ]
[ "import os\nimport argparse\nfrom get_data import read_params\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\n\ndef split_data(config_path):\n config = read_params(config_path)\n data_path = config[\"filter_data\"][\"filter_data_csv\"]\n df = pd.read_csv(data_path,sep=',',encoding='utf-8')\n split_ratio = config[\"split_data\"][\"test_size\"]\n random_state = config[\"base\"][\"random_state\"]\n train,test=train_test_split(df,test_size=split_ratio,random_state=random_state)\n train_data_path=config[\"split_data\"][\"train_data\"]\n test_data_path=config[\"split_data\"][\"test_data\"]\n train.to_csv(train_data_path,sep=',',encoding='utf-8',index=False)\n test.to_csv(test_data_path,sep=',',encoding='utf-8',index=False)\n\n\n\nif __name__==\"__main__\":\n args=argparse.ArgumentParser()\n args.add_argument(\"--config\",default=\"params.yaml\")\n Parsed_args=args.parse_args()\n split_data(config_path=Parsed_args.config)\n" ]
[ [ "pandas.read_csv", "sklearn.model_selection.train_test_split" ] ]
nyu-mll/CoLA-baselines
[ "dd095d3646ed05a315280aaa8ed4ec84ba435b3e" ]
[ "acceptability/modules/lm_generator.py" ]
[ "import argparse\nimport torch\nimport os\n\nfrom torch.autograd import Variable\n\nfrom .dataset import Vocab\nfrom acceptability.utils import get_lm_generator_parser, seed_torch\n\n\nclass LMGenerator():\n def __init__(self):\n parser = get_lm_generator_parser()\n self.args = parser.parse_args()\n print(self.args)\n if self.args.temperature < 1e-3:\n parser.error(\"--temperature has to be greater or equal 1e-3\")\n\n seed_torch(self.args)\n\n\n def load(self):\n with open(self.args.checkpoint, 'rb') as f:\n self.model = torch.load(f)\n self.model.eval()\n\n if self.args.gpu:\n self.model.cuda()\n else:\n self.model.cpu()\n\n self.vocab = Vocab(self.args.vocab_file)\n self.ntokens = self.vocab.get_size()\n\n def generate(self):\n hidden = self.model.init_hidden(1)\n inp = Variable(torch.LongTensor([self.vocab.SOS_INDEX]).unsqueeze(0), volatile=True)\n if self.args.gpu:\n inp.data = inp.data.cuda()\n\n with open(self.args.outf, 'w') as outf:\n for i in range(self.args.nlines):\n words = []\n while True:\n output, hidden = self.model(inp, hidden)\n word_weights = output.squeeze().data.div(self.args.temperature).exp().cpu()\n word_idx = torch.multinomial(word_weights, 1)[0]\n inp.data.fill_(word_idx)\n word = self.vocab.itos[word_idx]\n\n if word == self.vocab.EOS_TOKEN:\n line = ['lm', '0', '', ' '.join(words) + '\\n']\n outf.write('\\t'.join(line))\n break\n else:\n words.append(word)\n\n if i % self.args.log_interval == 0:\n print('| Generated {}/{} lines, {} words'\n .format(i, self.args.nlines, len(words)))\n" ]
[ [ "torch.LongTensor", "torch.multinomial", "torch.load" ] ]
xiaowanzi123good/adversarial-polyglots
[ "fa5000acdf0a49c2373d17c9133e8670385cb855" ]
[ "attacks/run_polygloss_nli.py" ]
[ "from transformers import glue_processors as processors\nimport csv, os, argparse, ray, torch, time, json\nfrom pathlib import Path\nfrom polygloss import PolyglossPairSequenceClassificationHF\nfrom tqdm import tqdm\nfrom ray.util import ActorPool\nfrom math import ceil\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--data\", \"-d\", default=None, type=str, required=True,\n help=\"The input data directory, e.g., 'data/MNLI'.\")\nparser.add_argument(\"--model\", \"-m\", type=str, required=True)\nparser.add_argument(\"--output_dir\", \"-o\", default=None, type=str, required=True, help=\"The output directory.\")\nparser.add_argument(\"--mm\", action='store_true', required=False, help=\"Use Mismatch dev data.\")\nparser.add_argument(\"--split\", '-s', default='test', type=str, required=False,\n help=\"Use the dev or test data as the source.\")\nparser.add_argument(\"--beam\", '-b', default=0, type=int, required=False, help=\"Beam size.\")\nparser.add_argument(\"--tgt_langs\", '-t', default='zh,hi,fr,de,tr', type=str, required=False,\n help=\"Comma separated list of embedded languages.\")\nparser.add_argument(\"--use_reference_translations\", '-r', action='store_true', required=False,\n help=\"Filter candidates with reference translations.\")\nparser.add_argument(\"--simplified_zh\", '-szh', action='store_true', required=False, help=\"Use simplified Chinese dict\")\nparser.add_argument(\"--gpu\", '-g', default=0.33, type=float, required=False,\n help=\"GPU allocation per actor (% of one GPU). Total number of parallel actors is calculated using this value. Set to 0 to use CPU.\")\n\nargs = parser.parse_args()\n\nUSE_CUDA = torch.cuda.is_available() and args.gpu > 0\nNUM_GPU_PER_ACTOR = args.gpu if USE_CUDA else 0 # set gpu usage\nSRC_LANG = 'en' # matrix language\nLABELS = [\"contradiction\", \"entailment\", \"neutral\"]\n\n\n@ray.remote(num_gpus=NUM_GPU_PER_ACTOR)\nclass PolyglotActor(object):\n def __init__(self, model, src_lang, tgt_langs, src_tgts_map, reference_translations=None):\n self.polyglot = PolyglossPairSequenceClassificationHF(model, src_lang, tgt_langs, src_tgts_map, LABELS,\n is_nli=True, use_cuda=USE_CUDA)\n self.reference_translations = reference_translations\n\n def mutate(self, batch, beam):\n score = 0\n for example in tqdm(batch):\n prem_refs = self.reference_translations[0][example.text_a] if self.reference_translations else None\n hypo_refs = self.reference_translations[1][example.text_b] if self.reference_translations else None\n refs = [prem_refs, hypo_refs]\n prem, hypo, text_label, _, lowest_prem, lowest_hypo, lowest_text_label = self.polyglot.generate(\n example.text_a,\n example.text_b,\n example.label,\n beam_size=beam,\n reference_translations=refs)\n if text_label != example.label:\n example.text_a = prem\n example.text_b = hypo\n\n example.text_a_lowest = lowest_prem\n example.text_b_lowest = lowest_hypo\n\n example.adv_label = text_label\n example.adv_label_lowest = lowest_text_label\n else:\n score += 1\n return batch, score\n\n\ndef _create_output_data(examples, input_tsv_list):\n output = []\n columns = {}\n for (i, line) in enumerate(input_tsv_list):\n output_line = line.copy()\n if i == 0:\n output_line.insert(-1, 'predicted_label')\n output_line.insert(-1, 'sentence1_lowest')\n output_line.insert(-1, 'sentence2_lowest')\n output_line.insert(-1, 'predicted_label_lowest')\n columns = {col: i for i, col in enumerate(output_line)}\n output.append(output_line)\n continue\n output_line[4] = '-'\n output_line[5] = '-'\n output_line[6] = '-'\n 
output_line[7] = '-'\n output_line[columns['sentence1']] = examples[i - 1].text_a\n output_line[columns['sentence2']] = examples[i - 1].text_b\n try:\n output_line.insert(-1, examples[i - 1].adv_label)\n except AttributeError:\n output_line.insert(-1, '-')\n try:\n output_line.insert(-1, examples[i - 1].text_a_lowest)\n output_line.insert(-1, examples[i - 1].text_b_lowest)\n output_line.insert(-1, examples[i - 1].adv_label_lowest)\n except AttributeError:\n output_line.insert(-1, '-')\n output_line.insert(-1, '-')\n output_line.insert(-1, '-')\n output.append(output_line)\n return output\n\n\ndef _write_tsv(output, output_file):\n with open(output_file, \"w\", encoding=\"utf-8-sig\") as f:\n writer = csv.writer(f, delimiter=\"\\t\", quotechar=None)\n for row in output:\n writer.writerow(row)\n\n\ndef get_examples(data_dir, task, split):\n if split == 'dev':\n return processors[task]().get_dev_examples(data_dir)\n if split == 'test':\n return processors[task]().get_test_examples(data_dir)\n raise ValueError('Must be dev or test')\n\n\nTGT_LANGS = args.tgt_langs.split(',')\n\nreference_translations = None\nrefs_var = 'no_ref'\nif args.use_reference_translations:\n reference_translations = [\n json.load(open('../dictionaries/xnli-' + args.split + '-sentence1-reference-translations-en-head.json', 'r')),\n json.load(open('../dictionaries/xnli-' + args.split + '-sentence2-reference-translations-en-head.json', 'r'))]\n refs_var = 'ref_constrained'\n\noutput_path = Path(args.output_dir,\n 'polygloss_pairseqcls_' + \\\n args.data.strip('/').split('/')[-1] + \\\n '.' + args.model.split('/')[-1] + \\\n '.' + '_'.join(TGT_LANGS) + \\\n '.beam-' + str(args.beam) + \\\n '.' + refs_var)\noutput_path.mkdir(parents=True, exist_ok=True)\n\noutput_file = args.split + '_'\nif args.mm:\n output_file += 'mismatched'\nelse:\n output_file += 'matched'\noutput_file += '.tsv'\noutput_file = str(Path(output_path, output_file))\nprint('Output file path:', output_file)\n\nif args.mm:\n input_tsv = processors['mnli-mm']()._read_tsv(args.data + '/' + args.split + '_mismatched.tsv')\n examples = get_examples(args.data, 'mnli-mm', args.split)\nelse:\n input_tsv = processors['mnli']()._read_tsv(args.data + '/' + args.split + '_matched.tsv')\n examples = get_examples(args.data, 'mnli', args.split)\n\nif args.simplified_zh:\n word_map = 'en_to_all_map_simplified_zh.json'\nelse:\n word_map = 'en_to_all_map.json'\n\nnum_actors = int(torch.cuda.device_count() // NUM_GPU_PER_ACTOR) if USE_CUDA else int(25 // max(1, 0.5 * args.beam))\nprint('Number of Polyglots:', num_actors)\n\ntotal_exs = len(examples)\nprint(total_exs)\nlen_per_batch = ceil(total_exs / num_actors)\n\nbatches = [examples[i:i + len_per_batch] for i in range(0, total_exs, len_per_batch)]\n\nray.init()\nactors = ActorPool([PolyglotActor.remote(args.model, SRC_LANG, TGT_LANGS, word_map, reference_translations)\n for i in range(num_actors)])\nstart = time.time()\nresults, scores = map(list, zip(*actors.map(lambda actor, batch: actor.mutate.remote(batch, args.beam), batches)))\ntime_taken = time.time() - start\nresults = [ex for batch in results for ex in batch]\n\nprint(\"Acc:\", str(sum(scores) / total_exs * 100))\nprint(\"Time taken:\", time_taken / 60)\nprint(\"Output: \", str(output_path))\n\n_write_tsv(_create_output_data(results, input_tsv), output_file)\nwith open(str(Path(output_path, 'time_taken.txt')), 'w') as t:\n t.write('Acc: ' + str(sum(scores) / total_exs * 100) + '\\n')\n t.write(\"Time taken:\" + str(time_taken / 60))\n" ]
[ [ "torch.cuda.device_count", "torch.cuda.is_available" ] ]
kunakl07/AL-MLresearch
[ "5ab80169563e6cbe4de15aefa4bbfa09298795f9" ]
[ "src/active_learning_script/active_learning.py" ]
[ "import shutil\nimport os\nimport logging\nimport argparse\nfrom keras.preprocessing import image\nimport numpy as np\nfrom keras.preprocessing.image import img_to_array\nimport tensorflow as tf\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\ndef predict(model_path, test_path):\n \"\"\"Predict the labels using predict function\n\n Args:\n model_path: The path to the model or the .h5 file\n test_path: The path to the test directory\n\n Returns:\n None.\n \"\"\"\n\n img_width, img_height = 288, 432\n model = tf.keras.models.load_model(model_path)\n folder_path = test_path\n model_path = model_path\n N = sum(len(files) for _, _, files in os.walk(folder_path))\n data = np.empty((N, img_width, img_height, 3), dtype=np.uint8)\n predictions_probab = model.predict_proba(data)\n one_dim_predict = predictions_probab\n\n for dirs, _, files in os.walk(folder_path):\n for i, file in enumerate(files):\n f_name = os.path.join(dirs, file)\n img = image.load_img(f_name, target_size=(img_width, img_height))\n img = img_to_array(img)\n img = np.expand_dims(img, axis=0)\n data[i, ...] = img\n\n logger.info(\"Starting Prediction\")\n classes = model.predict_classes(data, batch_size=32)\n\n f = []\n for i in os.listdir(folder_path):\n f.append(i)\n\n for i in range(len(classes)):\n f_n = os.path.join(folder_path, f[i])\n\n os.makedirs(\"uncertain_calls\", exist_ok=True)\n if one_dim_predict[i] > 0.1 and one_dim_predict[i] < 0.9:\n shutil.copy(f_n, 'uncertain_calls')\n logger.info(\n f\"There are {sum(len(files) for _, _, files in os.walk('uncertain_calls'))} uncertain srkw calls\")\n return one_dim_predict[i][0], classes[10][0]\n\n\ndef main(args):\n model_path = args.modelpath\n test_path = args.testpath\n predict(model_path, test_path)\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser(\n description=\"Predict which are uncertain_calls\")\n parser.add_argument(\n '-m',\n '--modelpath',\n type=str,\n help='path to saved model weights',\n required=True)\n parser.add_argument(\n '-c',\n \"--datapath\",\n type=str,\n help='directory with Preprocessed images',\n required=True)\n\n args = parser.parse_args()\n\n main(args)\n" ]
[ [ "tensorflow.keras.models.load_model", "numpy.expand_dims", "numpy.empty" ] ]
georgedeath/eshotgun
[ "6b34c9acfd1d56ebc64493babd77799441298d31" ]
[ "eshotgun/test_problems/util.py" ]
[ "import numpy as np\n\n\n# simple wrapper around functions to act as though they reside in [0, 1]^d\nclass uniform_problem_wrapper():\n def __init__(self, problem):\n self.problem = problem\n self.dim = problem.dim\n\n self.real_lb = problem.lb\n self.real_ub = problem.ub\n\n self.lb = np.zeros(self.dim)\n self.ub = np.ones(self.dim)\n\n self.real_cf = problem.cf\n self.set_cf()\n\n def __call__(self, x):\n x = np.atleast_2d(x)\n\n # map x back to original space\n x = x * (self.real_ub - self.real_lb)[np.newaxis, :] + self.real_lb[np.newaxis, :]\n\n return self.problem(x)\n\n def set_cf(self):\n if self.real_cf is None:\n self.cf = None\n return\n\n def cf_wrapper(x):\n x = np.atleast_2d(x)\n\n # map x back to original space\n x = x * (self.real_ub - self.real_lb)[np.newaxis, :] + self.real_lb[np.newaxis, :]\n\n return self.real_cf(x)\n\n self.cf = cf_wrapper\n" ]
[ [ "numpy.atleast_2d", "numpy.zeros", "numpy.ones" ] ]
Dirac231/BCHamming
[ "0fedd3b909a6f8a876547b0e368b817664aac212" ]
[ "REED_SOLOMON/RS-noise.py" ]
[ "from qiskit import *\nfrom unireedsolomon import *\nfrom matplotlib import *\nfrom math import *\nfrom collections import defaultdict\nimport numpy as np\nfrom numpy.polynomial import Polynomial\nfrom qiskit.providers.aer import AerSimulator\nfrom qiskit.circuit.library import QFT\nfrom qiskit.visualization import plot_histogram\nfrom qiskit.providers.aer.noise import NoiseModel\n\n\n#NOISE MODULE\n\n#Needed in order to load the ibm-mps simulator for an optimal simulation\nprovider = IBMQ.load_account()\n##the following adds the noise model that is based on ibmq_vigo\nbackend = provider.get_backend('ibmq_16_melbourne')\nnoise_model = NoiseModel.from_backend(backend) #noise model from real machines\n\n##The following adds custom noise model to the circuit\n# Error probabilities\n#prob_1 = 0.001 # x error\n#prob_2 = 0.01 # depolarizing error\n\n# Depolarizing quantum errors\n#error_1 = noise.depolarizing_error(prob_1, 1)\n#error_2 = noise.depolarizing_error(prob_2, 2)\n#error_1 = NoiseModel.depolarizing_error(prob_1, 1)\n#error_2 = NoiseModel.depolarizing_error(prob_2, 2)\n\n# Get basis gates from noise model\nbasis_gates = noise_model.basis_gates\nbasis_gates = ['cx', 'id', 'u1', 'sx', 'x']\n\n#--------------------------------------------------------------------------------------\n\n#PARAMETERS SETUP\n\n#Parameters of the classical code used to generate the optimal quantum code.\n#The code is built so that everything is a function of k_cl, the order of the finite field.\n\n#The initial state is read from the file states.txt\n\ndef init_decoder():\n global initial_state, k_cl, delta, K, ENC, encode_reg, ecc, shots, fourier, inv_fourier,provider\n provider = IBMQ.load_account()\n initial_state = np.loadtxt('states.txt')\n k_cl = len(initial_state) #Order of the finite field in terms of powers of 2, corresponds to the amount of qbits sent\n\n delta = floor((2**k_cl-1)/2+2) #Classical optimal minimum distance of the code\n K = (2**k_cl) - delta #Number of classical bits sent, directly related to the error-correcting capability of the code ecc = floor((K+1)/2) \n ENC = k_cl*(2**k_cl - 1) #Total encoding Qbits needed\n encode_reg = QuantumRegister(ENC+2*k_cl*K) #Quantum Register used to construct the full circuit\n ecc = floor((K+1)/2) #Maximum error correction capability per symbol\n shots = 100\n\n #Initialization of the parameters is completed\n print(\"\")\n print(\"Reading from file: found \",k_cl,\" Qbits: \\n\")\n\n print(\"Parameters of the code: \")\n print(\"-------------------------------------------\")\n print(\"Encoding Qbits: \", ENC)\n print(\"Sent Qbits: \", k_cl*(2**k_cl-1-2*K))\n print(\"Maximum error-correcting: \", ecc, \"/Symbol = \", ecc*k_cl, \"/Encoded Qbit\")\n print(\"-------------------------------------------\")\n\n #--------------------------------------------------------------------------------------\n\n #QTF IMPLEMENTATION\n\n #A quantum fourier transform is used both for encoding and decoding purposes\n\n fourier = QFT(num_qubits=ENC, approximation_degree=0, do_swaps=True, inverse=False, insert_barriers=False, name='qft')\n inv_fourier = QFT(num_qubits=ENC, approximation_degree=0, do_swaps=True, inverse=True, insert_barriers=False, name='qft-inverse')\n\n#-----------------------------------------------------------------------------------\n\n#SIMULATES THE CIRCUIT\n\ndef simulate(qc):\n \"\"\"Simulates the circuit using the cloud-computing services of IBMq, this is always the recommended choice to run simulations\"\"\"\n provider = 
IBMQ.get_provider(hub='ibm-q')\n backend=provider.get_backend('simulator_mps')\n result = execute(circ, backend,shots=shots,\n basis_gates=basis_gates,\n noise_model=noise_model).result() #add noise module\n print('Simulation Success: {}'.format(result.success))\n print(\"Time taken: {} sec\".format(result.time_taken))\n counts = result.get_counts(0)\n return counts\n\n#------------------------------------------------------------------------------------\n\n#MEASURE FUNCTIONS\n\ndef measure_encoding(qc):\n \"\"\"Measure the Qbits used in the encoding, i.e. if the lenght is 3, the first 21 Qbits\"\"\"\n cr = ClassicalRegister(ENC, 'encoded')\n qc.add_register(cr)\n for i in range(0, ENC):\n qc.measure(i,cr[i])\n results = simulate(qc)\n encoded = max(results, key=results.get)\n return encoded\n\n\ndef get_qbits(qc):\n \"\"\"Measure the Qbits with the message, i.e. if the lenght is 3, the first 3 Qbits\"\"\"\n cr = ClassicalRegister(k_cl*(2**k_cl-1-2*K), 'out')\n qc.add_register(cr)\n for i in range(0,k_cl):\n qc.measure(i, cr[i]) \n for i in range(k_cl*(K + 1), ENC-k_cl*K):\n qc.measure(i, cr[i])\n results = simulate(qc)\n qbits = max(results, key=results.get)\n return qbits,results\n\n\ndef get_syndrome(qc):\n \"\"\"Measure the Qbits with the syndrome, i.e. if the lenght is 3, the last 18 Qbits\"\"\"\n cr = ClassicalRegister(2*k_cl*K)\n qc.add_register(cr)\n for i in range(0, 2*k_cl*K):\n qc.measure(ENC+i,cr[i])\n #orders the syndromes in descending order in term of the occurrences\n ordered_res = {k: v for k, v in sorted(simulate(qc).items(), key=lambda item: item[1])} \n syndromes = list(ordered_res)[::-1]\n return syndromes\n\n#------------------------------------------------------------------------------------\n\n#GIVEN THE CLASSICAL SYNDROME, RETURNS THE POSITIONS OF THE ERRORS USING CLASSICAL BERLEKAMP-MASSEY\n\n#Performs a Berlekamp-Massey algorithm in order to find the error locator polynomial relative to the syndrome#\ndef error_string(classical_syn):\n k1 = int(ENC/k_cl)\n k2 = int(((ENC-K*k_cl)/k_cl))\n prime = int(hex(find_prime_polynomials(c_exp=k_cl,single=True)),16)\n coder = rs.RSCoder(k1, k2, prim=prime,c_exp=k_cl)\n error_bf, sigma_bf = coder._berlekamp_massey_fast(coder._list2gfpoly(str(classical_syn)))\n eval_tmp_bf, bf = coder._chien_search_faster(error_bf)\n Y = coder._forney(sigma_bf, eval_tmp_bf)\n Elist = []\n if(classical_syn != \"0\"*k_cl):\n\n if len(Y) >= len(bf): \n for i in range(coder.gf2_charac):\n if i in bf:\n Elist.append(Y[bf.index(i)])\n E = Polynomial( Elist[::-1])\n error_bits = [bin(int(i))[2:] for i in Elist]\n s = \"\"\n for i in range(len(error_bits)): \n s += error_bits[i]\n s = s[::-1]\n return s\n else:\n return \"\"\n \n \ndef error_locator(syn):\n \"\"\"take the syndrome computed by the quantum circuit and apply error_string\"\"\"\n for x in syn:\n BFsyndrome = oct(int((x[::-1])[:k_cl*K],2))[2:] #bit flip syndrome string\n PFsyndrome = oct(int((x[::-1])[k_cl*K:],2))[2:] #phase flip syndrome string\n\n #Performs the error locator finding for each measured syndrome, if a error occurs, it computes the errors associated with the next most probable syndrome\n try: #uses functions in the unireedsolomon library to compute the error locations bf, pf\n bf = error_string(BFsyndrome)\n pf = error_string(PFsyndrome)\n return bf,pf,x\n except (RSCodecError,ValueError):\n continue\n print(\"No valid syndrome was found, too many errors try increasing the number of shots.\")\n 
exit()\n\n#------------------------------------------------------------------------------------\n\n\"\"\"ENCODING: takes a message and return the circuit that encodes it\"\"\"\n\ndef encoder(initial_state):\n \"\"\"Takes a message and return the circuit that encodes it\"\"\"\n qc = QuantumCircuit(encode_reg)\n for i in range(0,k_cl):\n qc.initialize(initial_state[i], i) \n for i in range(k_cl*(K + 1), ENC-k_cl*K):\n qc.initialize(initial_state[i], i)\n for i in range(ENC - k_cl*K,ENC):\n qc.h(i)\n qc.append(inv_fourier, encode_reg[:ENC])\n return qc\n\n\n#CIRCUIT TO COMPUTE THE SYNDROME\n\ndef decoder(qc):\n \"\"\"Takes the ecoding circuit, computes the syndrome and corrects the message\"\"\"\n qc.append(fourier, encode_reg[:ENC])\n for i in range(k_cl+1,k_cl*(K+1)+1):\n qc.cx(i-1, i+ENC-k_cl-1)\n for i in range(ENC -k_cl*K, ENC):\n qc.h(i)\n for i in range(ENC-k_cl*K-1,ENC-1):\n qc.cx(i+1, i+ENC-k_cl+1)\n for i in range(ENC -k_cl*K-1, ENC-1):\n qc.h(i+1)\n qc.append(inv_fourier, encode_reg[:ENC])\n syn = get_syndrome(qc)\n bf,pf,x = error_locator(syn)\n if(bf != \"1\" or x[:k_cl*K] != \"0\"*k_cl*K):\n for i in range(len(bf)):\n if (bf[i] == \"1\"):\n qc.x(i)\n if (pf != \"1\" or x[k_cl*K:] != \"0\"*k_cl*K):\n for i in range(ENC):\n qc.h(i)\n\n for i in range(len(pf)):\n if (pf[i] == \"1\"):\n qc.z(i)\n\n for i in range(ENC):\n qc.h(i)\n qc.append(fourier, encode_reg[:ENC])\n message,occurrences = get_qbits(qc)\n occurrences = zip([x[:3][::-1] for x in occurrences.keys()] , list(occurrences.values()))\n D = defaultdict(int)\n for k,v in occurrences:\n D[k]+= int(v)\n occurrences = dict(D)\n return qc,message,x,occurrences\n\n#------------------------------------------------------------------------------------\n\n\ndef send_message(initial_state):\n \"\"\"Auxiliary testing function, sends the message contained in the file states.txt and returns the simulation circuit.\"\"\"\n qc = encoder(initial_state) #Classical optimal minimum distance of the code\n #INSERT ERRORS HERE: (such as qc.x(4) or z-errors)\n qc,retrieved,syn,occurrences = decoder(qc)\n plot_histogram(occurrences, color='midnightblue', title=\"Message occurrences\").savefig(\"histogram.png\")\n print(\"Most probable message: \", retrieved[:3][::-1])\n print(\"Occurrences: \", occurrences)\n print(\"Compared with: \")\n for i in initial_state:\n print(i,\"\\n\")\n print(\"Syndrome was: \", syn[::-1])\n qc.draw(output='mpl', filename='prova.png')\n return qc\n\n#------------------------------------------------------------------------------------\n\nqc = send_message(initial_state)\n" ]
[ [ "numpy.polynomial.Polynomial", "numpy.loadtxt" ] ]
weinajin/evaluate_multimodal_medical_image_heatmap_explanation
[ "c022613ea05818c842e0760c44a0a2cb9cc0c424" ]
[ "code/model/loss.py" ]
[ "import torch.nn.functional as F\nimport torch\n\ndef nll_loss(output, target):\n return F.nll_loss(output, target)\n\n\ndef ce_loss(output, target):\n return F.cross_entropy(output, target)\n\ndef ce_loss_weighted(output, target):\n weighted_loss = torch.tensor([0.34, 0.66])\n return F.cross_entropy(output, target, weight= weighted_loss)\n\ndef focal_loss(output, target, gama=2., size_average=True, weight =None):\n log_P = -F.cross_entropy(output, target, weight=self.weight, reduction='none')\n P = torch.exp(log_P)\n batch_loss = -torch.pow(1 - P, gama).mul(log_P)\n if size_average:\n loss = batch_loss.mean()\n else:\n loss = batch_loss.sum()\n return loss\n\n" ]
[ [ "torch.nn.functional.nll_loss", "torch.nn.functional.cross_entropy", "torch.tensor", "torch.exp", "torch.pow" ] ]
faradaymahe/DynaPhopy
[ "e1201f6de62b4303c68a7808ed19175364409586" ]
[ "dynaphopy/interface/iofile/trajectory_parsers.py" ]
[ "import os\nimport numpy as np\nimport mmap\nimport dynaphopy.dynamics as dyn\nimport warnings\n\n\n# VASP OUTCAR file parser\ndef read_vasp_trajectory(file_name, structure=None, time_step=None,\n limit_number_steps=10000000, # Maximum number of steps read (for security)\n last_steps=None,\n initial_cut=1,\n end_cut=None,\n memmap=False,\n template=None):\n\n # warning\n warnings.warn('This parser will be deprecated, you can use XDATCAR instead', DeprecationWarning)\n\n # Check file exists\n if not os.path.isfile(file_name):\n print('Trajectory file does not exist!')\n exit()\n\n # Check time step\n if time_step is not None:\n print('Warning! Time step flag has no effect reading from VASP OUTCAR file (time step will be read from file)')\n\n if memmap:\n print('Warning! Memory mapping is not implemented in VASP OUTCAR parser')\n\n # Starting reading\n print(\"Reading VASP trajectory\")\n print(\"This could take long, please wait..\")\n\n # Dimensionality of VASP calculation\n number_of_dimensions = 3\n\n with open(file_name, \"r+\") as f:\n\n #Memory-map the file\n file_map = mmap.mmap(f.fileno(), 0)\n position_number=file_map.find(b'NIONS =')\n file_map.seek(position_number+7)\n number_of_atoms = int(file_map.readline())\n\n #Read time step\n position_number=file_map.find(b'POTIM =')\n file_map.seek(position_number+8)\n time_step = float(file_map.readline().split()[0])* 1E-3 # in picoseconds\n\n #Reading super cell\n position_number = file_map.find(b'direct lattice vectors')\n file_map.seek(position_number)\n file_map.readline()\n super_cell = []\n for i in range (number_of_dimensions):\n super_cell.append(file_map.readline().split()[0:number_of_dimensions])\n super_cell = np.array(super_cell, dtype='double')\n\n file_map.seek(position_number)\n file_map.readline()\n\n # Check if number of atoms is multiple of cell atoms\n if structure is not None:\n if number_of_atoms % structure.get_number_of_cell_atoms() != 0:\n print('Warning: Number of atoms not matching, check VASP output files')\n # structure.set_number_of_atoms(number_of_atoms)\n\n# Read coordinates and energy\n trajectory = []\n energy = []\n counter = 0\n while True:\n\n counter +=1\n #Initial cut control\n if initial_cut > counter:\n continue\n\n position_number=file_map.find(b'POSITION')\n if position_number < 0 : break\n\n file_map.seek(position_number)\n file_map.readline()\n file_map.readline()\n\n read_coordinates = []\n for i in range (number_of_atoms):\n read_coordinates.append(file_map.readline().split()[0:number_of_dimensions])\n\n read_coordinates = np.array(read_coordinates, dtype=float) # in angstrom\n if template is not None:\n indexing = np.argsort(template)\n read_coordinates = read_coordinates[indexing, :]\n\n position_number=file_map.find(b'energy(')\n file_map.seek(position_number)\n read_energy = file_map.readline().split()[2]\n trajectory.append(read_coordinates.flatten()) #in angstrom\n energy.append(np.array(read_energy, dtype=float))\n\n #security routine to limit maximum of steps to read and put in memory\n if limit_number_steps+initial_cut < counter:\n print(\"Warning! maximum number of steps reached! 
No more steps will be read\")\n break\n\n if end_cut is not None and end_cut <= counter:\n break\n\n file_map.close()\n\n trajectory = np.array([[[trajectory[i][j*number_of_dimensions+k]\n for k in range(number_of_dimensions)]\n for j in range(number_of_atoms)]\n for i in range (len(trajectory))])\n\n if last_steps is not None:\n trajectory = trajectory[-last_steps:,:,:]\n energy = energy[-last_steps:]\n\n print('Number of total steps read: {0}'.format(trajectory.shape[0]))\n time = np.array([i*time_step for i in range(trajectory.shape[0])], dtype=float)\n\n print('Trajectory file read')\n return dyn.Dynamics(structure=structure,\n trajectory=np.array(trajectory, dtype=complex),\n energy=np.array(energy),\n time=time,\n supercell=super_cell,\n memmap=memmap)\n\n\n# LAMMPS custom dump file parser\ndef read_lammps_trajectory(file_name, structure=None, time_step=None,\n limit_number_steps=10000000,\n last_steps=None,\n initial_cut=1,\n end_cut=None,\n memmap=False,\n template=None):\n\n\n # Time in picoseconds\n # Coordinates in Angstroms\n\n # Read environtment variables\n try:\n temp_directory = os.environ[\"DYNAPHOPY_TEMPDIR\"]\n if os.path.isdir(temp_directory):\n print('Set temporal directory: {0}'.format(temp_directory))\n temp_directory += '/'\n else:\n temp_directory = ''\n except KeyError:\n temp_directory = ''\n\n number_of_atoms = None\n bounds = None\n\n #Check file exists\n if not os.path.isfile(file_name):\n print('Trajectory file does not exist!')\n exit()\n\n # Check time step\n if time_step is None:\n print('Warning! LAMMPS trajectory file does not contain time step information')\n print('Using default: 0.001 ps')\n time_step = 0.001\n\n # Starting reading\n print(\"Reading LAMMPS trajectory\")\n print(\"This could take long, please wait..\")\n\n # Dimension of LAMMP calculation\n if structure is None:\n number_of_dimensions = 3\n else:\n number_of_dimensions = structure.get_number_of_dimensions()\n\n time = []\n data = []\n counter = 0\n\n lammps_labels = False\n\n with open(file_name, \"r+\") as f:\n\n file_map = mmap.mmap(f.fileno(), 0)\n\n while True:\n\n counter += 1\n\n #Read time steps\n position_number=file_map.find(b'TIMESTEP')\n if position_number < 0: break\n\n file_map.seek(position_number)\n file_map.readline()\n time.append(float(file_map.readline()))\n\n\n if number_of_atoms is None:\n #Read number of atoms\n file_map = mmap.mmap(f.fileno(), 0)\n position_number=file_map.find(b'NUMBER OF ATOMS')\n file_map.seek(position_number)\n file_map.readline()\n number_of_atoms = int(file_map.readline())\n\n # Check if number of atoms is multiple of cell atoms\n if structure is not None:\n if number_of_atoms % structure.get_number_of_cell_atoms() != 0:\n print('Warning: Number of atoms not matching, check LAMMPS output file')\n\n if bounds is None:\n #Read cell\n file_map = mmap.mmap(f.fileno(), 0)\n position_number=file_map.find(b'BOX BOUNDS')\n file_map.seek(position_number)\n file_map.readline()\n\n bounds = []\n for i in range(3):\n bounds.append(file_map.readline().split())\n\n bounds = np.array(bounds, dtype=float)\n if bounds.shape[1] == 2:\n bounds = np.append(bounds, np.array([0, 0, 0])[None].T ,axis=1)\n\n xy = bounds[0, 2]\n xz = bounds[1, 2]\n yz = bounds[2, 2]\n\n xlo = bounds[0, 0] - np.min([0.0, xy, xz, xy+xz])\n xhi = bounds[0, 1] - np.max([0.0, xy, xz, xy+xz])\n ylo = bounds[1, 0] - np.min([0.0, yz])\n yhi = bounds[1, 1] - np.max([0.0, yz])\n zlo = bounds[2, 0]\n zhi = bounds[2, 1]\n\n supercell = np.array([[xhi-xlo, xy, xz],\n [0, yhi-ylo, yz],\n [0, 0, 
zhi-zlo]]).T\n\n #for 2D\n supercell = supercell[:number_of_dimensions, :number_of_dimensions]\n\n # Testing cell\n lx = xhi-xlo\n ly = yhi-ylo\n lz = zhi-zlo\n\n a = lx\n b = np.sqrt(pow(ly,2) + pow(xy,2))\n c = np.sqrt(pow(lz,2) + pow(xz,2) + pow(yz,2))\n\n alpha = np.arccos((xy*xz + ly*yz)/(b*c))\n beta = np.arccos(xz/c)\n gamma = np.arccos(xy/b)\n\n # End testing cell\n\n # rotate lammps supercell to match unitcell orientation\n def unit_matrix(matrix):\n return np.array([np.array(row)/np.linalg.norm(row) for row in matrix])\n\n unit_structure = unit_matrix(structure.get_cell())\n unit_supercell_lammps = unit_matrix(supercell)\n\n transformation_mat = np.dot(np.linalg.inv(unit_structure), unit_supercell_lammps).T\n\n supercell = np.dot(supercell, transformation_mat)\n\n if memmap:\n if end_cut:\n data = np.memmap(temp_directory+'trajectory.{0}'.format(os.getpid()), dtype='complex', mode='w+', shape=(end_cut - initial_cut+1, number_of_atoms, number_of_dimensions))\n else:\n print('Memory mapping requires to define reading range (use read_from/read_to option)')\n exit()\n\n position_number = file_map.find(b'ITEM: ATOMS')\n\n file_map.seek(position_number)\n lammps_labels=file_map.readline()\n\n #Initial cut control\n if initial_cut > counter:\n time = []\n continue\n\n #Reading coordinates\n read_coordinates = []\n for i in range (number_of_atoms):\n read_coordinates.append(file_map.readline().split()[0:number_of_dimensions])\n read_coordinates = np.array(read_coordinates, dtype=float)\n\n if template is not None:\n indexing = np.argsort(template)\n read_coordinates = read_coordinates[indexing, :]\n\n try:\n read_coordinates = np.dot(read_coordinates, transformation_mat)\n if memmap:\n data[counter-initial_cut, :, :] = read_coordinates #in angstroms\n else:\n data.append(read_coordinates) #in angstroms\n\n except ValueError:\n print(\"Error reading step {0}\".format(counter))\n break\n # print(read_coordinates)\n\n #security routine to limit maximum of steps to read and put in memory\n if limit_number_steps+initial_cut < counter:\n print(\"Warning! maximum number of steps reached! No more steps will be read\")\n break\n\n if end_cut is not None and end_cut <= counter:\n break\n\n file_map.close()\n\n time = np.array(time) * time_step\n\n if not memmap:\n data = np.array(data, dtype=complex)\n\n if last_steps is not None:\n data = data[-last_steps:, :, :]\n time = time[-last_steps:]\n\n\n # Check position/velocity dump\n if b'vx vy' in lammps_labels:\n return dyn.Dynamics(structure=structure,\n velocity=data,\n time=time,\n supercell=supercell,\n memmap=memmap)\n\n if b'x y' in lammps_labels:\n return dyn.Dynamics(structure=structure,\n trajectory=data,\n time=time,\n supercell=supercell,\n memmap=memmap)\n\n print('LAMMPS parsing error. 
Data not recognized: {}'.format(lammps_labels))\n exit()\n\n\ndef read_VASP_XDATCAR(file_name, structure=None, time_step=None,\n limit_number_steps=10000000,\n last_steps=None,\n initial_cut=1,\n end_cut=None,\n memmap=False,\n template=None):\n\n # Time in picoseconds\n # Coordinates in Angstroms\n\n #Read environtment variables\n try:\n temp_directory = os.environ[\"DYNAPHOPY_TEMPDIR\"]\n if os.path.isdir(temp_directory):\n print('Set temporal directory: {0}'.format(temp_directory))\n temp_directory += '/'\n else:\n temp_directory = ''\n except KeyError:\n temp_directory = ''\n\n number_of_atoms = None\n bounds = None\n\n #Check file exists\n if not os.path.isfile(file_name):\n print('Trajectory file does not exist!')\n exit()\n\n #Check time step\n if time_step is None:\n print('Warning! XDATCAR file does not contain time step information')\n print('Using default: 0.001 ps')\n time_step = 0.001\n\n #Starting reading\n print(\"Reading XDATCAR file\")\n print(\"This could take long, please wait..\")\n\n #Dimensionality of VASP calculation\n number_of_dimensions = 3\n\n time = []\n data = []\n counter = 0\n\n with open(file_name, \"r+b\") as f:\n\n file_map = mmap.mmap(f.fileno(), 0)\n\n #Read cell\n for i in range(2): file_map.readline()\n a = file_map.readline().split()\n b = file_map.readline().split()\n c = file_map.readline().split()\n super_cell = np.array([a, b, c], dtype='double')\n\n for i in range(1): file_map.readline()\n number_of_atoms = np.array(file_map.readline().split(), dtype=int).sum()\n\n while True:\n\n counter += 1\n #Read time steps\n position_number=file_map.find(b'Direct configuration')\n if position_number < 0: break\n\n file_map.seek(position_number)\n time.append(float(file_map.readline().split(b'=')[1]))\n\n if memmap:\n if end_cut:\n data = np.memmap(temp_directory+'trajectory.{0}'.format(os.getpid()), dtype='complex', mode='w+', shape=(end_cut - initial_cut+1, number_of_atoms, number_of_dimensions))\n else:\n print('Memory mapping requires to define reading range (use read_from/read_to option)')\n exit()\n\n\n #Initial cut control\n if initial_cut > counter:\n continue\n\n #Reading coordinates\n read_coordinates = []\n for i in range (number_of_atoms):\n read_coordinates.append(file_map.readline().split()[0:number_of_dimensions])\n\n read_coordinates = np.array(read_coordinates, dtype=float) # in angstroms\n if template is not None:\n indexing = np.argsort(template)\n read_coordinates = read_coordinates[indexing, :]\n\n try:\n if memmap:\n data[counter-initial_cut, :, :] = read_coordinates #in angstroms\n else:\n data.append(read_coordinates) #in angstroms\n\n except ValueError:\n print(\"Error reading step {0}\".format(counter))\n break\n # print(read_coordinates)\n\n #security routine to limit maximum of steps to read and put in memory\n if limit_number_steps+initial_cut < counter:\n print(\"Warning! maximum number of steps reached! No more steps will be read\")\n break\n\n if end_cut is not None and end_cut <= counter:\n break\n\n file_map.close()\n\n time = np.array(time) * time_step\n\n if not memmap:\n data = np.array(data, dtype=complex)\n\n if last_steps is not None:\n data = data[-last_steps:, :, :]\n time = time[-last_steps:]\n\n\n return dyn.Dynamics(structure=structure,\n scaled_trajectory=data,\n time=time,\n supercell=super_cell,\n memmap=memmap)\n\n\nif __name__ == \"__main__\":\n read_VASP_XDATCAR('/home/abel/VASP/MgO/MgO-FINAL/MgO_0.5_1600/No1/XDATCAR')\n" ]
[ [ "numpy.dot", "numpy.min", "numpy.linalg.inv", "numpy.arccos", "numpy.linalg.norm", "numpy.max", "numpy.argsort", "numpy.array" ] ]
littlePrince126/HRNet_Pose_Estimation_TensorFlow2
[ "7c7ebbb7eb642dbdecddb0adebfbce01c027984b" ]
[ "utils/tools.py" ]
[ "from configuration.coco_config.w32_256x192_config import CocoW32Size256x192\nimport tensorflow as tf\n\n\ndef get_config_params(config_name):\n if config_name == \"coco_w32_256x192\":\n config_params = CocoW32Size256x192()\n return config_params\n else:\n raise ValueError(\"Invalid config_name.\")\n\n\ndef read_image(image_dir, cfg):\n image_content = tf.io.read_file(filename=image_dir)\n # The 'image' has been normalized.\n image = tf.io.decode_image(contents=image_content, channels=cfg.CHANNELS, dtype=tf.dtypes.float32)\n return image\n\n\ndef random_crop_and_resize_image(image_tensor, bbox, resize_h, resize_w):\n if resize_h != resize_w:\n raise ValueError(\"The values of resize_h and resize_w should be equal.\")\n human_instance = tf.image.crop_to_bounding_box(image=image_tensor,\n offset_height=bbox[1],\n offset_width=bbox[0],\n target_height=bbox[3],\n target_width=bbox[2])\n left_top_of_human_instance = bbox[0:2]\n crop_rect, cropped_image = random_crop_in_roi(image=image_tensor, roi=human_instance, left_top_of_roi=left_top_of_human_instance)\n resize_ratio = resize_h / crop_rect.shape[-1]\n resized_image = tf.image.resize(images=cropped_image, size=[resize_h, resize_w])\n return resized_image, resize_ratio, crop_rect\n\n\ndef random_crop_in_roi(image, roi, left_top_of_roi):\n roi_h = roi.shape[0]\n roi_w = roi.shape[1]\n if roi_h > roi_w:\n longer_border = roi_h\n shorter_border = roi_w\n else:\n longer_border = roi_w\n shorter_border = roi_h\n random_coord = tf.random.uniform(shape=(), minval=0, maxval=longer_border - shorter_border)\n if longer_border == roi_h:\n x_random_crop = left_top_of_roi[0]\n y_random_crop = int(left_top_of_roi[1] + random_coord)\n else:\n x_random_crop = int(left_top_of_roi[0] + random_coord)\n y_random_crop = left_top_of_roi[1]\n crop_rect = tf.convert_to_tensor(value=[x_random_crop, y_random_crop, shorter_border, shorter_border], dtype=tf.dtypes.int32)\n cropped_image = tf.image.crop_to_bounding_box(image=image,\n offset_height=y_random_crop,\n offset_width=x_random_crop,\n target_height=shorter_border,\n target_width=shorter_border)\n return crop_rect, cropped_image\n\n\ndef point_in_rect(point_x, point_y, rect):\n # rect : (x, y, w, h)\n xmin = rect[0]\n ymin = rect[1]\n xmax = xmin + rect[2]\n ymax = ymin + rect[3]\n if xmin <= point_x <= xmax and ymin <= point_y <= ymax:\n is_point_in_rect = True\n else:\n is_point_in_rect = False\n return is_point_in_rect" ]
[ [ "tensorflow.convert_to_tensor", "tensorflow.image.crop_to_bounding_box", "tensorflow.random.uniform", "tensorflow.io.decode_image", "tensorflow.image.resize", "tensorflow.io.read_file" ] ]
khaledsabry97/Argus
[ "c794f6e46ec529a836db127dfdb33b3161cf79ee" ]
[ "Car_Detection_TF/yolo3/model.py" ]
[ "\"\"\"YOLO_v3 Model Defined in Keras.\"\"\"\n\nfrom functools import wraps\n\nimport numpy as np\nimport tensorflow as tf\nfrom keras import backend as K\nfrom keras.layers import Conv2D, Add, ZeroPadding2D, UpSampling2D, Concatenate, MaxPooling2D\nfrom keras.layers.advanced_activations import LeakyReLU\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.models import Model\nfrom keras.regularizers import l2\n\nfrom Car_Detection_TF.yolo3.utils import compose\n\n\n@wraps(Conv2D)\ndef DarknetConv2D(*args, **kwargs):\n \"\"\"Wrapper to set Darknet parameters for Convolution2D.\"\"\"\n darknet_conv_kwargs = {'kernel_regularizer': l2(5e-4)}\n darknet_conv_kwargs['padding'] = 'valid' if kwargs.get('strides')==(2,2) else 'same'\n darknet_conv_kwargs.update(kwargs)\n return Conv2D(*args, **darknet_conv_kwargs)\n\ndef DarknetConv2D_BN_Leaky(*args, **kwargs):\n \"\"\"Darknet Convolution2D followed by BatchNormalization and LeakyReLU.\"\"\"\n no_bias_kwargs = {'use_bias': False}\n no_bias_kwargs.update(kwargs)\n return compose(\n DarknetConv2D(*args, **no_bias_kwargs),\n BatchNormalization(),\n LeakyReLU(alpha=0.1))\n\ndef resblock_body(x, num_filters, num_blocks):\n '''A series of resblocks starting with a downsampling Convolution2D'''\n # Darknet uses left and top padding instead of 'same' mode\n x = ZeroPadding2D(((1,0),(1,0)))(x)\n x = DarknetConv2D_BN_Leaky(num_filters, (3,3), strides=(2,2))(x)\n for i in range(num_blocks):\n y = compose(\n DarknetConv2D_BN_Leaky(num_filters//2, (1,1)),\n DarknetConv2D_BN_Leaky(num_filters, (3,3)))(x)\n x = Add()([x,y])\n return x\n\ndef darknet_body(x):\n '''Darknent body having 52 Convolution2D layers'''\n x = DarknetConv2D_BN_Leaky(32, (3,3))(x)\n x = resblock_body(x, 64, 1)\n x = resblock_body(x, 128, 2)\n x = resblock_body(x, 256, 8)\n x = resblock_body(x, 512, 8)\n x = resblock_body(x, 1024, 4)\n return x\n\ndef make_last_layers(x, num_filters, out_filters):\n '''6 Conv2D_BN_Leaky layers followed by a Conv2D_linear layer'''\n x = compose(\n DarknetConv2D_BN_Leaky(num_filters, (1,1)),\n DarknetConv2D_BN_Leaky(num_filters*2, (3,3)),\n DarknetConv2D_BN_Leaky(num_filters, (1,1)),\n DarknetConv2D_BN_Leaky(num_filters*2, (3,3)),\n DarknetConv2D_BN_Leaky(num_filters, (1,1)))(x)\n y = compose(\n DarknetConv2D_BN_Leaky(num_filters*2, (3,3)),\n DarknetConv2D(out_filters, (1,1)))(x)\n return x, y\n\n\ndef yolo_body(inputs, num_anchors, num_classes):\n \"\"\"Create YOLO_V3 model CNN body in Keras.\"\"\"\n darknet = Model(inputs, darknet_body(inputs))\n x, y1 = make_last_layers(darknet.output, 512, num_anchors*(num_classes+5))\n\n x = compose(\n DarknetConv2D_BN_Leaky(256, (1,1)),\n UpSampling2D(2))(x)\n x = Concatenate()([x,darknet.layers[152].output])\n x, y2 = make_last_layers(x, 256, num_anchors*(num_classes+5))\n\n x = compose(\n DarknetConv2D_BN_Leaky(128, (1,1)),\n UpSampling2D(2))(x)\n x = Concatenate()([x,darknet.layers[92].output])\n x, y3 = make_last_layers(x, 128, num_anchors*(num_classes+5))\n\n return Model(inputs, [y1,y2,y3])\n\ndef tiny_yolo_body(inputs, num_anchors, num_classes):\n '''Create Tiny YOLO_v3 model CNN body in keras.'''\n x1 = compose(\n DarknetConv2D_BN_Leaky(16, (3,3)),\n MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'),\n DarknetConv2D_BN_Leaky(32, (3,3)),\n MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'),\n DarknetConv2D_BN_Leaky(64, (3,3)),\n MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'),\n DarknetConv2D_BN_Leaky(128, (3,3)),\n MaxPooling2D(pool_size=(2,2), strides=(2,2), 
padding='same'),\n DarknetConv2D_BN_Leaky(256, (3,3)))(inputs)\n x2 = compose(\n MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'),\n DarknetConv2D_BN_Leaky(512, (3,3)),\n MaxPooling2D(pool_size=(2,2), strides=(1,1), padding='same'),\n DarknetConv2D_BN_Leaky(1024, (3,3)),\n DarknetConv2D_BN_Leaky(256, (1,1)))(x1)\n y1 = compose(\n DarknetConv2D_BN_Leaky(512, (3,3)),\n DarknetConv2D(num_anchors*(num_classes+5), (1,1)))(x2)\n\n x2 = compose(\n DarknetConv2D_BN_Leaky(128, (1,1)),\n UpSampling2D(2))(x2)\n y2 = compose(\n Concatenate(),\n DarknetConv2D_BN_Leaky(256, (3,3)),\n DarknetConv2D(num_anchors*(num_classes+5), (1,1)))([x2,x1])\n\n return Model(inputs, [y1,y2])\n\n\ndef yolo_head(feats, anchors, num_classes, input_shape, calc_loss=False):\n \"\"\"Convert final layer features to bounding box parameters.\"\"\"\n num_anchors = len(anchors)\n # Reshape to batch, height, width, num_anchors, box_params.\n anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])\n\n grid_shape = K.shape(feats)[1:3] # height, width\n grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),\n [1, grid_shape[1], 1, 1])\n grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),\n [grid_shape[0], 1, 1, 1])\n grid = K.concatenate([grid_x, grid_y])\n grid = K.cast(grid, K.dtype(feats))\n\n feats = K.reshape(\n feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5])\n\n # Adjust preditions to each spatial grid point and anchor size.\n box_xy = (K.sigmoid(feats[..., :2]) + grid) / K.cast(grid_shape[::-1], K.dtype(feats))\n box_wh = K.exp(feats[..., 2:4]) * anchors_tensor / K.cast(input_shape[::-1], K.dtype(feats))\n box_confidence = K.sigmoid(feats[..., 4:5])\n box_class_probs = K.sigmoid(feats[..., 5:])\n\n if calc_loss == True:\n return grid, feats, box_xy, box_wh\n return box_xy, box_wh, box_confidence, box_class_probs\n\n\ndef yolo_correct_boxes(box_xy, box_wh, input_shape, image_shape):\n '''Get corrected boxes'''\n box_yx = box_xy[..., ::-1]\n box_hw = box_wh[..., ::-1]\n input_shape = K.cast(input_shape, K.dtype(box_yx))\n image_shape = K.cast(image_shape, K.dtype(box_yx))\n new_shape = K.round(image_shape * K.min(input_shape/image_shape))\n offset = (input_shape-new_shape)/2./input_shape\n scale = input_shape/new_shape\n box_yx = (box_yx - offset) * scale\n box_hw *= scale\n\n box_mins = box_yx - (box_hw / 2.)\n box_maxes = box_yx + (box_hw / 2.)\n boxes = K.concatenate([\n box_mins[..., 0:1], # y_min\n box_mins[..., 1:2], # x_min\n box_maxes[..., 0:1], # y_max\n box_maxes[..., 1:2] # x_max\n ])\n\n # Scale boxes back to original image shape.\n boxes *= K.concatenate([image_shape, image_shape])\n return boxes\n\n\ndef yolo_boxes_and_scores(feats, anchors, num_classes, input_shape, image_shape):\n '''Process Conv layer output'''\n box_xy, box_wh, box_confidence, box_class_probs = yolo_head(feats,\n anchors, num_classes, input_shape)\n boxes = yolo_correct_boxes(box_xy, box_wh, input_shape, image_shape)\n boxes = K.reshape(boxes, [-1, 4])\n box_scores = box_confidence * box_class_probs\n box_scores = K.reshape(box_scores, [-1, num_classes])\n return boxes, box_scores\n\n\ndef yolo_eval(yolo_outputs,\n anchors,\n num_classes,\n image_shape,\n max_boxes=20,\n score_threshold=.6,\n iou_threshold=.5):\n \"\"\"Evaluate YOLO model on given input and return filtered boxes.\"\"\"\n num_layers = len(yolo_outputs)\n anchor_mask = [[6,7,8], [3,4,5], [0,1,2]] if num_layers==3 else [[3,4,5], [1,2,3]] # default setting\n input_shape = 
K.shape(yolo_outputs[0])[1:3] * 32\n boxes = []\n box_scores = []\n for l in range(num_layers):\n _boxes, _box_scores = yolo_boxes_and_scores(yolo_outputs[l],\n anchors[anchor_mask[l]], num_classes, input_shape, image_shape)\n boxes.append(_boxes)\n box_scores.append(_box_scores)\n boxes = K.concatenate(boxes, axis=0)\n box_scores = K.concatenate(box_scores, axis=0)\n\n mask = box_scores >= score_threshold\n max_boxes_tensor = K.constant(max_boxes, dtype='int32')\n boxes_ = []\n scores_ = []\n classes_ = []\n for c in range(num_classes):\n # TODO: use keras backend instead of tf.\n class_boxes = tf.boolean_mask(boxes, mask[:, c])\n class_box_scores = tf.boolean_mask(box_scores[:, c], mask[:, c])\n nms_index = tf.image.non_max_suppression(\n class_boxes, class_box_scores, max_boxes_tensor, iou_threshold=iou_threshold)\n class_boxes = K.gather(class_boxes, nms_index)\n class_box_scores = K.gather(class_box_scores, nms_index)\n classes = K.ones_like(class_box_scores, 'int32') * c\n boxes_.append(class_boxes)\n scores_.append(class_box_scores)\n classes_.append(classes)\n boxes_ = K.concatenate(boxes_, axis=0)\n scores_ = K.concatenate(scores_, axis=0)\n classes_ = K.concatenate(classes_, axis=0)\n\n return boxes_, scores_, classes_\n\n\ndef preprocess_true_boxes(true_boxes, input_shape, anchors, num_classes):\n '''Preprocess true boxes to training input format\n\n Parameters\n ----------\n true_boxes: array, shape=(m, T, 5)\n Absolute x_min, y_min, x_max, y_max, class_id relative to input_shape.\n input_shape: array-like, hw, multiples of 32\n anchors: array, shape=(N, 2), wh\n num_classes: integer\n\n Returns\n -------\n y_true: list of array, shape like yolo_outputs, xywh are reletive value\n\n '''\n assert (true_boxes[..., 4]<num_classes).all(), 'class id must be less than num_classes'\n num_layers = len(anchors)//3 # default setting\n anchor_mask = [[6,7,8], [3,4,5], [0,1,2]] if num_layers==3 else [[3,4,5], [1,2,3]]\n\n true_boxes = np.array(true_boxes, dtype='float32')\n input_shape = np.array(input_shape, dtype='int32')\n boxes_xy = (true_boxes[..., 0:2] + true_boxes[..., 2:4]) // 2\n boxes_wh = true_boxes[..., 2:4] - true_boxes[..., 0:2]\n true_boxes[..., 0:2] = boxes_xy/input_shape[::-1]\n true_boxes[..., 2:4] = boxes_wh/input_shape[::-1]\n\n m = true_boxes.shape[0]\n grid_shapes = [input_shape//{0:32, 1:16, 2:8}[l] for l in range(num_layers)]\n y_true = [np.zeros((m,grid_shapes[l][0],grid_shapes[l][1],len(anchor_mask[l]),5+num_classes),\n dtype='float32') for l in range(num_layers)]\n\n # Expand dim to apply broadcasting.\n anchors = np.expand_dims(anchors, 0)\n anchor_maxes = anchors / 2.\n anchor_mins = -anchor_maxes\n valid_mask = boxes_wh[..., 0]>0\n\n for b in range(m):\n # Discard zero rows.\n wh = boxes_wh[b, valid_mask[b]]\n if len(wh)==0: continue\n # Expand dim to apply broadcasting.\n wh = np.expand_dims(wh, -2)\n box_maxes = wh / 2.\n box_mins = -box_maxes\n\n intersect_mins = np.maximum(box_mins, anchor_mins)\n intersect_maxes = np.minimum(box_maxes, anchor_maxes)\n intersect_wh = np.maximum(intersect_maxes - intersect_mins, 0.)\n intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]\n box_area = wh[..., 0] * wh[..., 1]\n anchor_area = anchors[..., 0] * anchors[..., 1]\n iou = intersect_area / (box_area + anchor_area - intersect_area)\n\n # Find best anchor for each true box\n best_anchor = np.argmax(iou, axis=-1)\n\n for t, n in enumerate(best_anchor):\n for l in range(num_layers):\n if n in anchor_mask[l]:\n i = 
np.floor(true_boxes[b,t,0]*grid_shapes[l][1]).astype('int32')\n j = np.floor(true_boxes[b,t,1]*grid_shapes[l][0]).astype('int32')\n k = anchor_mask[l].index(n)\n c = true_boxes[b,t, 4].astype('int32')\n y_true[l][b, j, i, k, 0:4] = true_boxes[b,t, 0:4]\n y_true[l][b, j, i, k, 4] = 1\n y_true[l][b, j, i, k, 5+c] = 1\n\n return y_true\n\n\ndef box_iou(b1, b2):\n '''Return iou tensor\n\n Parameters\n ----------\n b1: tensor, shape=(i1,...,iN, 4), xywh\n b2: tensor, shape=(j, 4), xywh\n\n Returns\n -------\n iou: tensor, shape=(i1,...,iN, j)\n\n '''\n\n # Expand dim to apply broadcasting.\n b1 = K.expand_dims(b1, -2)\n b1_xy = b1[..., :2]\n b1_wh = b1[..., 2:4]\n b1_wh_half = b1_wh/2.\n b1_mins = b1_xy - b1_wh_half\n b1_maxes = b1_xy + b1_wh_half\n\n # Expand dim to apply broadcasting.\n b2 = K.expand_dims(b2, 0)\n b2_xy = b2[..., :2]\n b2_wh = b2[..., 2:4]\n b2_wh_half = b2_wh/2.\n b2_mins = b2_xy - b2_wh_half\n b2_maxes = b2_xy + b2_wh_half\n\n intersect_mins = K.maximum(b1_mins, b2_mins)\n intersect_maxes = K.minimum(b1_maxes, b2_maxes)\n intersect_wh = K.maximum(intersect_maxes - intersect_mins, 0.)\n intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]\n b1_area = b1_wh[..., 0] * b1_wh[..., 1]\n b2_area = b2_wh[..., 0] * b2_wh[..., 1]\n iou = intersect_area / (b1_area + b2_area - intersect_area)\n\n return iou\n\n\ndef yolo_loss(args, anchors, num_classes, ignore_thresh=.5, print_loss=False):\n '''Return yolo_loss tensor\n\n Parameters\n ----------\n yolo_outputs: list of tensor, the output of yolo_body or tiny_yolo_body\n y_true: list of array, the output of preprocess_true_boxes\n anchors: array, shape=(N, 2), wh\n num_classes: integer\n ignore_thresh: float, the iou threshold whether to ignore object confidence loss\n\n Returns\n -------\n loss: tensor, shape=(1,)\n\n '''\n num_layers = len(anchors)//3 # default setting\n yolo_outputs = args[:num_layers]\n y_true = args[num_layers:]\n anchor_mask = [[6,7,8], [3,4,5], [0,1,2]] if num_layers==3 else [[3,4,5], [1,2,3]]\n input_shape = K.cast(K.shape(yolo_outputs[0])[1:3] * 32, K.dtype(y_true[0]))\n grid_shapes = [K.cast(K.shape(yolo_outputs[l])[1:3], K.dtype(y_true[0])) for l in range(num_layers)]\n loss = 0\n m = K.shape(yolo_outputs[0])[0] # batch size, tensor\n mf = K.cast(m, K.dtype(yolo_outputs[0]))\n\n for l in range(num_layers):\n object_mask = y_true[l][..., 4:5]\n true_class_probs = y_true[l][..., 5:]\n\n grid, raw_pred, pred_xy, pred_wh = yolo_head(yolo_outputs[l],\n anchors[anchor_mask[l]], num_classes, input_shape, calc_loss=True)\n pred_box = K.concatenate([pred_xy, pred_wh])\n\n # Darknet raw box to calculate loss.\n raw_true_xy = y_true[l][..., :2]*grid_shapes[l][::-1] - grid\n raw_true_wh = K.log(y_true[l][..., 2:4] / anchors[anchor_mask[l]] * input_shape[::-1])\n raw_true_wh = K.switch(object_mask, raw_true_wh, K.zeros_like(raw_true_wh)) # avoid log(0)=-inf\n box_loss_scale = 2 - y_true[l][...,2:3]*y_true[l][...,3:4]\n\n # Find ignore mask, iterate over each of batch.\n ignore_mask = tf.TensorArray(K.dtype(y_true[0]), size=1, dynamic_size=True)\n object_mask_bool = K.cast(object_mask, 'bool')\n def loop_body(b, ignore_mask):\n true_box = tf.boolean_mask(y_true[l][b,...,0:4], object_mask_bool[b,...,0])\n iou = box_iou(pred_box[b], true_box)\n best_iou = K.max(iou, axis=-1)\n ignore_mask = ignore_mask.write(b, K.cast(best_iou<ignore_thresh, K.dtype(true_box)))\n return b+1, ignore_mask\n _, ignore_mask = K.control_flow_ops.while_loop(lambda b,*args: b<m, loop_body, [0, ignore_mask])\n ignore_mask = 
ignore_mask.stack()\n ignore_mask = K.expand_dims(ignore_mask, -1)\n\n # K.binary_crossentropy is helpful to avoid exp overflow.\n xy_loss = object_mask * box_loss_scale * K.binary_crossentropy(raw_true_xy, raw_pred[...,0:2], from_logits=True)\n wh_loss = object_mask * box_loss_scale * 0.5 * K.square(raw_true_wh-raw_pred[...,2:4])\n confidence_loss = object_mask * K.binary_crossentropy(object_mask, raw_pred[...,4:5], from_logits=True)+ \\\n (1-object_mask) * K.binary_crossentropy(object_mask, raw_pred[...,4:5], from_logits=True) * ignore_mask\n class_loss = object_mask * K.binary_crossentropy(true_class_probs, raw_pred[...,5:], from_logits=True)\n\n xy_loss = K.sum(xy_loss) / mf\n wh_loss = K.sum(wh_loss) / mf\n confidence_loss = K.sum(confidence_loss) / mf\n class_loss = K.sum(class_loss) / mf\n loss += xy_loss + wh_loss + confidence_loss + class_loss\n if print_loss:\n loss = tf.Print(loss, [loss, xy_loss, wh_loss, confidence_loss, class_loss, K.sum(ignore_mask)], message='loss: ')\n return loss\n" ]
[ [ "tensorflow.boolean_mask", "numpy.expand_dims", "numpy.maximum", "numpy.minimum", "tensorflow.image.non_max_suppression", "numpy.argmax", "numpy.floor", "numpy.array" ] ]
ckkelvinchan/mmgeneration
[ "198e7e2112efd38c32c5b2837a46b9ff6badabb2" ]
[ "mmgen/core/evaluation/metrics.py" ]
[ "import os\nimport pickle\nfrom abc import ABC, abstractmethod\n\nimport mmcv\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nimport torch.nn.functional as F\nfrom mmcv.runner import get_dist_info\nfrom scipy import linalg, signal\nfrom scipy.stats import entropy\nfrom torchvision import models\nfrom torchvision.models.inception import inception_v3\n\nfrom mmgen.models.architectures import InceptionV3\nfrom mmgen.models.architectures.common import get_module_device\nfrom mmgen.models.architectures.lpips import PerceptualLoss\nfrom ..registry import METRICS\nfrom .metric_utils import (_f_special_gauss, _hox_downsample,\n compute_pr_distances, finalize_descriptors,\n get_descriptors_for_minibatch, get_gaussian_kernel,\n laplacian_pyramid, slerp)\n\n\ndef _ssim_for_multi_scale(img1,\n img2,\n max_val=255,\n filter_size=11,\n filter_sigma=1.5,\n k1=0.01,\n k2=0.03):\n \"\"\"Calculate SSIM (structural similarity) and contrast sensitivity.\n\n Ref:\n Image quality assessment: From error visibility to structural similarity.\n\n The results are the same as that of the official released MATLAB code in\n https://ece.uwaterloo.ca/~z70wang/research/ssim/.\n\n For three-channel images, SSIM is calculated for each channel and then\n averaged.\n\n This function attempts to match the functionality of ssim_index_new.m by\n Zhou Wang: http://www.cns.nyu.edu/~lcv/ssim/msssim.zip\n\n Args:\n img1 (ndarray): Images with range [0, 255] and order \"NHWC\".\n img2 (ndarray): Images with range [0, 255] and order \"NHWC\".\n max_val (int): the dynamic range of the images (i.e., the difference\n between the maximum the and minimum allowed values).\n Default to 255.\n filter_size (int): Size of blur kernel to use (will be reduced for\n small images). Default to 11.\n filter_sigma (float): Standard deviation for Gaussian blur kernel (will\n be reduced for small images). Default to 1.5.\n k1 (float): Constant used to maintain stability in the SSIM calculation\n (0.01 in the original paper). Default to 0.01.\n k2 (float): Constant used to maintain stability in the SSIM calculation\n (0.03 in the original paper). Default to 0.03.\n\n Returns:\n tuple: Pair containing the mean SSIM and contrast sensitivity between\n `img1` and `img2`.\n \"\"\"\n if img1.shape != img2.shape:\n raise RuntimeError(\n 'Input images must have the same shape (%s vs. %s).' 
%\n (img1.shape, img2.shape))\n if img1.ndim != 4:\n raise RuntimeError('Input images must have four dimensions, not %d' %\n img1.ndim)\n\n img1 = img1.astype(np.float32)\n img2 = img2.astype(np.float32)\n _, height, width, _ = img1.shape\n\n # Filter size can't be larger than height or width of images.\n size = min(filter_size, height, width)\n\n # Scale down sigma if a smaller filter size is used.\n sigma = size * filter_sigma / filter_size if filter_size else 0\n\n if filter_size:\n window = np.reshape(_f_special_gauss(size, sigma), (1, size, size, 1))\n mu1 = signal.fftconvolve(img1, window, mode='valid')\n mu2 = signal.fftconvolve(img2, window, mode='valid')\n sigma11 = signal.fftconvolve(img1 * img1, window, mode='valid')\n sigma22 = signal.fftconvolve(img2 * img2, window, mode='valid')\n sigma12 = signal.fftconvolve(img1 * img2, window, mode='valid')\n else:\n # Empty blur kernel so no need to convolve.\n mu1, mu2 = img1, img2\n sigma11 = img1 * img1\n sigma22 = img2 * img2\n sigma12 = img1 * img2\n\n mu11 = mu1 * mu1\n mu22 = mu2 * mu2\n mu12 = mu1 * mu2\n sigma11 -= mu11\n sigma22 -= mu22\n sigma12 -= mu12\n\n # Calculate intermediate values used by both ssim and cs_map.\n c1 = (k1 * max_val)**2\n c2 = (k2 * max_val)**2\n v1 = 2.0 * sigma12 + c2\n v2 = sigma11 + sigma22 + c2\n ssim = np.mean((((2.0 * mu12 + c1) * v1) / ((mu11 + mu22 + c1) * v2)),\n axis=(1, 2, 3)) # Return for each image individually.\n cs = np.mean(v1 / v2, axis=(1, 2, 3))\n return ssim, cs\n\n\ndef ms_ssim(img1,\n img2,\n max_val=255,\n filter_size=11,\n filter_sigma=1.5,\n k1=0.01,\n k2=0.03,\n weights=None):\n \"\"\"Calculate MS-SSIM (multi-scale structural similarity).\n\n Ref:\n This function implements Multi-Scale Structural Similarity (MS-SSIM) Image\n Quality Assessment according to Zhou Wang's paper, \"Multi-scale structural\n similarity for image quality assessment\" (2003).\n Link: https://ece.uwaterloo.ca/~z70wang/publications/msssim.pdf\n\n Author's MATLAB implementation:\n http://www.cns.nyu.edu/~lcv/ssim/msssim.zip\n\n PGGAN's implementation:\n https://github.com/tkarras/progressive_growing_of_gans/blob/master/metrics/ms_ssim.py\n\n Args:\n img1 (ndarray): Images with range [0, 255] and order \"NHWC\".\n img2 (ndarray): Images with range [0, 255] and order \"NHWC\".\n max_val (int): the dynamic range of the images (i.e., the difference\n between the maximum the and minimum allowed values).\n Default to 255.\n filter_size (int): Size of blur kernel to use (will be reduced for\n small images). Default to 11.\n filter_sigma (float): Standard deviation for Gaussian blur kernel (will\n be reduced for small images). Default to 1.5.\n k1 (float): Constant used to maintain stability in the SSIM calculation\n (0.01 in the original paper). Default to 0.01.\n k2 (float): Constant used to maintain stability in the SSIM calculation\n (0.03 in the original paper). Default to 0.03.\n weights (list): List of weights for each level; if none, use five\n levels and the weights from the original paper. Default to None.\n\n Returns:\n float: MS-SSIM score between `img1` and `img2`.\n \"\"\"\n if img1.shape != img2.shape:\n raise RuntimeError(\n 'Input images must have the same shape (%s vs. %s).' 
%\n (img1.shape, img2.shape))\n if img1.ndim != 4:\n raise RuntimeError('Input images must have four dimensions, not %d' %\n img1.ndim)\n\n # Note: default weights don't sum to 1.0 but do match the paper / matlab\n # code.\n weights = np.array(\n weights if weights else [0.0448, 0.2856, 0.3001, 0.2363, 0.1333])\n levels = weights.size\n im1, im2 = [x.astype(np.float32) for x in [img1, img2]]\n mssim = []\n mcs = []\n for _ in range(levels):\n ssim, cs = _ssim_for_multi_scale(\n im1,\n im2,\n max_val=max_val,\n filter_size=filter_size,\n filter_sigma=filter_sigma,\n k1=k1,\n k2=k2)\n mssim.append(ssim)\n mcs.append(cs)\n im1, im2 = [_hox_downsample(x) for x in [im1, im2]]\n\n # Clip to zero. Otherwise we get NaNs.\n mssim = np.clip(np.asarray(mssim), 0.0, np.inf)\n mcs = np.clip(np.asarray(mcs), 0.0, np.inf)\n\n # Average over images only at the end.\n return np.mean(\n np.prod(mcs[:-1, :]**weights[:-1, np.newaxis], axis=0) *\n (mssim[-1, :]**weights[-1]))\n\n\ndef sliced_wasserstein(distribution_a,\n distribution_b,\n dir_repeats=4,\n dirs_per_repeat=128):\n r\"\"\"sliced Wasserstein distance of two sets of patches.\n\n Ref: https://github.com/tkarras/progressive_growing_of_gans/blob/master/metrics/ms_ssim.py # noqa\n\n Args:\n distribution_a (Tensor): Descriptors of first distribution.\n distribution_b (Tensor): Descriptors of second distribution.\n dir_repeats (int): The number of projection times. Default to 4.\n dirs_per_repeat (int): The number of directions per projection.\n Default to 128.\n\n Returns:\n float: sliced Wasserstein distance.\n \"\"\"\n if torch.cuda.is_available():\n distribution_b = distribution_b.cuda()\n assert distribution_a.ndim == 2\n assert distribution_a.shape == distribution_b.shape\n assert dir_repeats > 0 and dirs_per_repeat > 0\n distribution_a = distribution_a.to(distribution_b.device)\n results = []\n for _ in range(dir_repeats):\n dirs = torch.randn(distribution_a.shape[1], dirs_per_repeat)\n dirs /= torch.sqrt(torch.sum((dirs**2), dim=0, keepdim=True))\n dirs = dirs.to(distribution_b.device)\n proj_a = torch.matmul(distribution_a, dirs)\n proj_b = torch.matmul(distribution_b, dirs)\n # To save cuda memory, we perform sort in cpu\n proj_a, _ = torch.sort(proj_a.cpu(), dim=0)\n proj_b, _ = torch.sort(proj_b.cpu(), dim=0)\n dists = torch.abs(proj_a - proj_b)\n results.append(torch.mean(dists).item())\n torch.cuda.empty_cache()\n return sum(results) / dir_repeats\n\n\nclass Metric(ABC):\n \"\"\"The abstract base class of metrics. Basically, we split calculation into\n three steps. First, we initialize the metric object and do some\n preparation. Second, we will feed the real and fake images into metric\n object batch by batch, and we calculate intermediate results of these\n batches. Finally, We use these intermediate results to summarize the final\n result. 
And the result as a string can be obtained by property\n 'result_str'.\n\n Args:\n num_images (int): The number of real/fake images needed to calculate\n metric.\n image_shape (tuple): Shape of the real/fake images with order \"CHW\".\n \"\"\"\n\n def __init__(self, num_images, image_shape=None):\n self.num_images = num_images\n self.image_shape = image_shape\n self.num_real_need = num_images\n self.num_fake_need = num_images\n self.num_real_feeded = 0 # record of the feeded real images\n self.num_fake_feeded = 0 # record of the feeded fake images\n self._result_str = None # string of metric result\n\n @property\n def result_str(self):\n \"\"\"Get results in string format.\n\n Returns:\n str: results in string format\n \"\"\"\n if not self._result_str:\n self.summary()\n return self._result_str\n\n return self._result_str\n\n def feed(self, batch, mode):\n \"\"\"Feed a image batch into metric calculator and perform intermediate\n operation in 'feed_op' function.\n\n Args:\n batch (Tensor): Images feeded into metric object with order \"NCHW\"\n and range [-1, 1].\n mode (str): Mark the batch as real or fake images. Value can be\n 'reals' or 'fakes',\n \"\"\"\n if mode == 'reals':\n if self.num_real_feeded == self.num_real_need:\n return 0\n\n batch_size = batch.shape[0]\n end = min(batch_size, self.num_real_need - self.num_real_feeded)\n self.feed_op(batch[:end, :, :, :], mode)\n self.num_real_feeded += end\n return end\n\n elif mode == 'fakes':\n if self.num_fake_feeded == self.num_fake_need:\n return 0\n\n batch_size = batch.shape[0]\n end = min(batch_size, self.num_fake_need - self.num_fake_feeded)\n self.feed_op(batch[:end, :, :, :], mode)\n self.num_fake_feeded += end\n return end\n else:\n raise ValueError(\n f\"The expected mode should be set to 'reals' or 'fakes,\\\n but got '{mode}'\")\n\n def check(self):\n \"\"\"Check the numbers of image.\"\"\"\n assert self.num_real_feeded == self.num_fake_feeded == self.num_images\n\n @abstractmethod\n def prepare(self, *args, **kwargs):\n \"\"\"please implement in subclass.\"\"\"\n\n @abstractmethod\n def feed_op(self, batch, mode):\n \"\"\"please implement in subclass.\"\"\"\n\n @abstractmethod\n def summary(self):\n \"\"\"please implement in subclass.\"\"\"\n\n\n@METRICS.register_module()\nclass FID(Metric):\n \"\"\"FID metric.\n\n In this metric, we calculate the distance between real distributions and\n fake distributions. The distributions are modeled by the real samples and\n fake samples, respectively.\n\n `Inception_v3` is adopted as the feature extractor, which is widely used in\n StyleGAN and BigGAN.\n\n Args:\n num_images (int): The number of images to be tested.\n image_shape (tuple[int], optional): Image shape. Defaults to None.\n inception_pkl (str, optional): Path to reference inception pickle file.\n If `None`, the statistical value of real distribution will be\n calculated at running time. Defaults to None.\n bgr2rgb (bool, optional): If True, reformat the BGR image to RGB\n format. 
Defaults to True.\n inception_args (dict, optional): Keyword args for inception net.\n Defaults to `dict(normalize_input=False)`.\n \"\"\"\n name = 'FID'\n\n def __init__(self,\n num_images,\n image_shape=None,\n inception_pkl=None,\n bgr2rgb=True,\n inception_args=dict(normalize_input=False)):\n super().__init__(num_images, image_shape=image_shape)\n self.inception_pkl = inception_pkl\n self.real_feats = []\n self.fake_feats = []\n self.real_mean = None\n self.real_cov = None\n self.bgr2rgb = bgr2rgb\n self.device = 'cpu'\n\n # define inception network as official StyleGAN\n if inception_args.get('type', None) == 'StyleGAN':\n self.inception_net = torch.jit.load(\n inception_args['inception_path'])\n self.inception_style = 'StyleGAN'\n else:\n self.inception_style = 'PyTorch'\n # define inception net with default PyTorch style\n self.inception_net = InceptionV3([3], **inception_args)\n if torch.cuda.is_available():\n self.inception_net = self.inception_net.cuda()\n self.device = 'cuda'\n self.inception_net.eval()\n\n mmcv.print_log(f'Adopt Inception in {self.inception_style} style',\n 'mmgen')\n\n def prepare(self):\n \"\"\"Prepare for evaluating models with this metric.\"\"\"\n # if `inception_pkl` is provided, read mean and cov stat\n if self.inception_pkl is not None and mmcv.is_filepath(\n self.inception_pkl):\n with open(self.inception_pkl, 'rb') as f:\n reference = pickle.load(f)\n self.real_mean = reference['mean']\n self.real_cov = reference['cov']\n mmcv.print_log(\n f'Load reference inception pkl from {self.inception_pkl}',\n 'mmgen')\n self.num_real_feeded = self.num_images\n\n @torch.no_grad()\n def feed_op(self, batch, mode):\n \"\"\"Feed data to the metric.\n\n Args:\n batch (Tensor): Input tensor.\n mode (str): The mode of current data batch. 
'reals' or 'fakes'.\n \"\"\"\n if self.bgr2rgb:\n batch = batch[:, [2, 1, 0]]\n batch = batch.to(self.device)\n\n if self.inception_style == 'StyleGAN':\n batch = (batch * 127.5 + 128).clamp(0, 255).to(torch.uint8)\n feat = self.inception_net(batch, return_features=True)\n else:\n feat = self.inception_net(batch)[0].view(batch.shape[0], -1)\n\n # gather all of images if using distributed training\n if dist.is_initialized():\n ws = dist.get_world_size()\n placeholder = [torch.zeros_like(feat) for _ in range(ws)]\n dist.all_gather(placeholder, feat)\n feat = torch.cat(placeholder, dim=0)\n\n # in distributed training, we only collect features at rank-0.\n if (dist.is_initialized()\n and dist.get_rank() == 0) or not dist.is_initialized():\n if mode == 'reals':\n self.real_feats.append(feat.cpu())\n elif mode == 'fakes':\n self.fake_feats.append(feat.cpu())\n else:\n raise ValueError(\n f\"The expected mode should be set to 'reals' or 'fakes,\\\n but got '{mode}'\")\n\n @staticmethod\n def _calc_fid(sample_mean, sample_cov, real_mean, real_cov, eps=1e-6):\n \"\"\"Refer to the implementation from:\n\n https://github.com/rosinality/stylegan2-pytorch/blob/master/fid.py#L34\n \"\"\"\n cov_sqrt, _ = linalg.sqrtm(sample_cov @ real_cov, disp=False)\n\n if not np.isfinite(cov_sqrt).all():\n print('product of cov matrices is singular')\n offset = np.eye(sample_cov.shape[0]) * eps\n cov_sqrt = linalg.sqrtm(\n (sample_cov + offset) @ (real_cov + offset))\n\n if np.iscomplexobj(cov_sqrt):\n if not np.allclose(np.diagonal(cov_sqrt).imag, 0, atol=1e-3):\n m = np.max(np.abs(cov_sqrt.imag))\n\n raise ValueError(f'Imaginary component {m}')\n\n cov_sqrt = cov_sqrt.real\n\n mean_diff = sample_mean - real_mean\n mean_norm = mean_diff @ mean_diff\n\n trace = np.trace(sample_cov) + np.trace(\n real_cov) - 2 * np.trace(cov_sqrt)\n\n fid = mean_norm + trace\n\n return fid, mean_norm, trace\n\n @torch.no_grad()\n def summary(self):\n \"\"\"Summarize the results.\n\n Returns:\n dict | list: Summarized results.\n \"\"\"\n # calculate reference inception stat\n if self.real_mean is None:\n feats = torch.cat(self.real_feats, dim=0)\n assert feats.shape[0] >= self.num_images\n feats = feats[:self.num_images]\n feats_np = feats.numpy()\n self.real_mean = np.mean(feats_np, 0)\n self.real_cov = np.cov(feats_np, rowvar=False)\n\n # calculate fake inception stat\n fake_feats = torch.cat(self.fake_feats, dim=0)\n assert fake_feats.shape[0] >= self.num_images\n fake_feats = fake_feats[:self.num_images]\n fake_feats_np = fake_feats.numpy()\n fake_mean = np.mean(fake_feats_np, 0)\n fake_cov = np.cov(fake_feats_np, rowvar=False)\n\n # calculate distance between real and fake statistics\n fid, mean, cov = self._calc_fid(fake_mean, fake_cov, self.real_mean,\n self.real_cov)\n\n # results for print/table\n self._result_str = (f'{fid:.4f} ({mean:.5f}/{cov:.5f})')\n # results for log_buffer\n self._result_dict = dict(fid=fid, fid_mean=mean, fid_cov=cov)\n\n return fid, mean, cov\n\n def clear_fake_data(self):\n \"\"\"Clear fake data.\"\"\"\n self.fake_feats = []\n self.num_fake_feeded = 0\n\n def clear(self, clear_reals=False):\n \"\"\"Clear data buffers.\n\n Args:\n clear_reals (bool, optional): Whether to clear real data.\n Defaults to False.\n \"\"\"\n self.clear_fake_data()\n if clear_reals:\n self.real_feats = []\n self.num_real_feeded = 0\n\n\n@METRICS.register_module()\nclass MS_SSIM(Metric):\n \"\"\"MS-SSIM (Multi-Scale Structure Similarity) metric.\n\n Ref: 
https://github.com/tkarras/progressive_growing_of_gans/blob/master/metrics/ms_ssim.py # noqa\n\n Args:\n num_images (int): The number of evaluated generated samples.\n image_shape (tuple, optional): Image shape in order \"CHW\". Defaults to\n None.\n \"\"\"\n name = 'MS-SSIM'\n\n def __init__(self, num_images, image_shape=None):\n super().__init__(num_images, image_shape)\n assert num_images % 2 == 0\n self.num_pairs = num_images // 2\n\n def prepare(self):\n \"\"\"Prepare for evaluating models with this metric.\"\"\"\n self.sum = 0.0\n\n @torch.no_grad()\n def feed_op(self, minibatch, mode):\n \"\"\"Feed data to the metric.\n\n Args:\n batch (Tensor): Input tensor.\n mode (str): The mode of current data batch. 'reals' or 'fakes'.\n \"\"\"\n if mode == 'reals':\n return\n minibatch = ((minibatch + 1) / 2)\n minibatch = minibatch.clamp_(0, 1)\n half1 = minibatch[0::2].cpu().data.numpy().transpose((0, 2, 3, 1))\n half1 = (half1 * 255).astype('uint8')\n half2 = minibatch[1::2].cpu().data.numpy().transpose((0, 2, 3, 1))\n half2 = (half2 * 255).astype('uint8')\n score = ms_ssim(half1, half2)\n self.sum += score * (minibatch.shape[0] // 2)\n\n @torch.no_grad()\n def summary(self):\n \"\"\"Summarize the results.\n\n Returns:\n dict | list: Summarized results.\n \"\"\"\n self.check()\n avg = self.sum / self.num_pairs\n self._result_str = str(round(avg.item(), 4))\n return avg\n\n\n@METRICS.register_module()\nclass SWD(Metric):\n \"\"\"SWD (Sliced Wasserstein distance) metric. We calculate the SWD of two\n sets of images in the following way. In every 'feed', we obtain the\n Laplacian pyramids of every images and extract patches from the Laplacian\n pyramids as descriptors. In 'summary', we normalize these descriptors along\n channel, and reshape them so that we can use these descriptors to represent\n the distribution of real/fake images. And we can calculate the sliced\n Wasserstein distance of the real and fake descriptors as the SWD of the\n real and fake images.\n\n Ref: https://github.com/tkarras/progressive_growing_of_gans/blob/master/metrics/sliced_wasserstein.py # noqa\n\n Args:\n num_images (int): The number of evaluated generated samples.\n image_shape (tuple): Image shape in order \"CHW\".\n \"\"\"\n name = 'SWD'\n\n def __init__(self, num_images, image_shape):\n super().__init__(num_images, image_shape)\n\n self.nhood_size = 7 # height and width of the extracted patches\n self.nhoods_per_image = 128 # number of extracted patches per image\n self.dir_repeats = 4 # times of sampling directions\n self.dirs_per_repeat = 128 # number of directions per sampling\n self.resolutions = []\n res = image_shape[1]\n while res >= 16 and len(self.resolutions) < 4:\n self.resolutions.append(res)\n res //= 2\n self.n_pyramids = len(self.resolutions)\n\n def prepare(self):\n \"\"\"Prepare for evaluating models with this metric.\"\"\"\n self.real_descs = [[] for res in self.resolutions]\n self.fake_descs = [[] for res in self.resolutions]\n self.gaussian_k = get_gaussian_kernel()\n\n @torch.no_grad()\n def feed_op(self, minibatch, mode):\n \"\"\"Feed data to the metric.\n\n Args:\n batch (Tensor): Input tensor.\n mode (str): The mode of current data batch. 
'reals' or 'fakes'.\n \"\"\"\n assert minibatch.shape[1:] == self.image_shape\n if mode == 'reals':\n real_pyramid = laplacian_pyramid(minibatch, self.n_pyramids - 1,\n self.gaussian_k)\n for lod, level in enumerate(real_pyramid):\n desc = get_descriptors_for_minibatch(level, self.nhood_size,\n self.nhoods_per_image)\n self.real_descs[lod].append(desc)\n elif mode == 'fakes':\n fake_pyramid = laplacian_pyramid(minibatch, self.n_pyramids - 1,\n self.gaussian_k)\n for lod, level in enumerate(fake_pyramid):\n desc = get_descriptors_for_minibatch(level, self.nhood_size,\n self.nhoods_per_image)\n self.fake_descs[lod].append(desc)\n else:\n raise ValueError(f'{mode} is not a implemented feed mode.')\n\n @torch.no_grad()\n def summary(self):\n \"\"\"Summarize the results.\n\n Returns:\n dict | list: Summarized results.\n \"\"\"\n self.check()\n real_descs = [finalize_descriptors(d) for d in self.real_descs]\n fake_descs = [finalize_descriptors(d) for d in self.fake_descs]\n del self.real_descs\n del self.fake_descs\n distance = [\n sliced_wasserstein(dreal, dfake, self.dir_repeats,\n self.dirs_per_repeat)\n for dreal, dfake in zip(real_descs, fake_descs)\n ]\n del real_descs\n del fake_descs\n distance = [d * 1e3 for d in distance] # multiply by 10^3\n result = distance + [np.mean(distance)]\n self._result_str = ', '.join([str(round(d, 2)) for d in result])\n return result\n\n\n@METRICS.register_module()\nclass PR(Metric):\n r\"\"\"Improved Precision and recall metric.\n\n In this metric, we draw real and generated samples respectively, and\n embed them into a high-dimensional feature space using a pre-trained\n classifier network. We use these features to estimate the corresponding\n manifold. We obtain the estimation by calculating pairwise Euclidean\n distances between all feature vectors in the set and, for each feature\n vector, construct a hypersphere with radius equal to the distance to its\n kth nearest neighbor. Together, these hyperspheres define a volume in\n the feature space that serves as an estimate of the true manifold.\n Precision is quantified by querying for each generated image whether\n the image is within the estimated manifold of real images.\n Symmetrically, recall is calculated by querying for each real image\n whether the image is within estimated manifold of generated image.\n\n Ref: https://github.com/NVlabs/stylegan2-ada-pytorch/blob/main/metrics/precision_recall.py # noqa\n\n Note that we highly recommend that users should download the vgg16\n script module from the following address. Then, the `vgg16_script` can\n be set with user's local path. If not given, we will use the vgg16 from\n pytorch model zoo. However, this may bring significant different in the\n final results.\n\n Tero's vgg16: https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metrics/vgg16.pt\n\n Args:\n num_images (int): The number of evaluated generated samples.\n image_shape (tuple): Image shape in order \"CHW\". Defaults to None.\n num_real_need (int | None, optional): The number of real images.\n Defaults to None.\n full_dataset (bool, optional): Whether to use full dataset for\n evaluation. Defaults to False.\n k (int, optional): Kth nearest parameter. Defaults to 3.\n bgr2rgb (bool, optional): Whether to change the order of image\n channel. 
Defaults to True.\n vgg16_script (str, optional): Path for the Tero's vgg16 module.\n Defaults to 'work_dirs/cache/vgg16.pt'.\n row_batch_size (int, optional): The batch size of row data.\n Defaults to 10000.\n col_batch_size (int, optional): The batch size of col data.\n Defaults to 10000.\n \"\"\"\n name = 'PR'\n\n def __init__(self,\n num_images,\n image_shape=None,\n num_real_need=None,\n full_dataset=False,\n k=3,\n bgr2rgb=True,\n vgg16_script='work_dirs/cache/vgg16.pt',\n row_batch_size=10000,\n col_batch_size=10000):\n super().__init__(num_images, image_shape)\n mmcv.print_log('loading vgg16 for improved precision and recall...',\n 'mmgen')\n if os.path.isfile(vgg16_script):\n self.vgg16 = torch.jit.load('work_dirs/cache/vgg16.pt').eval()\n self.use_tero_scirpt = True\n else:\n mmcv.print_log(\n 'Cannot load Tero\\'s script module. Use official '\n 'vgg16 instead', 'mmgen')\n self.vgg16 = models.vgg16(pretrained=True).eval()\n self.use_tero_scirpt = False\n self.device = 'cpu'\n if torch.cuda.is_available():\n self.vgg16 = self.vgg16.cuda()\n self.device = 'cuda'\n self.k = k\n\n self.bgr2rgb = bgr2rgb\n self.full_dataset = full_dataset\n self.row_batch_size = row_batch_size\n self.col_batch_size = col_batch_size\n if num_real_need:\n self.num_real_need = num_real_need\n\n if self.full_dataset:\n self.num_real_need = 10000000\n\n def prepare(self, *args, **kwargs):\n \"\"\"Prepare for evaluating models with this metric.\"\"\"\n self.features_of_reals = []\n self.features_of_fakes = []\n\n @torch.no_grad()\n def feed_op(self, batch, mode):\n \"\"\"Feed data to the metric.\n\n Args:\n batch (Tensor): Input tensor.\n mode (str): The mode of current data batch. 'reals' or 'fakes'.\n \"\"\"\n batch = batch.to(self.device)\n if self.bgr2rgb:\n batch = batch[:, [2, 1, 0], ...]\n if self.use_tero_scirpt:\n batch = (batch * 127.5 + 128).clamp(0, 255).to(torch.uint8)\n if mode == 'reals':\n self.features_of_reals.append(self.extract_features(batch))\n elif mode == 'fakes':\n self.features_of_fakes.append(self.extract_features(batch))\n else:\n raise ValueError(f'{mode} is not a implemented feed mode.')\n\n def check(self):\n if not self.full_dataset:\n assert (self.num_real_feeded == self.num_real_need\n and self.num_fake_feeded == self.num_fake_need)\n else:\n assert self.num_fake_feeded == self.num_fake_need\n mmcv.print_log(\n f'Test for the full dataset with {self.num_real_feeded}'\n ' real images', 'mmgen')\n\n @torch.no_grad()\n def summary(self):\n \"\"\"Summarize the results.\n\n Returns:\n dict | list: Summarized results.\n \"\"\"\n self.check()\n\n real_features = torch.cat(self.features_of_reals)\n gen_features = torch.cat(self.features_of_fakes)\n\n self._result_dict = {}\n rank, ws = get_dist_info()\n\n for name, manifold, probes in [\n ('precision', real_features, gen_features),\n ('recall', gen_features, real_features)\n ]:\n kth = []\n for manifold_batch in manifold.split(self.row_batch_size):\n distance = compute_pr_distances(\n row_features=manifold_batch,\n col_features=manifold,\n num_gpus=ws,\n rank=rank,\n col_batch_size=self.col_batch_size)\n kth.append(\n distance.to(torch.float32).kthvalue(self.k + 1).values.\n to(torch.float16) if rank == 0 else None)\n kth = torch.cat(kth) if rank == 0 else None\n pred = []\n for probes_batch in probes.split(self.row_batch_size):\n distance = compute_pr_distances(\n row_features=probes_batch,\n col_features=manifold,\n num_gpus=ws,\n rank=rank,\n col_batch_size=self.col_batch_size)\n pred.append((distance <= kth).any(\n dim=1) if 
rank == 0 else None)\n self._result_dict[name] = float(\n torch.cat(pred).to(torch.float32).mean() if rank ==\n 0 else 'nan')\n\n precision = self._result_dict['precision']\n recall = self._result_dict['recall']\n self._result_str = f'precision: {precision}, recall:{recall}'\n return self._result_dict\n\n def extract_features(self, images):\n if self.use_tero_scirpt:\n feature = self.vgg16(images, return_features=True)\n else:\n batch = F.interpolate(images, size=(224, 224))\n before_fc = self.vgg16.features(batch)\n before_fc = before_fc.view(-1, 7 * 7 * 512)\n feature = self.vgg16.classifier[:4](before_fc)\n\n return feature\n\n\n@METRICS.register_module()\nclass IS(Metric):\n \"\"\"IS (Inception Score) metric.\n\n The images are splitted into groups, and the inception score is calculated\n on each group of images, then the mean and standard deviation of the score\n is reported. The calculation of the inception score on a group of images\n involves first using the inception v3 model to calculate the conditional\n probability for each image (p(y|x)). The marginal probability is then\n calculated as the average of the conditional probabilities for the images\n in the group (p(y)). The KL divergence is then calculated for each image as\n the conditional probability multiplied by the log of the conditional\n probability minus the log of the marginal probability. The KL divergence is\n then summed over all images and averaged over all classes and the exponent\n of the result is calculated to give the final score.\n\n Ref: https://github.com/sbarratt/inception-score-pytorch/blob/master/inception_score.py # noqa\n\n Args:\n num_images (int): The number of evaluated generated samples.\n image_shape (tuple, optional): Image shape in order \"CHW\". Defaults to\n None.\n resize (bool, optional): Whether resize image to 299x299. Defaults to\n True.\n splits (int, optional): The number of groups. Defaults to 10.\n \"\"\"\n name = 'IS'\n\n def __init__(self,\n num_images,\n image_shape=None,\n bgr2rgb=False,\n resize=True,\n splits=10):\n super().__init__(num_images, image_shape)\n self.num_real_feeded = self.num_images\n self.resize = resize\n self.splits = splits\n self.bgr2rgb = bgr2rgb\n self.inception_model = inception_v3(\n pretrained=True, transform_input=False)\n self.device = 'cpu'\n if torch.cuda.is_available():\n self.inception_model = self.inception_model.cuda()\n self.device = 'cuda'\n self.inception_model.eval()\n\n def get_pred(self, x):\n \"\"\"Get prediction from inception model.\n\n Args:\n x (Tensor): Input tensor.\n\n Returns:\n np.array: Inception score.\n \"\"\"\n if self.resize:\n x = F.interpolate(x, size=(299, 299), mode='bilinear')\n x = self.inception_model(x)\n return F.softmax(x).data.cpu().numpy()\n\n def prepare(self):\n \"\"\"Prepare for evaluating models with this metric.\"\"\"\n self.preds = []\n\n @torch.no_grad()\n def feed_op(self, batch, mode):\n \"\"\"Feed data to the metric.\n\n Args:\n batch (Tensor): Input tensor.\n mode (str): The mode of current data batch. 
'reals' or 'fakes'.\n \"\"\"\n batch = batch.to(self.device)\n if self.bgr2rgb:\n batch = batch[:, [2, 1, 0], ...]\n if mode == 'reals':\n pass\n elif mode == 'fakes':\n self.preds.append(self.get_pred(batch))\n else:\n raise ValueError(f'{mode} is not a implemented feed mode.')\n\n @torch.no_grad()\n def summary(self):\n \"\"\"Summarize the results.\n\n Returns:\n dict | list: Summarized results.\n \"\"\"\n self.check()\n split_scores = []\n self.preds = np.concatenate(self.preds, axis=0)\n for k in range(self.splits):\n part = self.preds[k * (self.num_images // self.splits):(k + 1) *\n (self.num_images // self.splits), :]\n py = np.mean(part, axis=0)\n scores = []\n for i in range(part.shape[0]):\n pyx = part[i, :]\n scores.append(entropy(pyx, py))\n split_scores.append(np.exp(np.mean(scores)))\n\n mean, std = np.mean(split_scores), np.std(split_scores)\n self._result_str = f'mean: {mean:.3f}, std: {std:.3f}'\n return mean, std\n\n\n@METRICS.register_module()\nclass PPL(Metric):\n r\"\"\"Perceptual path length.\n\n Measure the difference between consecutive images (their VGG16\n embeddings) when interpolating between two random inputs. Drastic\n changes mean that multiple features have changed together and that\n they might be entangled.\n\n Ref: https://github.com/rosinality/stylegan2-pytorch/blob/master/ppl.py # noqa\n\n Args:\n num_images (int): The number of evaluated generated samples.\n image_shape (tuple, optional): Image shape in order \"CHW\". Defaults\n to None.\n crop (bool, optional): Whether crop images. Defaults to True.\n epsilon (float, optional): Epsilon parameter for path sampling.\n Defaults to 1e-4.\n space (str, optional): Latent space. Defaults to 'W'.\n sampling (str, optional): Sampling mode, whether sampling in full\n path or endpoints. Defaults to 'end'.\n latent_dim (int, optional): Latent dimension of input noise.\n Defaults to 512.\n \"\"\"\n name = 'PPL'\n\n def __init__(self,\n num_images,\n image_shape=None,\n crop=True,\n epsilon=1e-4,\n space='W',\n sampling='end',\n latent_dim=512):\n super().__init__(num_images, image_shape=image_shape)\n self.crop = crop\n self.epsilon = epsilon\n self.space = space\n self.sampling = sampling\n self.latent_dim = latent_dim\n self.num_images = num_images * 2\n self.num_real_feeded = self.num_images\n\n def prepare(self):\n \"\"\"Prepare for evaluating models with this metric.\"\"\"\n self.dist_list = []\n\n @torch.no_grad()\n def feed_op(self, minibatch, mode):\n \"\"\"Feed data to the metric.\n\n Args:\n batch (Tensor): Input tensor.\n mode (str): The mode of current data batch. 
'reals' or 'fakes'.\n \"\"\"\n if mode == 'reals':\n return\n # use minibatch's device type to initialize a lpips calculator\n if not hasattr(self, 'percept'):\n self.percept = PerceptualLoss(\n use_gpu=minibatch.device.type.startswith('cuda'))\n # crop and resize images\n if self.crop:\n c = minibatch.shape[2] // 8\n minibatch = minibatch[:, :, c * 3:c * 7, c * 2:c * 6]\n\n factor = minibatch.shape[2] // 256\n if factor > 1:\n minibatch = F.interpolate(\n minibatch,\n size=(256, 256),\n mode='bilinear',\n align_corners=False)\n # calculator and store lpips score\n distance = self.percept(minibatch[::2], minibatch[1::2]).view(\n minibatch.shape[0] // 2) / (\n self.epsilon**2)\n self.dist_list.append(distance.to('cpu').numpy())\n\n @torch.no_grad()\n def summary(self):\n \"\"\"Summarize the results.\n\n Returns:\n dict | list: Summarized results.\n \"\"\"\n distances = np.concatenate(self.dist_list, 0)\n lo = np.percentile(distances, 1, interpolation='lower')\n hi = np.percentile(distances, 99, interpolation='higher')\n filtered_dist = np.extract(\n np.logical_and(lo <= distances, distances <= hi), distances)\n ppl_score = filtered_dist.mean()\n self._result_str = f'{ppl_score:.1f}'\n return ppl_score\n\n def get_sampler(self, model, batch_size, sample_model):\n if sample_model == 'ema':\n generator = model.generator_ema\n else:\n generator = model.generator\n ppl_sampler = PPLSampler(generator, self.num_images, batch_size,\n self.space, self.sampling, self.epsilon,\n self.latent_dim)\n return ppl_sampler\n\n\nclass PPLSampler:\n \"\"\"StyleGAN series generator's sampling iterator for PPL metric.\n\n Args:\n generator (nn.Module): StyleGAN series' generator.\n num_images (int): The number of evaluated generated samples.\n batch_size (int): Batch size of generated images.\n space (str, optional): Latent space. Defaults to 'W'.\n sampling (str, optional): Sampling mode, whether sampling in full\n path or endpoints. 
Defaults to 'end'.\n epsilon (float, optional): Epsilon parameter for path sampling.\n Defaults to 1e-4.\n latent_dim (int, optional): Latent dimension of input noise.\n Defaults to 512.\n \"\"\"\n\n def __init__(self,\n generator,\n num_images,\n batch_size,\n space='W',\n sampling='end',\n epsilon=1e-4,\n latent_dim=512):\n assert space in ['Z', 'W']\n assert sampling in ['full', 'end']\n n_batch = num_images // batch_size\n\n resid = num_images - (n_batch * batch_size)\n self.batch_sizes = [batch_size] * n_batch + ([resid]\n if resid > 0 else [])\n self.device = get_module_device(generator)\n self.generator = generator\n self.latent_dim = latent_dim\n self.space = space\n self.sampling = sampling\n self.epsilon = epsilon\n\n def __iter__(self):\n self.idx = 0\n return self\n\n @torch.no_grad()\n def __next__(self):\n if self.idx >= len(self.batch_sizes):\n raise StopIteration\n batch = self.batch_sizes[self.idx]\n injected_noise = self.generator.make_injected_noise()\n inputs = torch.randn([batch * 2, self.latent_dim], device=self.device)\n if self.sampling == 'full':\n lerp_t = torch.rand(batch, device=self.device)\n else:\n lerp_t = torch.zeros(batch, device=self.device)\n\n if self.space == 'W':\n assert hasattr(self.generator, 'style_mapping')\n latent = self.generator.style_mapping(inputs)\n latent_t0, latent_t1 = latent[::2], latent[1::2]\n latent_e0 = torch.lerp(latent_t0, latent_t1, lerp_t[:, None])\n latent_e1 = torch.lerp(latent_t0, latent_t1,\n lerp_t[:, None] + self.epsilon)\n latent_e = torch.stack([latent_e0, latent_e1],\n 1).view(*latent.shape)\n image = self.generator([latent_e],\n input_is_latent=True,\n injected_noise=injected_noise)\n else:\n latent_t0, latent_t1 = inputs[::2], inputs[1::2]\n latent_e0 = slerp(latent_t0, latent_t1, lerp_t[:, None])\n latent_e1 = slerp(latent_t0, latent_t1,\n lerp_t[:, None] + self.epsilon)\n latent_e = torch.stack([latent_e0, latent_e1],\n 1).view(*inputs.shape)\n image = self.generator([latent_e],\n input_is_latent=False,\n injected_noise=injected_noise)\n\n self.idx += 1\n return image\n" ]
[ [ "torch.abs", "torch.jit.load", "torch.mean", "torch.nn.functional.softmax", "torch.cat", "numpy.asarray", "torch.zeros", "torch.sum", "numpy.concatenate", "torch.no_grad", "numpy.mean", "torch.cuda.is_available", "numpy.iscomplexobj", "torch.nn.functional.interpolate", "torch.distributed.get_rank", "numpy.trace", "torch.randn", "numpy.eye", "numpy.std", "torch.rand", "torch.lerp", "scipy.signal.fftconvolve", "torch.distributed.is_initialized", "torch.cuda.empty_cache", "torch.zeros_like", "numpy.cov", "torch.stack", "torch.distributed.get_world_size", "numpy.array", "numpy.logical_and", "numpy.diagonal", "numpy.abs", "numpy.isfinite", "torch.distributed.all_gather", "numpy.percentile", "torch.matmul", "scipy.stats.entropy", "numpy.prod", "scipy.linalg.sqrtm" ] ]
bonlime/pytorch-tools
[ "46fceaba823a613171efdb91436a8357b492af5e" ]
[ "pytorch_tools/optim/adamp.py" ]
[ "\"\"\"\nAdamP\nCopyright (c) 2020-present NAVER Corp.\nMIT license\n\n+ eps inside sqrt\n+ init sq with ones instead of zeros\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.optim.optimizer import Optimizer, required\nimport math\n\n\nclass AdamP(Optimizer):\n def __init__(\n self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-10, weight_decay=0, delta=0.1, wd_ratio=0.1, nesterov=False\n ):\n defaults = dict(\n lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, delta=delta, wd_ratio=wd_ratio, nesterov=nesterov\n )\n super(AdamP, self).__init__(params, defaults)\n\n def _channel_view(self, x):\n return x.view(x.size(0), -1)\n\n def _layer_view(self, x):\n return x.view(1, -1)\n\n def _cosine_similarity(self, x, y, eps, view_func):\n x = view_func(x)\n y = view_func(y)\n\n return F.cosine_similarity(x, y, dim=1, eps=eps).abs_()\n\n def _projection(self, p, grad, perturb, delta, wd_ratio, eps):\n wd = 1\n expand_size = [-1] + [1] * (len(p.shape) - 1)\n for view_func in [self._channel_view, self._layer_view]:\n\n cosine_sim = self._cosine_similarity(grad, p.data, eps, view_func)\n\n if cosine_sim.max() < delta / math.sqrt(view_func(p.data).size(1)):\n p_n = p.data / view_func(p.data).norm(dim=1).view(expand_size).add_(eps)\n perturb -= p_n * view_func(p_n * perturb).sum(dim=1).view(expand_size)\n wd = wd_ratio\n\n return perturb, wd\n\n return perturb, wd\n\n def step(self, closure=None):\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n for p in group[\"params\"]:\n if p.grad is None:\n continue\n\n grad = p.grad.data\n beta1, beta2 = group[\"betas\"]\n nesterov = group[\"nesterov\"]\n\n state = self.state[p]\n\n # State initialization\n if len(state) == 0:\n state[\"step\"] = 0\n state[\"exp_avg\"] = torch.zeros_like(p.data)\n # state['exp_avg_sq'] = torch.zeros_like(p.data) # original\n state[\"exp_avg_sq\"] = torch.ones_like(p.data) # much better init\n\n # Adam\n exp_avg, exp_avg_sq = state[\"exp_avg\"], state[\"exp_avg_sq\"]\n\n state[\"step\"] += 1\n bias_correction1 = 1 - beta1 ** state[\"step\"]\n bias_correction2 = 1 - beta2 ** state[\"step\"]\n\n exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)\n exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)\n\n # denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps']) # original\n denom = exp_avg_sq.add_(group[\"eps\"]).sqrt() / math.sqrt(bias_correction2) # eps inside sqrt\n step_size = group[\"lr\"] / bias_correction1\n\n if nesterov:\n perturb = (beta1 * exp_avg + (1 - beta1) * grad) / denom\n else:\n perturb = exp_avg / denom\n\n # Projection\n wd_ratio = 1\n if len(p.shape) > 1:\n perturb, wd_ratio = self._projection(\n p, grad, perturb, group[\"delta\"], group[\"wd_ratio\"], group[\"eps\"]\n )\n\n # Weight decay\n if group[\"weight_decay\"] > 0:\n p.data.mul_(1 - group[\"lr\"] * group[\"weight_decay\"] * wd_ratio)\n\n # Step\n p.data.add_(perturb, alpha=-step_size)\n\n return loss\n" ]
[ [ "torch.zeros_like", "torch.nn.functional.cosine_similarity", "torch.ones_like" ] ]
belac626/AeroPy
[ "4f045306427e08b742237b7393ce9602f1072d60", "4f045306427e08b742237b7393ce9602f1072d60" ]
[ "aeropy/geometry/test_inflections.py", "aeropy/geometry/other_methods.py" ]
[ "from __future__ import print_function\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nimport aeropy.xfoil_module as xf\r\nfrom aeropy.CST.module_2D import *\r\nfrom aeropy.aero_module import Reynolds\r\nfrom aeropy.airfoil_module import CST, create_x\r\n\r\nAu = [0.23993240191629417, 0.34468227138908186, 0.18125405377549103, \r\n 0.35371349126072665, 0.2440815012119143, 0.25724974995738387]\r\nAl = [0.18889012559339036, -0.24686758992053115, 0.077569769493868401,\r\n -0.547827192265256, -0.0047342206759065641, -0.23994805474814629]\r\nc_avian = .36 #m\r\ndeltaz = 0.0093943568219451313*c_avian\r\n\r\nairfoil = 'avian'\r\nx = create_x(c_avian, distribution = 'linear')\r\ny = CST(x, c_avian, [deltaz/2., deltaz/2.], Au = Au, Al= Al)\r\n# Create file for Xfoil to read coordinates\r\nxf.create_input(x, y['u'], y['l'], airfoil, different_x_upper_lower = False)\r\n\r\nData = xf.find_coefficients(airfoil, 2., Reynolds=Reynolds(10000, 30, c_avian), iteration=100, NACA=False)\r\nprint(Data)\r\n\r\n\r\npsi_u_inflection, psi_l_inflection = find_inflection_points(Au, Al)\r\nprint('upper: ', psi_u_inflection)\r\nprint('lower: ', psi_l_inflection)\r\npsi = np.linspace(0.001,0.999,100)\r\nxi = CST(psi, 1, [deltaz/2., deltaz/2.], Au, Al)\r\nplt.plot(psi, xi['u'], 'b', label = 'Upper outer mold line')\r\nplt.plot(psi, xi['l'],'b--', label = 'Lower outer mold line')\r\n\r\nxi_u_inflection = CST(psi_u_inflection, 1, [deltaz/2., deltaz/2.], Au, Al)\r\nplt.scatter(psi_u_inflection, xi_u_inflection['u'])\r\n\r\nxi_l_inflection = CST(psi_l_inflection, 1, [deltaz/2., deltaz/2.], Au, Al)\r\nplt.scatter(psi_l_inflection, xi_l_inflection['l'])\r\nplt.xlabel('$\\psi$', fontsize = 40)\r\nplt.ylabel(r'$\\xi$', fontsize = 40)\r\nplt.grid()\r\n# plt.show()\r\n\r\n# plt.figure()\r\n# plt.plot(psi,dxi_u(psi,Au, deltaz), 'g', label = r'$d\\xi_u$')\r\n# plt.plot(psi,dxi_l(psi,Al,deltaz), 'g--', label = r'$d\\xi_l$')\r\n\r\nplt.plot(psi,ddxi_u(psi,Au), 'r', label = 'Upper second derivative')\r\nplt.plot(psi,ddxi_l(psi,Al), 'r--', label = 'Lower second derivative')\r\n\r\n# Plot camber\r\ncamber = calculate_camber(psi, Au, Al, deltaz/c_avian)\r\nplt.plot(psi,camber, 'k', label = 'camber')\r\npsi_camber, xi_camber = calculate_max_camber(Au, Al, deltaz/c_avian)\r\nprint(psi_camber, xi_camber, type(psi_camber), type(xi_camber))\r\nprint('average camber: ', calculate_average_camber( Au, Al, deltaz/c_avian))\r\nplt.scatter(psi_camber, xi_camber, label = 'Inflection points')\r\nplt.ylim([-0.1,0.1])\r\nplt.legend(loc='best')\r\nplt.show()\r\n", "import math\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\ndef hicks_henne(x, z, alfa):\r\n z = np.array(z)\r\n for j in range(len(x)):\r\n x_j = x[j]\r\n f_1 = math.sqrt(x_j)*(1-x_j)/math.exp(15*x_j)\r\n f_2 = math.sqrt(x_j)*(1-x_j)/math.exp(15*x_j)\r\n f_3 = math.sqrt(x_j)*(1-x_j)/math.exp(15*x_j)\r\n f_4 = math.sqrt(x_j)*(1-x_j)/math.exp(15*x_j)\r\n f_5 = math.sqrt(x_j)*(1-x_j)/math.exp(10*x_j)\r\n for i in range(len(alfa)):\r\n b_i = math.sin(math.pi*x[j]**(math.log(0.5)/math.log(x_list[i])))**t[i]\r\n z[j] += alfa[i]*b_i\r\n return z\r\n\r\ndef B(x, k, i, t):\r\n if k == 0:\r\n return 1.0 if t[i] <= x < t[i+1] else 0.0\r\n if t[i+k] == t[i]:\r\n c1 = 0.0\r\n else:\r\n c1 = (x - t[i])/(t[i+k] - t[i]) * B(x, k-1, i, t)\r\n if t[i+k+1] == t[i+1]:\r\n c2 = 0.0\r\n else:\r\n c2 = (t[i+k+1] - x)/(t[i+k+1] - t[i+1]) * B(x, k-1, i+1, t)\r\n return c1 + c2\r\n\r\ndef bspline(x, t, c, k):\r\n n = len(t) - k - 1\r\n assert (n >= k+1) and (len(c) >= n)\r\n return 
sum(c[i] * B(x, k, i, t) for i in range(n)) \r\n\r\n# Bezier Curve\r\nk = 2\r\nt = [0, 1, 2, 3, 4, 5, 6]\r\nc = [-1, 2, 0, -1]\r\nx = np.linspace(1.5, 4.5, 50)\r\ny_bezier = []\r\nfor x_i in x:\r\n y_bezier.append(bspline(x_i, t, c ,k))\r\n \r\n# Hicks-Henne\r\ny_hh = hicks_henne(x, y_bezier, [.1], [.0000001],[4.]) \r\nplt.plot(x,y_bezier,label='Bezier')\r\nplt.plot(x,y_hh,label='Hickes-Henne')\r\nplt.xlabel('x')\r\nplt.ylabel('z')\r\nplt.legend()\r\nplt.show()" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.scatter", "numpy.linspace", "matplotlib.pyplot.ylim", "matplotlib.pyplot.plot", "matplotlib.pyplot.grid", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ], [ "matplotlib.pyplot.legend", "numpy.linspace", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
leandro-santiago/bloomwisard
[ "4c02610c4ef2d2cf8424797c8a815da182ca2383" ]
[ "experiment/diabetes/overall_info.py" ]
[ "import numpy as np\nimport math\nimport sys\nfrom timeit import default_timer as timer\nsys.path.append(\"../../\") \nfrom core import wnn\nfrom encoding import thermometer\nfrom encoding import util\n\n#Load Diabetes data\nbase_path = \"../../dataset/diabetes/\"\n\n#2/3 Test\nbits_encoding = 20\ntrain_data, train_label, test_data, test_label, data_min, data_max = util.load_3data(base_path)\n\nths = []\n\nfor i in range(len(data_max)):\n ths.append(thermometer.Thermometer(data_min[i], data_max[i], bits_encoding))\n\ntrain_bin = []\ntest_bin = []\n\ni = 0\nfor data in train_data:\n train_bin.append(np.array([], dtype=bool))\n t = 0\n for v in data:\n binarr = ths[t].binarize(v)\n train_bin[i] = np.append(train_bin[i], binarr) \n t += 1\n i += 1\n\n\ni = 0\nfor data in test_data:\n test_bin.append(np.array([], dtype=bool))\n t = 0\n for v in data:\n binarr = ths[t].binarize(v)\n test_bin[i] = np.append(test_bin[i], binarr) \n t += 1\n i += 1\n\n#K-fold\nfolds_train_bin = []\nfolds_test_bin = []\nfolds_train_label = []\nfolds_test_label = []\nfolds_thermomethers = []\nk = 10\n\nfor i in range(k):\n aux_train_data, aux_train_label, aux_test_data, aux_test_label, data_min, data_max = util.load_fold(base_path, i)\n folds_train_label.append(aux_train_label)\n folds_test_label.append(aux_test_label)\n \n #print data_min, data_max\n folds_thermomethers.append([])\n\n for t in range(len(data_max)):\n folds_thermomethers[i].append(thermometer.Thermometer(data_min[t], data_max[t], bits_encoding))\n\n folds_train_bin.append([])\n folds_test_bin.append([])\n\n j = 0\n for data in aux_train_data:\n folds_train_bin[i].append([])\n folds_train_bin[i][j].append(np.array([], dtype=bool))\n t = 0\n for v in data:\n binarr = folds_thermomethers[i][t].binarize(v)\n folds_train_bin[i][j] = np.append(folds_train_bin[i][j], binarr) \n t += 1\n j += 1\n\n j = 0\n for data in aux_test_data:\n folds_test_bin[i].append([])\n folds_test_bin[i][j].append(np.array([], dtype=bool))\n t = 0\n for v in data:\n binarr = folds_thermomethers[i][t].binarize(v)\n folds_test_bin[i][j] = np.append(folds_test_bin[i][j], binarr) \n t += 1\n j += 1\n\n\n#Parameters\nnum_classes = 2\ntuple_bit = 20\ntest_length = len(test_label)\nnum_runs = 20\n\nacc_list = []\ntraining_time = []\ntesting_time = []\n\ndacc_list = []\ndtraining_time = []\ndtesting_time = []\n\nbacc_list = []\nbtraining_time = []\nbtesting_time = []\nentry_size = len(train_bin[0])\n\n#Wisard\nfor r in range(num_runs):\n wisard = wnn.Wisard(entry_size, tuple_bit, num_classes)\n\n #Training\n start = timer()\n wisard.train(train_bin, train_label)\n training_time.append(timer() - start)\n\n #Testing\n start = timer()\n rank_result = wisard.rank(test_bin) \n testing_time.append(timer() - start)\n\n #Accuracy\n num_hits = 0\n\n for i in range(test_length):\n if rank_result[i] == test_label[i]:\n num_hits += 1\n\n acc_list.append(float(num_hits)/float(test_length))\n\nwisard_stats = wisard.stats()\ndel wisard\n\n#DictWisard\nfor r in range(num_runs):\n dwisard = wnn.DictWisard(entry_size, tuple_bit, num_classes)\n\n #Training\n start = timer()\n dwisard.train(train_bin, train_label)\n dtraining_time.append(timer() - start)\n \n #Testing\n start = timer()\n rank_result = dwisard.rank(test_bin) \n dtesting_time.append(timer() - start)\n\n #Accuracy\n num_hits = 0\n\n for i in range(test_length):\n #if rank_result[i] == test_label[i]:\n if not (rank_result[i] ^ test_label[i]):\n num_hits += 1\n\n dacc_list.append(float(num_hits)/float(test_length))\ndwisard_stats = 
dwisard.stats()\ndel dwisard\n\n#Bloom Wisard\n#capacity = len(train_label)\ncapacity = 50\nerror = 0.1\nerrors = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]\n\nb_stats = []\nb_training_time = [] \nb_testing_time = [] \nb_acc = []\nb_error = []\n\nfor e in range(len(errors)):\n btraining_time = []\n btesting_time = []\n bacc_list = []\n\n for r in range(num_runs):\n bwisard = wnn.BloomWisard(entry_size, tuple_bit, num_classes, capacity, error=errors[e])\n\n #Training\n start = timer()\n bwisard.train(train_bin, train_label)\n btraining_time.append(timer() - start)\n\n #Testing\n start = timer()\n rank_result = bwisard.rank(test_bin) \n btesting_time.append(timer() - start)\n \n #Accuracy\n num_hits = 0\n\n for i in range(test_length):\n if rank_result[i] == test_label[i]:\n num_hits += 1\n\n bacc_list.append(float(num_hits)/float(test_length))\n \n b_training_time.append(btraining_time)\n b_testing_time.append(btesting_time)\n b_acc.append(bacc_list)\n b_stats.append(bwisard.stats())\n b_error.append(bwisard.error())\n#bwisard_stats = bwisard.stats()\n#berror = bwisard.error()\ndel bwisard\n\n#K-fold cross validation ---------------------------------------------------------\n#Wisard\n\ntest_length = len(folds_test_label[0])\nkf_training_time = []\nkf_testing_time = []\nkf_wacc_list = []\n\nfor r in range(num_runs):\n for f in range(k):\n wisard = wnn.Wisard(entry_size, tuple_bit, num_classes)\n \n #Training\n start = timer()\n wisard.train(folds_train_bin[f], folds_train_label[f])\n kf_training_time.append(timer() - start)\n \n #Testing\n start = timer()\n rank_result = wisard.rank(folds_test_bin[f]) \n kf_testing_time.append(timer() - start)\n\n #Accuracy\n num_hits = 0\n\n for i in range(test_length):\n if rank_result[i] == folds_test_label[f][i]:\n num_hits += 1\n\n kf_wacc_list.append(float(num_hits)/float(test_length))\n\nkf_wisard_stats = wisard.stats()\ndel wisard\n\n#DictWisard\nkf_dtraining_time = []\nkf_dtesting_time = []\nkf_dacc_list = []\nfor r in range(num_runs):\n for f in range(k):\n dwisard = wnn.DictWisard(entry_size, tuple_bit, num_classes)\n\n #Training\n start = timer()\n dwisard.train(folds_train_bin[f], folds_train_label[f])\n kf_dtraining_time.append(timer() - start)\n \n #Testing\n start = timer()\n rank_result = dwisard.rank(folds_test_bin[f]) \n kf_dtesting_time.append(timer() - start)\n\n #Accuracy\n num_hits = 0\n\n for i in range(test_length):\n #if rank_result[i] == folds_test_label[f][i]:\n if not (rank_result[i] ^ folds_test_label[f][i]):\n num_hits += 1\n\n kf_dacc_list.append(float(num_hits)/float(test_length))\nkf_dwisard_stats = dwisard.stats()\ndel dwisard\n\n\n#Bloom Wisard\n#capacity2 = len(folds_train_label[0])\ncapacity2 = 50\nerror = 0.1\nkf_btraining_time = []\nkf_btesting_time = []\nkf_bacc_list = []\n\nkb_stats = []\nkb_training_time = [] \nkb_testing_time = [] \nkb_acc = []\nkb_error = []\n\nfor e in range(len(errors)):\n kf_btraining_time = []\n kf_btesting_time = []\n kf_bacc_list = []\n\n for r in range(num_runs):\n for f in range(k):\n bwisard = wnn.BloomWisard(entry_size, tuple_bit, num_classes, capacity2, error=errors[e])\n \n #Training\n start = timer()\n bwisard.train(folds_train_bin[f], folds_train_label[f])\n kf_btraining_time.append(timer() - start)\n\n #Testing\n start = timer()\n rank_result = bwisard.rank(folds_test_bin[f]) \n kf_btesting_time.append(timer() - start)\n \n #Accuracy\n num_hits = 0\n\n for i in range(test_length):\n if rank_result[i] == folds_test_label[f][i]:\n num_hits += 1\n\n 
kf_bacc_list.append(float(num_hits)/float(test_length))\n kb_training_time.append(kf_btraining_time)\n kb_testing_time.append(kf_btesting_time)\n kb_acc.append(kf_bacc_list)\n kb_stats.append(bwisard.stats())\n kb_error.append(bwisard.error())\n#kf_bwisard_stats = bwisard.stats()\n#kf_berror = bwisard.error()\ndel bwisard\n\n#Writing output file\nwith open(\"stats.csv\", \"w\") as out:\n out.write(\"WNN; Entry size; Tuple size; # Rams; Capacity; Error; # Hashes; Ram size; # Discriminators; Total Bits; Acc(%); Acc Std; Training(s); Training Std; Testing(s); Testing Std; Runs;\\n\")\n out.write(\"Wisard;\" + str(entry_size) + \";\" + str(tuple_bit) + \";\" + str(wisard_stats[0]) + \";;;;\" + str(wisard_stats[1]) + \";\" + str(num_classes) + \";\" + str(wisard_stats[3]) + \";\")\n out.write(str(np.mean(acc_list)) + \";\" + str(np.std(acc_list)) + \";\" + str(np.mean(training_time)) + \";\" + str(np.std(training_time)) + \";\" + str(np.mean(testing_time)) + \";\" + str(np.std(testing_time)) + \";\" + str(num_runs) + \";\\n\")\n\n out.write(\"Dict Wisard;\" + str(entry_size) + \";\" + str(tuple_bit) + \";\" + str(dwisard_stats[0]) + \";;;;\" + str(dwisard_stats[1]) + \";\" + str(num_classes) + \";\" + str(dwisard_stats[2]) + \";\")\n out.write(str(np.mean(dacc_list)) + \";\" + str(np.std(dacc_list)) + \";\" + str(np.mean(dtraining_time)) + \";\" + str(np.std(dtraining_time)) + \";\" + str(np.mean(dtesting_time)) + \";\" + str(np.std(dtesting_time)) + \";\" + str(num_runs) + \";\\n\")\n\n for i in range(len(errors)):\n out.write(\"Bloom Wisard;\" + str(entry_size) + \";\" + str(tuple_bit) + \";\" + str(b_stats[i][0]) + \";\" + str(capacity) + \";\" + str(b_error[i]) + \";\" + str(b_stats[i][4]) + \";\" + str(b_stats[i][1]) + \";\" + str(num_classes) + \";\" + str(b_stats[i][3]) + \";\")\n out.write(str(np.mean(b_acc[i])) + \";\" + str(np.std(b_acc[i])) + \";\" + str(np.mean(b_training_time[i])) + \";\" + str(np.std(b_training_time[i])) + \";\" + str(np.mean(b_testing_time[i])) + \";\" + str(np.std(b_testing_time[i])) + \";\" + str(num_runs) + \";\\n\")\n\n out.write(\"Wisard-10fold;\" + str(entry_size) + \";\" + str(tuple_bit) + \";\" + str(kf_wisard_stats[0]) + \";;;;\" + str(kf_wisard_stats[1]) + \";\" + str(num_classes) + \";\" + str(kf_wisard_stats[3]) + \";\")\n out.write(str(np.mean(kf_wacc_list)) + \";\" + str(np.std(kf_wacc_list)) + \";\" + str(np.mean(kf_training_time)) + \";\" + str(np.std(kf_training_time)) + \";\" + str(np.mean(kf_testing_time)) + \";\" + str(np.std(kf_testing_time)) + \";\" + str(num_runs) + \";\\n\")\n\n out.write(\"Dict Wisard-10fold;\" + str(entry_size) + \";\" + str(tuple_bit) + \";\" + str(kf_dwisard_stats[0]) + \";;;;\" + str(kf_dwisard_stats[1]) + \";\" + str(num_classes) + \";\" + str(kf_dwisard_stats[2]) + \";\")\n out.write(str(np.mean(kf_dacc_list)) + \";\" + str(np.std(kf_dacc_list)) + \";\" + str(np.mean(kf_dtraining_time)) + \";\" + str(np.std(kf_dtraining_time)) + \";\" + str(np.mean(kf_dtesting_time)) + \";\" + str(np.std(kf_dtesting_time)) + \";\" + str(num_runs) + \";\\n\")\n\n for i in range(len(errors)):\n out.write(\"Bloom Wisard-10fold;\" + str(entry_size) + \";\" + str(tuple_bit) + \";\" + str(kb_stats[i][0]) + \";\" + str(capacity2) + \";\" + str(kb_error[i]) + \";\" + str(kb_stats[i][4]) + \";\" + str(kb_stats[i][1]) + \";\" + str(num_classes) + \";\" + str(kb_stats[i][3]) + \";\")\n out.write(str(np.mean(kb_acc[i])) + \";\" + str(np.std(kb_acc[i])) + \";\" + str(np.mean(kb_training_time[i])) + \";\" + str(np.std(kb_training_time[i])) + 
\";\" + str(np.mean(kb_testing_time[i])) + \";\" + str(np.std(kb_testing_time[i])) + \";\" + str(num_runs) + \";\\n\")\n" ]
[ [ "numpy.std", "numpy.append", "numpy.array", "numpy.mean" ] ]
gopalakrishna-r/tensorpack
[ "3aa99653d1201a49f6be0542a764d335153e84f6" ]
[ "tensorpack/contrib/keras.py" ]
[ "# -*- coding: utf-8 -*-\n# File: keras.py\n\nfrom contextlib import contextmanager\nimport six\nimport tensorflow as tf\nfrom tensorflow import keras\n\nfrom ..callbacks import Callback, CallbackToHook, InferenceRunner, InferenceRunnerBase, ScalarStats\nfrom ..models.regularize import regularize_cost_from_collection\nfrom ..tfutils.collection import backup_collection, restore_collection\nfrom ..tfutils.common import get_op_tensor_name\nfrom ..tfutils.scope_utils import cached_name_scope\nfrom ..tfutils.summary import add_moving_summary\nfrom ..tfutils.tower import get_current_tower_context\nfrom ..train import SimpleTrainer, SyncMultiGPUTrainerParameterServer, Trainer\nfrom ..train.interface import apply_default_prefetch\nfrom ..train.trainers import DistributedTrainerBase\nfrom ..utils import logger\nfrom ..utils.gpu import get_nr_gpu\n\n__all__ = ['KerasPhaseCallback', 'setup_keras_trainer', 'KerasModel']\n\n\nTOTAL_LOSS_NAME = 'total_loss'\n\n\ndef _check_name(tensor, name):\n tensorname = get_op_tensor_name(tensor.name)[0]\n assert tensorname.split('/')[-1] == name, \\\n \"{} does not match {}, you may have name conflict somewhere!\".format(tensor.name, name)\n\n\nclass KerasModelCaller(object):\n \"\"\"\n Keras model doesn't support variable scope reuse.\n This is a wrapper around keras model to mimic reuse.\n \"\"\"\n def __init__(self, get_model):\n self.get_model = get_model\n self.cached_model = None\n\n def __call__(self, *input_tensors):\n \"\"\"\n Args:\n input_tensors ([tf.Tensor])\n Returns:\n output tensors of this tower, evaluated with the input tensors.\n \"\"\"\n reuse = tf.compat.v1.get_variable_scope().reuse\n\n old_trainable_names = {x.name for x in tf.trainable_variables()}\n trainable_backup = backup_collection([tf.GraphKeys.TRAINABLE_VARIABLES])\n update_ops_backup = backup_collection([tf.GraphKeys.UPDATE_OPS])\n\n def post_process_model(model):\n added_trainable_names = {x.name for x in tf.trainable_variables()}\n restore_collection(trainable_backup)\n\n for v in model.weights:\n # In Keras, the collection is not respected and could contain non-trainable vars.\n # We put M.weights into the collection instead.\n if v.name not in old_trainable_names and v.name in added_trainable_names:\n tf.add_to_collection(tf.GraphKeys.TRAINABLE_VARIABLES, v)\n new_trainable_names = {x.name for x in tf.trainable_variables()}\n\n for n in added_trainable_names:\n if n not in new_trainable_names:\n logger.warn(\"Keras created trainable variable '{}' which is actually not trainable. 
\"\n \"This was automatically corrected.\".format(n))\n\n # Keras models might not use this collection at all (in some versions).\n # This is a BC-breaking change of tf.keras: https://github.com/tensorflow/tensorflow/issues/19643\n restore_collection(update_ops_backup)\n for op in model.updates:\n tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, op)\n\n if self.cached_model is None:\n assert not reuse\n\n # starting from some versions, tf.keras starts to prepend name scope to variable names ..\n @contextmanager\n def clear_tower0_name_scope():\n ns = tf.get_default_graph().get_name_scope()\n if ns == 'tower0':\n with tf.name_scope('/'):\n yield\n else:\n yield\n\n with clear_tower0_name_scope():\n model = self.cached_model = self.get_model(*input_tensors)\n assert isinstance(model, keras.Model), \\\n \"Your get_model function should return a `tf.keras.Model`!\"\n outputs = model.outputs\n elif reuse:\n # use the cached Keras model to mimic reuse\n # NOTE: ctx.is_training won't be useful inside model,\n # because inference will always use the cached Keras model\n model = self.cached_model\n outputs = model.call(*input_tensors)\n else:\n # create new Keras model if not reuse\n model = self.get_model(*input_tensors)\n outputs = model.outputs\n\n post_process_model(model)\n\n if isinstance(outputs, list) and len(outputs) == 1:\n return outputs[0]\n return outputs\n\n\nclass KerasPhaseCallback(Callback):\n \"\"\"\n Keras needs an extra input if learning_phase is used by the model\n This callback will be used:\n 1. By the trainer with isTrain=True\n 2. By InferenceRunner with isTrain=False, in the form of hooks\n\n If you use :class:`KerasModel` or :func:`setup_keras_trainer`,\n this callback will be automatically added when needed.\n \"\"\"\n def __init__(self, isTrain):\n assert isinstance(isTrain, bool), isTrain\n self._isTrain = isTrain\n self._learning_phase = keras.backend.learning_phase()\n\n def _setup_graph(self):\n logger.info(\"Using Keras learning phase {} in the graph!\".format(\n self._learning_phase.name))\n cbs = self.trainer._callbacks.cbs\n for cb in cbs:\n # XXX HACK\n if isinstance(cb, InferenceRunnerBase):\n h = CallbackToHook(KerasPhaseCallback(False))\n cb.register_hook(h)\n\n def _before_run(self, ctx):\n return tf.train.SessionRunArgs(\n fetches=[], feed_dict={self._learning_phase: int(self._isTrain)})\n\n\ndef setup_keras_trainer(\n trainer, get_model,\n input_signature, target_signature,\n input, optimizer, loss, metrics):\n \"\"\"\n Args:\n trainer (SingleCostTrainer):\n get_model (input1, input2, ... 
-> tf.keras.Model):\n A function which takes tensors, builds and returns a Keras model.\n It will be part of the tower function.\n input (InputSource):\n optimizer (tf.train.Optimizer):\n loss, metrics: list of strings\n \"\"\"\n assert isinstance(optimizer, tf.train.Optimizer), optimizer\n assert isinstance(loss, list), loss\n assert len(loss) >= 1, \"No loss was given!\"\n assert isinstance(metrics, list), metrics\n model_caller = KerasModelCaller(get_model)\n\n nr_inputs = len(input_signature)\n\n def get_cost(*inputs):\n ctx = get_current_tower_context()\n input_tensors = list(inputs[:nr_inputs])\n target_tensors = list(inputs[nr_inputs:])\n # TODO mapping between target tensors & output tensors\n\n outputs = model_caller(*input_tensors)\n\n if isinstance(outputs, tf.Tensor):\n outputs = [outputs]\n assert len(outputs) == len(target_tensors), \\\n \"len({}) != len({})\".format(str(outputs), str(target_tensors))\n assert len(outputs) == len(loss), \\\n \"len({}) != len({})\".format(str(outputs), str(loss))\n\n loss_tensors = []\n for idx, loss_name in enumerate(loss):\n with cached_name_scope('keras_loss', top_level=False):\n loss_fn = keras.losses.get(loss_name)\n curr_loss = loss_fn(target_tensors[idx], outputs[idx])\n curr_loss = tf.reduce_mean(curr_loss, name=loss_name)\n _check_name(curr_loss, loss_name)\n loss_tensors.append(curr_loss)\n\n loss_reg = regularize_cost_from_collection()\n if loss_reg is not None:\n total_loss = tf.add_n(loss_tensors + [loss_reg], name=TOTAL_LOSS_NAME)\n add_moving_summary(loss_reg, total_loss, *loss_tensors)\n else:\n total_loss = tf.add_n(loss_tensors, name=TOTAL_LOSS_NAME)\n add_moving_summary(total_loss, *loss_tensors)\n\n if metrics and (ctx.is_main_training_tower or not ctx.is_training):\n # for list: one metric for each output\n metric_tensors = []\n for oid, metric_name in enumerate(metrics):\n output_tensor = outputs[oid]\n target_tensor = target_tensors[oid] # TODO may not have the same mapping?\n with cached_name_scope('keras_metric', top_level=False):\n metric_fn = keras.metrics.get(metric_name)\n metric_tensor = metric_fn(target_tensor, output_tensor)\n metric_tensor = tf.reduce_mean(metric_tensor, name=metric_name)\n _check_name(metric_tensor, metric_name)\n # check name conflict here\n metric_tensors.append(metric_tensor)\n add_moving_summary(*metric_tensors)\n\n return total_loss\n\n trainer.setup_graph(\n input_signature + target_signature,\n input,\n get_cost,\n lambda: optimizer)\n if isinstance(keras.backend.learning_phase(), tf.Tensor) and len(keras.backend.learning_phase().consumers()) > 0:\n # check if learning_phase is used in this model\n trainer.register_callback(KerasPhaseCallback(True))\n\n\nclass KerasModel(object):\n def __init__(self, get_model, input_signature=None, target_signature=None,\n input=None, trainer=None):\n \"\"\"\n Args:\n get_model (input1, input2, ... -> keras.Model):\n A function which takes tensors, builds and returns a Keras model.\n It will be part of the tower function.\n input_signature ([tf.TensorSpec]): required. The signature for inputs.\n target_signature ([tf.TensorSpec]): required. 
The signature for the targets tensors.\n input (InputSource | DataFlow): the InputSource or DataFlow where the input data comes from.\n trainer (Trainer): the default will check the number of available GPUs and use them all.\n \"\"\"\n self.get_model = get_model\n assert callable(get_model), get_model\n self.input_signature = input_signature\n self.target_signature = target_signature\n if trainer is None:\n nr_gpu = get_nr_gpu()\n if nr_gpu <= 1:\n trainer = SimpleTrainer()\n else:\n # the default multi-gpu trainer\n trainer = SyncMultiGPUTrainerParameterServer(nr_gpu)\n assert isinstance(trainer, Trainer), trainer\n assert not isinstance(trainer, DistributedTrainerBase)\n\n assert input is not None, \"Argument 'input' is required!\"\n self.input = apply_default_prefetch(input, trainer)\n self.trainer = trainer\n\n def compile(self, optimizer, loss, metrics=None):\n \"\"\"\n Args:\n optimizer (tf.train.Optimizer):\n loss, metrics: string or list of strings\n \"\"\"\n if isinstance(loss, six.string_types):\n loss = [loss]\n if metrics is None:\n metrics = []\n if isinstance(metrics, six.string_types):\n metrics = [metrics]\n\n self._stats_to_inference = loss + metrics + [TOTAL_LOSS_NAME]\n setup_keras_trainer(\n self.trainer, get_model=self.get_model,\n input_signature=self.input_signature,\n target_signature=self.target_signature,\n input=self.input,\n optimizer=optimizer,\n loss=loss,\n metrics=metrics)\n\n def fit(self, validation_data=None, **kwargs):\n \"\"\"\n Args:\n validation_data (DataFlow or InputSource): to be used for inference.\n The inference callback is added as the first in the callback list.\n If you need to use it in a different order, please write it in the callback list manually.\n kwargs: same arguments as :meth:`Trainer.train_with_defaults`.\n \"\"\"\n callbacks = kwargs.pop('callbacks', [])\n if validation_data is not None:\n # There is no way to guess where users want this callback. So we have to choose one.\n # MinSaver may need results from this callback,\n # so we put this callback at first.\n callbacks.insert(0, InferenceRunner(\n validation_data, ScalarStats(self._stats_to_inference)))\n self.trainer.train_with_defaults(callbacks=callbacks, **kwargs)\n" ]
[ [ "tensorflow.keras.metrics.get", "tensorflow.reduce_mean", "tensorflow.keras.backend.learning_phase", "tensorflow.trainable_variables", "tensorflow.add_to_collection", "tensorflow.keras.losses.get", "tensorflow.name_scope", "tensorflow.compat.v1.get_variable_scope", "tensorflow.get_default_graph", "tensorflow.add_n" ] ]
kuzn137/DisasterResponsePipeline
[ "d2b87410bcd41210a25eb5b533f78f3c3cbe0fe6" ]
[ "models/train_classifier.py" ]
[ "import sys\n# import libraries\nimport nltk\nimport pickle\nfrom sklearn.base import BaseEstimator, TransformerMixin\nnltk.download(['punkt', 'wordnet', 'averaged_perceptron_tagger'])\nfrom sklearn.model_selection import GridSearchCV\nimport nltk\nnltk.download('punkt') \nnltk.download('stopwords')\n# import libraries\nimport nltk\nimport pickle\nnltk.download(['punkt', 'wordnet', 'averaged_perceptron_tagger'])\nfrom sklearn.model_selection import GridSearchCV\nimport nltk\nnltk.download('punkt') \nnltk.download('stopwords')\nimport pandas as pd\nfrom nltk.tokenize import word_tokenize\nfrom sqlalchemy import create_engine\nfrom sklearn.preprocessing import OneHotEncoder\nfrom nltk.corpus import stopwords\nfrom nltk.stem.wordnet import WordNetLemmatizer\nfrom sklearn.ensemble import RandomForestClassifier\nimport re\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.pipeline import Pipeline, FeatureUnion\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\nurl_regex = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'\nimport re\nfrom sklearn.multioutput import MultiOutputClassifier\nimport numpy as np\nimport warnings\nwarnings.simplefilter('ignore')\nfrom sklearn.metrics import f1_score, accuracy_score, classification_report, fbeta_score, make_scorer\n\ndatabase_filepath = './data/DisasterResponse.db'\ndef load_data(database_filepath):\n \"\"\"Function to load data for the model.\n Args: \n database_path\n Returns: \n pd.dataframes: incoming features vector X, outcome vector Y; list: categories names\n \"\"\"\n engine = create_engine('sqlite:///'+ database_filepath)\n df = pd.read_sql(\"SELECT * FROM DisasterResponse\", engine)\n #exclude colums that are not needed in model\n col=[i for i in df.columns if i not in ['id','original', 'genre']]\n X = df[\"message\"]\n Y = df.iloc[:,4:]\n #global category_names\n category_names = Y.columns\n return X, Y, category_names\n\ndef tokenize(text):\n \"\"\"Function tokenize text.\n Args: \n messages text\n Returns: \n Tokens\n \"\"\"\n detected_urls = re.findall(url_regex, text)\n for url in detected_urls:\n text = text.replace(url, \"urlplaceholder\")\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n\n return clean_tokens\n \ndef evaluate_model(model, X_test, y_test, category_names):\n \"\"\"Function to print model evalution scores, comparing real test data and predicted\n Args: \n model, incoming features dataframe X_test, test labeles dataframe y_test, list of category names\n Returns: \n None\n \"\"\"\n y_pred = model.predict(X_test)\n labels = np.unique(y_pred)\n print(labels)\n #print out score for each class and mean scores, including precision, recall, f1 score\n print(classification_report(y_test.values, y_pred, target_names=category_names.values))\n\ndef build_model():\n pipeline = Pipeline([\n ('vect', CountVectorizer(tokenizer = tokenize))\n , ('tfidf', TfidfTransformer())\n , ('clf', MultiOutputClassifier(RandomForestClassifier()))])\n\n parameters = {'vect__min_df': [1, 5],\n # 'tfidf__use_idf':[True, False],\n 'clf__estimator__n_estimators':[50, 100], \n 'clf__estimator__min_samples_split':[5],\n #'vect__max_features': (5000, 10000)\n \n }\n\n #cv = GridSearchCV(estimator=pipeline, param_grid=parameters, verbose=3) \n #my_scorer = make_scorer(f1_score(y_test, y_pred, average='macro'), greater_is_better=True)\n 
cv = GridSearchCV(pipeline, param_grid=parameters, scoring=\"f1_weighted\")\n\n\n return cv\n \n\n \n\ndef save_model(model, model_filepath):\n \"\"\"Function saves model to pickle file.\n Args: \n model, path where to save model\n Returns: \n None\n \"\"\"\n pickle.dump(model, open(model_filepath, 'wb'))\n\n\ndef main():\n if len(sys.argv) == 3:\n database_filepath, model_filepath = sys.argv[1:]\n print('Loading data...\\n DATABASE: {}'.format(database_filepath))\n X, Y, category_names = load_data(database_filepath)\n X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)\n \n print('Building model...')\n model = build_model()\n \n print('Training model...')\n model.fit(X_train, Y_train)\n \n print('Evaluating model...')\n evaluate_model(model, X_test, Y_test, category_names)\n\n print('Saving model...\\n MODEL: {}'.format(model_filepath))\n save_model(model, model_filepath)\n\n print('Trained model saved!')\n\n else:\n print('Please provide the filepath of the disaster messages database '\\\n 'as the first argument and the filepath of the pickle file to '\\\n 'save the model to as the second argument. \\n\\nExample: python '\\\n 'train_classifier.py ../data/DisasterResponse.db classifier.pkl')\n\n\nif __name__ == '__main__':\n main()" ]
[ [ "sklearn.model_selection.GridSearchCV", "sklearn.ensemble.RandomForestClassifier", "numpy.unique", "sklearn.model_selection.train_test_split", "sklearn.feature_extraction.text.CountVectorizer", "sklearn.feature_extraction.text.TfidfTransformer", "sklearn.metrics.classification_report", "pandas.read_sql" ] ]
lalonderodney/INN-Inflated-Neural-Nets
[ "50ce42e4584815d066d0fd39a7f12f55130910e5" ]
[ "custom_data_aug.py" ]
[ "'''\nINN: Inflated Neural Networks for IPMN Diagnosis\nOriginal Paper by Rodney LaLonde, Irene Tanner, Katerina Nikiforaki, Georgios Z. Papadakis, Pujan Kandel,\nCandice W. Bolan, Michael B. Wallace, Ulas Bagci\n(https://link.springer.com/chapter/10.1007/978-3-030-32254-0_12, https://arxiv.org/abs/1804.04241)\nCode written by: Rodney LaLonde\nIf you use significant portions of this code or the ideas from our paper, please cite it :)\nIf you have any questions, please email me at lalonde@knights.ucf.edu.\n\nThis file contains the custom data augmentation functions.\n'''\n\nimport numpy as np\nimport cv2\nfrom scipy.ndimage.interpolation import map_coordinates\n\n# Function to distort image\ndef elastic_transform(image, alpha=2000, sigma=40, alpha_affine=40, random_state=None):\n if random_state is None:\n random_state = np.random.RandomState(None)\n\n shape = image.shape\n shape_size = shape[:2]\n\n # Random affine\n center_square = np.float32(shape_size) // 2\n square_size = min(shape_size) // 3\n pts1 = np.float32([center_square + square_size, [center_square[0]+square_size, center_square[1]-square_size], center_square - square_size])\n pts2 = pts1 + random_state.uniform(-alpha_affine, alpha_affine, size=pts1.shape).astype(np.float32)\n M = cv2.getAffineTransform(pts1, pts2)\n for i in range(shape[2]):\n image[:,:,i] = cv2.warpAffine(image[:,:,i], M, shape_size[::-1], borderMode=cv2.BORDER_REFLECT_101)\n image = image.reshape(shape)\n\n blur_size = int(4*sigma) | 1\n\n dx = cv2.GaussianBlur((random_state.rand(*shape_size) * 2 - 1), ksize=(blur_size, blur_size), sigmaX=sigma) * alpha\n dy = cv2.GaussianBlur((random_state.rand(*shape_size) * 2 - 1), ksize=(blur_size, blur_size), sigmaX=sigma) * alpha\n\n x, y = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]))\n indices = np.reshape(y+dy, (-1, 1)), np.reshape(x+dx, (-1, 1))\n\n def_img = np.zeros_like(image)\n for i in range(shape[2]):\n def_img[:,:,i] = map_coordinates(image[:,:,i], indices, order=1).reshape(shape_size)\n\n return def_img\n\n\ndef salt_pepper_noise(image, salt=0.2, amount=0.004):\n row, col, chan = image.shape\n num_salt = np.ceil(amount * row * salt)\n num_pepper = np.ceil(amount * row * (1.0 - salt))\n\n for n in range(chan//2): # //2 so we don't augment the mask\n # Add Salt noise\n coords = [np.random.randint(0, i - 1, int(num_salt)) for i in image.shape[0:2]]\n image[coords[0], coords[1], n] = 1\n\n # Add Pepper noise\n coords = [np.random.randint(0, i - 1, int(num_pepper)) for i in image.shape[0:2]]\n image[coords[0], coords[1], n] = 0\n\n return image\n" ]
[ [ "numpy.reshape", "numpy.arange", "scipy.ndimage.interpolation.map_coordinates", "numpy.ceil", "numpy.zeros_like", "numpy.float32", "numpy.random.RandomState" ] ]
gilwoolee/brl_gym
[ "9c0784e9928f12d2ee0528c79a533202d3afb640" ]
[ "brl_gym/envs/mujoco/maze_continuous.py" ]
[ "import numpy as np\nfrom gym import utils\nfrom gym.spaces import Box\nfrom gym.envs.mujoco import mujoco_env\nfrom mujoco_py import MjViewer\nimport os\nasset_dir = \"/home/gilwoo/Workspace/brl_gym/brl_gym/envs/mujoco/\"\n\n# Goal can be anywhere\nclass MazeContinuous(mujoco_env.MujocoEnv, utils.EzPickle):\n def __init__(self):\n self.agent_bid = 0\n utils.EzPickle.__init__(self)\n self.target_sid = 0\n\n self.fullpath = os.path.join(asset_dir, \"assets\", 'mazeContinuous.xml')\n mujoco_env.MujocoEnv.__init__(self, self.fullpath, 30) # originally 5\n\n self.agent_bid = self.sim.model.body_name2id('agent')\n\n self.action_space = Box(np.ones(3) * -1, np.ones(3))\n\n def step(self, a):\n # if len(a) == 3:\n # a = np.clip(a, np.array([-1.4, -1.4, -1]), np.array([1.4, 1.4, 1]))\n # else:\n # a = np.clip(a, np.array([-1.4, -1.4]), np.array([1.4, 1.4]))\n self.do_simulation(a[:2], self.frame_skip)\n\n agent_pos = self.data.body_xpos[self.agent_bid].ravel()\n target_pos = self.data.site_xpos[self.target_sid].ravel()\n dist = np.linalg.norm(agent_pos-target_pos)\n\n reward = 0 #-np.linalg.norm(a) * 0.1\n done = False\n if dist < 0.1:\n reward = 500.0 # bonus for being very close\n done = True\n\n obs = self._get_obs()\n if len(a) == 3:\n if a[2] > 0:\n dist, noise_scale = self._sense()\n reward += -0.1 # used to be -0.1\n obs[-2:] = dist\n info = {'goal_dist':dist, 'noise_scale':noise_scale}\n else:\n info = {}\n else:\n info = {}\n\n return obs, reward, done, info\n\n def _get_obs(self):\n goal_pose = self.model.site_pos[self.target_sid][:2]\n agent_pos = self.data.body_xpos[self.agent_bid].ravel()\n target_pos = self.data.site_xpos[self.target_sid].ravel()\n\n return np.concatenate([agent_pos[:2], self.data.qvel.ravel(), [0, 0]])\n\n def _sense(self):\n goal_pose = self.model.site_pos[self.target_sid][:2]\n agent_pos = self.data.body_xpos[self.agent_bid].ravel()\n goal_dist = goal_pose - agent_pos[:2]\n\n # Noisy distance\n noise_scale = np.linalg.norm(goal_dist) * 2.0 # (1.8*np.sqrt(2))\n goal_dist += np.random.normal() * noise_scale\n\n return goal_dist, noise_scale\n\n\n def reset_model(self):\n\n # randomize the agent and goal\n agent_x = 0.0 #self.np_random.uniform(low=-0.2, high=0.2)\n agent_y = self.np_random.uniform(low=-0.4, high=0.4)\n\n qp = np.array([agent_x, agent_y])\n qv = self.init_qvel.copy()\n self.set_state(qp, qv)\n self.target_sid = self.sim.model.site_name2id('target')\n\n goal_x = self.np_random.uniform(low=-1.3, high=1.3)\n goal_x = round(goal_x / 0.3) * 0.3 - 0.05\n goal_y = self.np_random.uniform(low=-1.3, high=1.3)\n\n self.model.site_pos[self.target_sid][0] = goal_x\n self.model.site_pos[self.target_sid][1] = goal_y\n\n # Visualize the goal\n self.model.site_rgba[self.target_sid] = np.array([1.0, 0.0, 0.0, 0.1])\n\n self.sim.forward()\n return self._get_obs()\n\n def mj_viewer_setup(self):\n self.viewer = MjViewer(self.sim)\n self.sim.forward()\n\n def get_state(self):\n\n return np.concatenate([self.data.qpos, self.data.qvel]).ravel()\n\n def set_state(self, qpos, qvel):\n state = self.sim.get_state()\n for i in range(len(qpos)):\n state.qpos[i] = qpos[i]\n for i in range(len(qvel)):\n state.qvel[i] = qvel[i]\n self.sim.set_state(state)\n self.sim.forward()\n\n\n\nif __name__ == \"__main__\":\n env = MazeContinuous()\n o = env.reset()\n\n d = False\n while not d:\n # o, r, d, info = env.step(env.action_space.sample())\n o, r, d, info = env.step(np.array([0,-0.1]))\n #print(info)\n env.render()\n print(\"final reward\", r)\n" ]
[ [ "numpy.linalg.norm", "numpy.ones", "numpy.concatenate", "numpy.random.normal", "numpy.array" ] ]
lcontento/cytokit
[ "168c7ebd598664f0833d26078b167456ce4a899d" ]
[ "pub/analysis/mc38-spheroid/spheroid_cytometer.py" ]
[ "import numpy as np\nfrom skimage import util\nfrom skimage import draw\nfrom skimage import filters\nfrom skimage import measure\nfrom skimage import exposure\nfrom skimage import transform\nfrom skimage import feature\nfrom skimage import morphology\nfrom skimage import segmentation\nfrom skimage import img_as_float\nfrom skimage.feature.blob import _prune_blobs\nfrom centrosome import propagate\nfrom cytokit import math as ck_math\nfrom cytokit.cytometry import cytometer\nfrom scipy import ndimage as ndi\nimport logging\nlogger = logging.getLogger(__name__)\n\n####################\n# 20X Implementation\n####################\n\n\nclass SpheroidCytometer20x(cytometer.Cytometer):\n \n def __init__(self, config, sampling=None):\n \"\"\"Cytometer Initialization\n\n Args:\n config: Experiment configuration\n sampling: Sampling distance proportions as 3-item list-like with order (z, y, x)\n \"\"\"\n super().__init__(config)\n \n if sampling is not None:\n assert len(sampling) == 3, \\\n 'Sampling factors must be 3 item list-like'\n assert all([s > 0 for s in sampling]), \\\n 'All sampling factors must be > 0 (given {})'.format(sampling)\n self.sampling = np.array(sampling)\n else:\n # Relative voxel sizes (typically around [10, 1, 1])\n self.sampling = np.array([config.axial_sampling_ratio, 1.0, 1.0])\n \n # Multiplicative factors for rescaling isotropic arguments\n self.factors = 1.0 / self.sampling\n logger.debug('Cytometer initialized with sampling rates %s', tuple(self.sampling))\n \n def initialize(self):\n pass\n \n \n def get_primary_object_mask(self, img, img_pk):\n assert img.ndim == 3, 'Expecting 3D image, got shape {}'.format(img.shape)\n assert img_pk.dtype == np.bool\n # Remove frequencies above scale of individual cells (this results in clusters near spheroid centers)\n img = np.abs(ndi.gaussian_filter(img, sigma=1*self.factors) - ndi.gaussian_filter(img, sigma=8*self.factors))\n img = img.max(axis=0)\n img = img > filters.threshold_otsu(img)\n img = img | img_pk # Merge threshold mask with given peaks/markers\n img = morphology.binary_closing(img, selem=morphology.disk(8))\n img = ndi.morphology.binary_fill_holes(img)\n img = morphology.binary_opening(img, selem=morphology.disk(8))\n return img\n \n def segment(self, img, include_intermediate_results=False, **kwargs):\n assert img.ndim == 3, 'Expecting 3D image, got shape {}'.format(img.shape)\n img = ndi.median_filter(img, size=(1, 3, 3))\n img = img_as_float(img)\n img = util.invert(img)\n \n img_mz = img.max(axis=0)\n img_mz = exposure.rescale_intensity(img_mz, out_range=(0, 1))\n peaks, img_dog, sigmas = blob_dog(img_mz, min_sigma=8, max_sigma=128, sigma_ratio=1.6, overlap=.25, threshold=1.75)\n \n img_pk = np.zeros(img_mz.shape, dtype=bool)\n img_pk[peaks[:,0].astype(int), peaks[:,1].astype(int)] = True\n img_pk = morphology.label(img_pk)\n \n # Get mask to conduct segmentation over\n img_pm = self.get_primary_object_mask(img, morphology.binary_dilation(img_pk > 0, morphology.disk(32)))\n \n \n img_dt = ndi.distance_transform_edt(img_pm)\n \n # Use propogation rather than watershed as it often captures a much more accurate boundary\n img_obj = propagate.propagate(img_mz, img_pk, img_pm, .01)[0].astype(np.uint16)\n img_bnd = img_obj * segmentation.find_boundaries(img_obj, mode='inner', background=0)\n \n img_seg = [img_obj, img_obj, img_bnd, img_bnd]\n if include_intermediate_results:\n to_uint16 = lambda im: exposure.rescale_intensity(im, out_range='uint16').astype(np.uint16)\n img_seg += [\n to_uint16(img_mz), \n 
to_uint16(img_dog[0]),\n to_uint16(img_dog[1]),\n img_pm.astype(np.uint16),\n img_pk.astype(np.uint16)\n ]\n \n # Stack and add new axis to give to (z, ch, h, w)\n img_seg = np.stack(img_seg)[np.newaxis]\n assert img_seg.dtype == np.uint16, 'Expecting 16bit result, got type {}'.format(img_seg.dtype)\n assert img_seg.ndim == 4, 'Expecting 4D result, got shape {}'.format(img_seg.shape)\n return img_seg\n\n def quantify(self, tile, segments, **kwargs):\n assert tile.ndim == 5\n # Run max-z projection across all channels and insert new axis where z dimension was\n tile = tile.max(axis=1)[:, np.newaxis]\n assert tile.ndim == 5, 'Expecting result after max-z projection to be 5D but got shape {}'.format(tile.shape)\n assert tile.shape[0] == tile.shape[1] == 1\n return cytometer.CytometerBase.quantify(tile, segments, **kwargs)\n\n def augment(self, df):\n df = cytometer.CytometerBase.augment(df, self.config.microscope_params)\n # Attempt to sum live + dead intensities if both channels are present\n for agg_fun in ['mean', 'sum']:\n cols = df.filter(regex='ci:(LIVE|DEAD):{}'.format(agg_fun)).columns.tolist()\n if len(cols) == 2:\n df['ci:LIVE+DEAD:{}'.format(agg_fun)] = df[cols[0]] + df[cols[1]]\n return df\n \n \n###################\n# 2X Implementation\n###################\n \n\ndef get_circle_mask(radius, shape, center=None, translation=None):\n center = np.asarray(shape)//2 if center is None else np.asarray(center)\n if translation is not None:\n center += np.asarray(translation).astype(int)\n rr, cc = draw.circle(*center, radius=radius, shape=shape)\n arr = np.zeros(shape, dtype=bool)\n arr[rr, cc] = 1\n return arr.astype(bool)\n\nclass SpheroidCytometer2x(cytometer.Cytometer):\n\n def segment(self, img, well_radius=800, well_mask_radius=765, include_intermediate_results=False, **kwargs):\n # Assume image is single plane z-stack and grab first 2D image to process\n assert img.ndim == 3\n assert img.shape[0] == 1\n img = img[0]\n \n logger.debug(\n 'Running 2x segmentation on image with shape %s, type %s (args: well_radius = %s, well_mask_radius = %s, include_intermediate_results=%s)',\n img.shape, img.dtype, well_radius, well_mask_radius, include_intermediate_results\n )\n\n # Remove outliers, convert to float\n img = ndi.median_filter(img, size=(3, 3))\n img = img_as_float(img)\n\n # Apply bandpass and compute gradients\n img_bp = ndi.gaussian_filter(img, sigma=6) - ndi.gaussian_filter(img, sigma=10)\n img_gr = ndi.generic_gradient_magnitude(img_bp, ndi.sobel)\n\n # Get and apply well mask translation\n img_well = get_circle_mask(well_radius, img_gr.shape)\n shifts = feature.register_translation(img_gr, img_well)[0]\n img_well = get_circle_mask(well_mask_radius, img_gr.shape, translation=shifts)\n img_gm = img_gr * img_well\n\n # Apply local threshold and cleanup binary result\n img_bm = img_gm > filters.threshold_local(img_gm, 255)\n img_bm = ndi.binary_fill_holes(img_bm, structure=morphology.disk(1))\n img_bm = morphology.binary_opening(img_bm, selem=morphology.disk(8))\n\n # Run segmentation\n img_dt = ndi.distance_transform_edt(img_bm)\n img_dt = ndi.gaussian_filter(img_dt, sigma=1)\n img_pk = morphology.label(feature.peak_local_max(img_dt, indices=False, min_distance=8))\n img_obj = segmentation.watershed(-img_dt, img_pk, mask=img_bm).astype(np.uint16)\n img_bnd = img_obj * segmentation.find_boundaries(img_obj, mode='inner', background=0)\n\n # Compile list of object image results (and append intermediates if necessary)\n img_seg = [img_obj, img_obj, img_bnd, img_bnd]\n if 
include_intermediate_results:\n to_uint16 = lambda im: exposure.rescale_intensity(im, out_range='uint16').astype(np.uint16)\n img_seg += [\n to_uint16(img_bp), \n segmentation.find_boundaries(img_well, mode='inner', background=0).astype(np.uint16),\n to_uint16(img_gm),\n to_uint16(img_dt), \n img_pk.astype(np.uint16)\n ]\n \n # Stack and add new axis to give to (z, ch, h, w)\n img_seg = np.stack(img_seg)[np.newaxis]\n assert img_seg.dtype == np.uint16, 'Expecting 16bit result, got type {}'.format(img_seg.dtype)\n assert img_seg.ndim == 4, 'Expecting 4D result, got shape {}'.format(img_seg.shape)\n return img_seg\n \n def quantify(self, tile, segments, **kwargs):\n return cytometer.CytometerBase.quantify(tile, segments, **kwargs)\n \n def augment(self, df):\n df = cytometer.CytometerBase.augment(df, self.config.microscope_params)\n # Attempt to sum live + dead intensities if both channels are present\n for agg_fun in ['mean', 'sum']:\n cols = df.filter(regex='ci:(LIVE|DEAD):{}'.format(agg_fun)).columns.tolist()\n if len(cols) == 2:\n df['ci:LIVE+DEAD:{}'.format(agg_fun)] = df[cols[0]] + df[cols[1]]\n return df\n \n \n###################\n# Utility Functions\n###################\n\ndef blob_dog(image, min_sigma=1, max_sigma=50, sigma_ratio=1.6, threshold=2.0,\n overlap=.5, *, exclude_border=False):\n r\"\"\"Lifted from https://github.com/scikit-image/scikit-image/blob/2962429237988cb60b9b317aa020ca3bab100b7f/skimage/feature/blob.py#L168\n \n Modifications are added here to return more intermediate results\n \"\"\"\n image = img_as_float(image)\n\n # if both min and max sigma are scalar, function returns only one sigma\n scalar_sigma = np.isscalar(max_sigma) and np.isscalar(min_sigma)\n\n # Gaussian filter requires that sequence-type sigmas have same\n # dimensionality as image. 
This broadcasts scalar kernels\n if np.isscalar(max_sigma):\n max_sigma = np.full(image.ndim, max_sigma, dtype=float)\n if np.isscalar(min_sigma):\n min_sigma = np.full(image.ndim, min_sigma, dtype=float)\n\n # Convert sequence types to array\n min_sigma = np.asarray(min_sigma, dtype=float)\n max_sigma = np.asarray(max_sigma, dtype=float)\n\n # k such that min_sigma*(sigma_ratio**k) > max_sigma\n k = int(np.mean(np.log(max_sigma / min_sigma) / np.log(sigma_ratio) + 1))\n\n # a geometric progression of standard deviations for gaussian kernels\n sigma_list = np.array([min_sigma * (sigma_ratio ** i)\n for i in range(k + 1)])\n\n gaussian_images = [ndi.gaussian_filter(image, s) for s in sigma_list]\n\n # computing difference between two successive Gaussian blurred images\n # multiplying with average standard deviation provides scale invariance\n dog_images = [(gaussian_images[i] - gaussian_images[i + 1])\n * np.mean(sigma_list[i]) for i in range(k)]\n\n image_cube = np.stack(dog_images, axis=-1)\n\n # local_maxima = get_local_maxima(image_cube, threshold)\n local_maxima = feature.peak_local_max(image_cube, threshold_abs=threshold,\n footprint=np.ones((3,) * (image.ndim + 1)),\n threshold_rel=0.0,\n exclude_border=exclude_border)\n # Catch no peaks\n if local_maxima.size == 0:\n return np.empty((0, 3)), dog_images, sigma_list\n\n # Convert local_maxima to float64\n lm = local_maxima.astype(np.float64)\n\n # translate final column of lm, which contains the index of the\n # sigma that produced the maximum intensity value, into the sigma\n sigmas_of_peaks = sigma_list[local_maxima[:, -1]]\n\n if scalar_sigma:\n # select one sigma column, keeping dimension\n sigmas_of_peaks = sigmas_of_peaks[:, 0:1]\n\n # Remove sigma index and replace with sigmas\n lm = np.hstack([lm[:, :-1], sigmas_of_peaks])\n\n # See: https://github.com/scikit-image/scikit-image/blob/master/skimage/feature/blob.py#L129\n #return lm, _prune_blobs(lm, overlap), sigma_list, dog_images\n return _prune_blobs(lm, overlap), dog_images, sigma_list" ]
[ [ "numpy.hstack", "numpy.log", "scipy.ndimage.gaussian_filter", "scipy.ndimage.morphology.binary_fill_holes", "numpy.asarray", "scipy.ndimage.median_filter", "scipy.ndimage.distance_transform_edt", "numpy.stack", "numpy.full", "numpy.ones", "scipy.ndimage.generic_gradient_magnitude", "numpy.mean", "numpy.isscalar", "numpy.array", "numpy.zeros", "numpy.empty" ] ]
andrfish/tensorflow-alexnet
[ "4c44e9a0ec90ec4731775a2d94415d2b5727f34d" ]
[ "simple_kaggle_mnist_alexnet.py" ]
[ "\"\"\"\nThis is simple Alexnet train implementation modified for Kaggle mnist data.\n\"\"\"\n\nimport time\nimport tensorflow as tf\n\nimport logging\ntf.get_logger().setLevel(logging.ERROR)\n\nimport kaggle_mnist_input as loader\nimport os\nimport csv\n\nFLAGS = tf.app.flags.FLAGS\n\ntf.app.flags.DEFINE_integer('training_epoch', 30, \"training epoch\")\ntf.app.flags.DEFINE_integer('batch_size', 128, \"batch size\")\ntf.app.flags.DEFINE_integer('validation_interval', 100, \"validation interval\")\n\ntf.app.flags.DEFINE_float('dropout_keep_prob', 0.5, \"dropout keep prob\")\ntf.app.flags.DEFINE_float('learning_rate', 0.001, \"learning rate\")\ntf.app.flags.DEFINE_float('rms_decay', 0.9, \"rms optimizer decay\")\ntf.app.flags.DEFINE_float('weight_decay', 0.0005, \"l2 regularization weight decay\")\ntf.app.flags.DEFINE_string('train_path', 'data/train.csv', \"path to download training data\")\ntf.app.flags.DEFINE_string('test_path', 'data/test.csv', \"path to download test data\")\ntf.app.flags.DEFINE_integer('validation_size', 2000, \"validation size in training data\")\ntf.app.flags.DEFINE_string('save_name', os.getcwd() + '/var.ckpt', \"path to save variables\")\ntf.app.flags.DEFINE_boolean('is_train', True, \"True for train, False for test\")\ntf.app.flags.DEFINE_string('test_result', 'result.csv', \"test file path\")\n\nimage_size = 28\nimage_channel = 1\nlabel_cnt = 10\n\ninputs = tf.placeholder(\"float\", [None, image_size, image_size, image_channel])\nlabels = tf.placeholder(\"float\", [None, label_cnt])\ndropout_keep_prob = tf.placeholder(\"float\", None)\nlearning_rate_ph = tf.placeholder(\"float\", None)\n\n# conv layer 1\nconv1_weights = tf.Variable(tf.random_normal([7, 7, image_channel, 96], dtype=tf.float32, stddev=0.01))\nconv1_biases = tf.Variable(tf.constant(0.0, shape=[96], dtype=tf.float32))\nconv1 = tf.nn.conv2d(inputs, conv1_weights, [1, 3, 3, 1], padding='SAME')\nconv1 = tf.nn.bias_add(conv1, conv1_biases)\nconv1_relu = tf.nn.relu(conv1)\nconv1_norm = tf.nn.local_response_normalization(conv1_relu, depth_radius=2, alpha=0.0001, beta=0.75, bias=1.0)\nconv1_pool = tf.nn.max_pool(conv1_norm, ksize=[1, 2, 2, 1], strides=[1, 1, 1, 1], padding='VALID')\n\n# conv layer 2\nconv2_weights = tf.Variable(tf.random_normal([5, 5, 96, 256], dtype=tf.float32, stddev=0.01))\nconv2_biases = tf.Variable(tf.constant(1.0, shape=[256], dtype=tf.float32))\nconv2 = tf.nn.conv2d(conv1_pool, conv2_weights, [1, 1, 1, 1], padding='SAME')\nconv2 = tf.nn.bias_add(conv2, conv2_biases)\nconv2_relu = tf.nn.relu(conv2)\nconv2_norm = tf.nn.local_response_normalization(conv2_relu)\nconv2_pool = tf.nn.max_pool(conv2_norm, ksize=[1, 2, 2, 1], strides=[1, 1, 1, 1], padding='VALID')\n\n# conv layer 3\nconv3_weights = tf.Variable(tf.random_normal([3, 3, 256, 384], dtype=tf.float32, stddev=0.01))\nconv3_biases = tf.Variable(tf.constant(0.0, shape=[384], dtype=tf.float32))\nconv3 = tf.nn.conv2d(conv2_pool, conv3_weights, [1, 1, 1, 1], padding='SAME')\nconv3 = tf.nn.bias_add(conv3, conv3_biases)\nconv3_relu = tf.nn.relu(conv3)\n\n# conv layer 4\nconv4_weights = tf.Variable(tf.random_normal([3, 3, 384, 384], dtype=tf.float32, stddev=0.01))\nconv4_biases = tf.Variable(tf.constant(1.0, shape=[384], dtype=tf.float32))\nconv4 = tf.nn.conv2d(conv3_relu, conv4_weights, [1, 1, 1, 1], padding='SAME')\nconv4 = tf.nn.bias_add(conv4, conv4_biases)\nconv4_relu = tf.nn.relu(conv4)\n\n# conv layer 5\nconv5_weights = tf.Variable(tf.random_normal([3, 3, 384, 256], dtype=tf.float32, stddev=0.01))\nconv5_biases = 
tf.Variable(tf.constant(1.0, shape=[256], dtype=tf.float32))\nconv5 = tf.nn.conv2d(conv4_relu, conv5_weights, [1, 1, 1, 1], padding='SAME')\nconv5 = tf.nn.bias_add(conv5, conv5_biases)\nconv5_relu = tf.nn.relu(conv5)\nconv5_pool = tf.nn.max_pool(conv5_relu, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID')\n\n# fc layer 1\nfc1_weights = tf.Variable(tf.random_normal([256 * 3 * 3, 4096], dtype=tf.float32, stddev=0.01))\nfc1_biases = tf.Variable(tf.constant(1.0, shape=[4096], dtype=tf.float32))\nconv5_reshape = tf.reshape(conv5_pool, [-1, fc1_weights.get_shape().as_list()[0]])\nfc1 = tf.matmul(conv5_reshape, fc1_weights)\nfc1 = tf.nn.bias_add(fc1, fc1_biases)\nfc1_relu = tf.nn.relu(fc1)\nfc1_drop = tf.nn.dropout(fc1_relu, dropout_keep_prob)\n\n# fc layer 2\nfc2_weights = tf.Variable(tf.random_normal([4096, 4096], dtype=tf.float32, stddev=0.01))\nfc2_biases = tf.Variable(tf.constant(1.0, shape=[4096], dtype=tf.float32))\nfc2 = tf.matmul(fc1_drop, fc2_weights)\nfc2 = tf.nn.bias_add(fc2, fc2_biases)\nfc2_relu = tf.nn.relu(fc2)\nfc2_drop = tf.nn.dropout(fc2_relu, dropout_keep_prob)\n\n# fc layer 3 - output\nfc3_weights = tf.Variable(tf.random_normal([4096, label_cnt], dtype=tf.float32, stddev=0.01))\nfc3_biases = tf.Variable(tf.constant(1.0, shape=[label_cnt], dtype=tf.float32))\nfc3 = tf.matmul(fc2_drop, fc3_weights)\nlogits = tf.nn.bias_add(fc3, fc3_biases)\n\n# loss\nloss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))\n# l2 regularization\nregularizers = (tf.nn.l2_loss(conv1_weights) + tf.nn.l2_loss(conv1_biases) +\n tf.nn.l2_loss(conv2_weights) + tf.nn.l2_loss(conv2_biases) +\n tf.nn.l2_loss(conv3_weights) + tf.nn.l2_loss(conv3_biases) +\n tf.nn.l2_loss(conv4_weights) + tf.nn.l2_loss(conv4_biases) +\n tf.nn.l2_loss(conv5_weights) + tf.nn.l2_loss(conv5_biases) +\n tf.nn.l2_loss(fc1_weights) + tf.nn.l2_loss(fc1_biases) +\n tf.nn.l2_loss(fc2_weights) + tf.nn.l2_loss(fc2_biases) +\n tf.nn.l2_loss(fc3_weights) + tf.nn.l2_loss(fc3_biases))\nloss += FLAGS.weight_decay * regularizers\n\n# accuracy\npredict = tf.argmax(logits, 1)\naccuracy = tf.reduce_mean(tf.cast(tf.equal(predict, tf.argmax(labels, 1)), tf.float32))\n\n# train\ntrain = tf.train.RMSPropOptimizer(learning_rate_ph, FLAGS.rms_decay).minimize(loss)\n# train = tf.train.MomentumOptimizer(learning_rate_ph, FLAGS.momentum).minimize(loss)\n\n# session\ninit = tf.initialize_all_variables()\nsess = tf.Session()\nsess.run(init)\n\n# tf saver\nsaver = tf.train.Saver()\nif os.path.isfile(FLAGS.save_name):\n saver.restore(sess, FLAGS.save_name)\n\ntotal_start_time = time.time()\n\n# begin training\nif FLAGS.is_train:\n # load mnist data\n train_images, train_labels, train_range, validation_images, validation_labels, validation_indices = loader.load_mnist_train(\n FLAGS.validation_size, FLAGS.batch_size)\n \n total_train_len = len(train_images)\n i = 0\n learning_rate = FLAGS.learning_rate\n\n for epoch in range(FLAGS.training_epoch):\n epoch_start_time = time.time()\n\n overall_loss = 0.0\n for start, end in train_range:\n batch_start_time = time.time()\n trainX = train_images[start:end]\n trainY = train_labels[start:end]\n _, loss_result = sess.run([train, loss], feed_dict={inputs: trainX, labels: trainY,\n dropout_keep_prob: FLAGS.dropout_keep_prob,\n learning_rate_ph: learning_rate})\n #print('[%s][training][epoch %d, step %d exec %.2f seconds] [file: %5d ~ %5d / %5d] loss : %3.10f' % (\n # time.strftime(\"%Y-%m-%d %H:%M:%S\"), epoch, i, (time.time() - batch_start_time), start, end,\n # 
total_train_len, loss_result))\n overall_loss += loss_result\n\n if i % FLAGS.validation_interval == 0 and i > 0:\n validation_start_time = time.time()\n shuffle_indices = loader.shuffle_validation(validation_indices, FLAGS.batch_size)\n validationX = validation_images[shuffle_indices]\n validationY = validation_labels[shuffle_indices]\n accuracy_result, loss_result = sess.run([accuracy, loss],\n feed_dict={inputs: validationX, labels: validationY,\n dropout_keep_prob: 1.0})\n #print('[%s][validation][epoch %d, step %d exec %.2f seconds] accuracy : %1.3f, loss : %3.10f' % (\n # time.strftime(\"%Y-%m-%d %H:%M:%S\"), epoch, i, (time.time() - validation_start_time),\n # accuracy_result, loss_result))\n\n i += 1\n\n overall_loss /= len(train_range)\n print(\"[%s][epoch exec %s seconds] epoch : %d, loss: %3.10f\" % (\n time.strftime(\"%Y-%m-%d %H:%M:%S\"), (time.time() - epoch_start_time), epoch + 1, overall_loss))\n saver.save(sess, FLAGS.save_name)\n print()\n# begin test\nelse:\n i = 1\n test_images, test_ranges = loader.load_mnist_test(FLAGS.batch_size)\n\n test_result_file = open(FLAGS.test_result, 'wb')\n csv_writer = csv.writer(test_result_file)\n csv_writer.writerow(['ImageId', 'Label'])\n\n for file_start, file_end in test_ranges:\n testX = test_images[file_start:file_end]\n predict_label = sess.run(predict, feed_dict={inputs: testX, dropout_keep_prob: 1.0})\n\n for cur_predict in predict_label:\n csv_writer.writerow([i, cur_predict])\n print('[Result %s: %s]' % (i, cur_predict))\n i += 1\n\nprint(\"[%s][total exec %s seconds\" % (time.strftime(\"%Y-%m-%d %H:%M:%S\"), (time.time() - total_start_time)))\n" ]
[ [ "tensorflow.nn.softmax_cross_entropy_with_logits", "tensorflow.nn.max_pool", "tensorflow.nn.l2_loss", "tensorflow.app.flags.DEFINE_string", "tensorflow.app.flags.DEFINE_boolean", "tensorflow.nn.conv2d", "tensorflow.app.flags.DEFINE_integer", "tensorflow.initialize_all_variables", "tensorflow.Session", "tensorflow.train.Saver", "tensorflow.argmax", "tensorflow.nn.dropout", "tensorflow.matmul", "tensorflow.train.RMSPropOptimizer", "tensorflow.placeholder", "tensorflow.nn.bias_add", "tensorflow.nn.relu", "tensorflow.constant", "tensorflow.get_logger", "tensorflow.app.flags.DEFINE_float", "tensorflow.nn.local_response_normalization", "tensorflow.random_normal" ] ]
qdev-dk/publications
[ "b46b51b0b267d6aab3edc043ab0a09cad61aaca4" ]
[ "main.py" ]
[ "import arxivtodf\nimport dftohtml\nimport pandas as pd\n\nname_file_path = '/var/projects/qdev/names.xlsx'\nnames_df = pd.read_excel(name_file_path, dtype={'names':str, 'fullnames':str, 'search_queries':str, 'homepageids':str})\n\nsearch_queries = names_df['search_queries'].to_list()\nnames = names_df['names'].to_list()\nfullnames =names_df['fullnames'].to_list()\nhomepageids = names_df['homepageids'].to_list()\n\n\nbasehref = '/var/projects/publications/data/'\nbasehref2 = '/var/projects/publications/web/'\nstatic = '/var/projects/qdev/pub_static/'\n\n\nfor n in range(len(names)):\n try:\n df_query = arxivtodf.df_from_query(search_queries[n], start=0, max_results=500)\n\n df_query.to_excel(basehref+names[n]+'.xlsx')\n df_static = pd.read_excel(static+'static_'+names[n]+'.xlsx',index_col=0,dtype=str)\n merged = pd.merge(df_static[['idnr','DOI']], df_query[['idnr','DOI']], on=['idnr','DOI'], how='right', indicator=True)\n bool_df = merged._merge=='right_only'\n df_final = pd.concat([df_static,df_query[bool_df.to_list()]])\n\n dftohtml.df_to_html_file(str(homepageids[n]), fullnames[n], df_query, names, basehref2+names[n]+'.html')\n except Exception as e:\n print(e)\n pass\n" ]
[ [ "pandas.merge", "pandas.read_excel" ] ]
maxuanquang/cc
[ "c6279d27ce51970adec58b6952c50b123acd6ff4" ]
[ "data/prepare_train_data_custom.py" ]
[ "from __future__ import division\nimport argparse\nimport scipy.misc\nimport numpy as np\nfrom joblib import Parallel, delayed\nfrom tqdm import tqdm\nfrom path import Path\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"dataset_dir\", metavar='DIR',\n help='path to original dataset')\nparser.add_argument(\"--dataset-format\", type=str, required=True, choices=[\"kitti\", \"cityscapes\"])\nparser.add_argument(\"--static-frames\", default=None,\n help=\"list of imgs to discard for being static, if not set will discard them based on speed \\\n (careful, on KITTI some frames have incorrect speed)\")\nparser.add_argument(\"--with-gt\", action='store_true',\n help=\"If available (e.g. with KITTI), will store ground truth along with images, for validation\")\nparser.add_argument(\"--dump-root\", type=str, required=True, help=\"Where to dump the data\")\nparser.add_argument(\"--height\", type=int, default=128, help=\"image height\")\nparser.add_argument(\"--width\", type=int, default=416, help=\"image width\")\nparser.add_argument(\"--num-threads\", type=int, default=4, help=\"number of threads to use\")\n\nargs = parser.parse_args()\n\n\ndef dump_example(scene):\n scene_list = data_loader.collect_scenes(scene)\n for scene_data in scene_list:\n dump_dir = args.dump_root/scene_data['rel_path']\n dump_dir.makedirs_p()\n intrinsics = scene_data['intrinsics']\n fx = intrinsics[0, 0]\n fy = intrinsics[1, 1]\n cx = intrinsics[0, 2]\n cy = intrinsics[1, 2]\n\n dump_cam_file = dump_dir/'cam.txt'\n with open(dump_cam_file, 'w') as f:\n f.write('%f,0.,%f,0.,%f,%f,0.,0.,1.' % (fx, cx, fy, cy))\n\n for sample in data_loader.get_scene_imgs(scene_data):\n assert(len(sample) >= 2)\n img, frame_nb = sample[0], sample[1]\n dump_img_file = dump_dir/'{}.jpg'.format(frame_nb)\n scipy.misc.imsave(dump_img_file, img)\n if len(sample) == 3:\n dump_depth_file = dump_dir/'{}.npy'.format(frame_nb)\n np.save(dump_depth_file, sample[2])\n\n if len(dump_dir.files('*.jpg')) < 3:\n dump_dir.rmtree()\n\n\ndef main():\n args.dump_root = Path(args.dump_root)\n args.dump_root.mkdir_p()\n\n global data_loader\n\n if args.dataset_format == 'kitti':\n from kitti_raw_loader import KittiRawLoader\n data_loader = KittiRawLoader(args.dataset_dir,\n static_frames_file=args.static_frames,\n img_height=args.height,\n img_width=args.width,\n get_gt=args.with_gt)\n\n if args.dataset_format == 'cityscapes':\n from cityscapes_loader import cityscapes_loader\n data_loader = cityscapes_loader(args.dataset_dir,\n img_height=args.height,\n img_width=args.width)\n\n print('Retrieving frames')\n def convert_scenes(missing_folders):\n date = missing_folders[0][:10]\n root = Path('/content/drive/MyDrive/Dự án/KITTI Dataset/Raw Data' + f'/{date}')\n converted = []\n for folder in missing_folders:\n converted.append(root/folder[:-3])\n return converted\n missing_folders = []\n scenes = convert_scenes(missing_folders)\n Parallel(n_jobs=args.num_threads)(delayed(dump_example)(scene) for scene in tqdm(scenes))\n # Parallel(n_jobs=args.num_threads)(delayed(dump_example)(scene) for scene in tqdm(data_loader.scenes))\n # Split into train/val\n # print('Generating train val lists')\n # np.random.seed(8964)\n # subfolders = args.dump_root.dirs()\n # with open(args.dump_root / 'train.txt', 'w') as tf:\n # with open(args.dump_root / 'val.txt', 'w') as vf:\n # for s in tqdm(subfolders):\n # if np.random.random() < 0.1:\n # vf.write('{}\\n'.format(s.name))\n # else:\n # tf.write('{}\\n'.format(s.name))\n # remove useless groundtruth data for training 
comment if you don't want to erase it\n# for gt_file in s.files('*.npy'):\n# gt_file.remove_p()\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.save" ] ]
MattyBoy4444/home-assistant
[ "b7b8296c73b28634bb9c60efe1ad976b1092fce8" ]
[ "homeassistant/components/binary_sensor/trend.py" ]
[ "\"\"\"\nA sensor that monitors trends in other components.\n\nFor more details about this platform, please refer to the documentation at\nhttps://home-assistant.io/components/sensor.trend/\n\"\"\"\nfrom collections import deque\nimport logging\nimport math\n\nimport voluptuous as vol\n\nfrom homeassistant.components.binary_sensor import (\n DEVICE_CLASSES_SCHEMA, ENTITY_ID_FORMAT, PLATFORM_SCHEMA,\n BinarySensorDevice)\nfrom homeassistant.const import (\n ATTR_ENTITY_ID, ATTR_FRIENDLY_NAME, CONF_DEVICE_CLASS, CONF_ENTITY_ID,\n CONF_FRIENDLY_NAME, STATE_UNKNOWN, CONF_SENSORS)\nfrom homeassistant.core import callback\nimport homeassistant.helpers.config_validation as cv\nfrom homeassistant.helpers.entity import generate_entity_id\nfrom homeassistant.helpers.event import async_track_state_change\nfrom homeassistant.util import utcnow\n\nREQUIREMENTS = ['numpy==1.15.3']\n\n_LOGGER = logging.getLogger(__name__)\n\nATTR_ATTRIBUTE = 'attribute'\nATTR_GRADIENT = 'gradient'\nATTR_MIN_GRADIENT = 'min_gradient'\nATTR_INVERT = 'invert'\nATTR_SAMPLE_DURATION = 'sample_duration'\nATTR_SAMPLE_COUNT = 'sample_count'\n\nCONF_ATTRIBUTE = 'attribute'\nCONF_INVERT = 'invert'\nCONF_MAX_SAMPLES = 'max_samples'\nCONF_MIN_GRADIENT = 'min_gradient'\nCONF_SAMPLE_DURATION = 'sample_duration'\n\nSENSOR_SCHEMA = vol.Schema({\n vol.Required(CONF_ENTITY_ID): cv.entity_id,\n vol.Optional(CONF_ATTRIBUTE): cv.string,\n vol.Optional(CONF_DEVICE_CLASS): DEVICE_CLASSES_SCHEMA,\n vol.Optional(CONF_FRIENDLY_NAME): cv.string,\n vol.Optional(CONF_INVERT, default=False): cv.boolean,\n vol.Optional(CONF_MAX_SAMPLES, default=2): cv.positive_int,\n vol.Optional(CONF_MIN_GRADIENT, default=0.0): vol.Coerce(float),\n vol.Optional(CONF_SAMPLE_DURATION, default=0): cv.positive_int,\n})\n\nPLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({\n vol.Required(CONF_SENSORS): vol.Schema({cv.slug: SENSOR_SCHEMA}),\n})\n\n\ndef setup_platform(hass, config, add_entities, discovery_info=None):\n \"\"\"Set up the trend sensors.\"\"\"\n sensors = []\n\n for device_id, device_config in config[CONF_SENSORS].items():\n entity_id = device_config[ATTR_ENTITY_ID]\n attribute = device_config.get(CONF_ATTRIBUTE)\n device_class = device_config.get(CONF_DEVICE_CLASS)\n friendly_name = device_config.get(ATTR_FRIENDLY_NAME, device_id)\n invert = device_config[CONF_INVERT]\n max_samples = device_config[CONF_MAX_SAMPLES]\n min_gradient = device_config[CONF_MIN_GRADIENT]\n sample_duration = device_config[CONF_SAMPLE_DURATION]\n\n sensors.append(\n SensorTrend(\n hass, device_id, friendly_name, entity_id, attribute,\n device_class, invert, max_samples, min_gradient,\n sample_duration)\n )\n if not sensors:\n _LOGGER.error(\"No sensors added\")\n return\n add_entities(sensors)\n\n\nclass SensorTrend(BinarySensorDevice):\n \"\"\"Representation of a trend Sensor.\"\"\"\n\n def __init__(self, hass, device_id, friendly_name, entity_id,\n attribute, device_class, invert, max_samples,\n min_gradient, sample_duration):\n \"\"\"Initialize the sensor.\"\"\"\n self._hass = hass\n self.entity_id = generate_entity_id(\n ENTITY_ID_FORMAT, device_id, hass=hass)\n self._name = friendly_name\n self._entity_id = entity_id\n self._attribute = attribute\n self._device_class = device_class\n self._invert = invert\n self._sample_duration = sample_duration\n self._min_gradient = min_gradient\n self._gradient = None\n self._state = None\n self.samples = deque(maxlen=max_samples)\n\n @property\n def name(self):\n \"\"\"Return the name of the sensor.\"\"\"\n return self._name\n\n @property\n def 
is_on(self):\n \"\"\"Return true if sensor is on.\"\"\"\n return self._state\n\n @property\n def device_class(self):\n \"\"\"Return the sensor class of the sensor.\"\"\"\n return self._device_class\n\n @property\n def device_state_attributes(self):\n \"\"\"Return the state attributes of the sensor.\"\"\"\n return {\n ATTR_ENTITY_ID: self._entity_id,\n ATTR_FRIENDLY_NAME: self._name,\n ATTR_GRADIENT: self._gradient,\n ATTR_INVERT: self._invert,\n ATTR_MIN_GRADIENT: self._min_gradient,\n ATTR_SAMPLE_COUNT: len(self.samples),\n ATTR_SAMPLE_DURATION: self._sample_duration,\n }\n\n @property\n def should_poll(self):\n \"\"\"No polling needed.\"\"\"\n return False\n\n async def async_added_to_hass(self):\n \"\"\"Complete device setup after being added to hass.\"\"\"\n @callback\n def trend_sensor_state_listener(entity, old_state, new_state):\n \"\"\"Handle state changes on the observed device.\"\"\"\n try:\n if self._attribute:\n state = new_state.attributes.get(self._attribute)\n else:\n state = new_state.state\n if state != STATE_UNKNOWN:\n sample = (utcnow().timestamp(), float(state))\n self.samples.append(sample)\n self.async_schedule_update_ha_state(True)\n except (ValueError, TypeError) as ex:\n _LOGGER.error(ex)\n\n async_track_state_change(\n self.hass, self._entity_id,\n trend_sensor_state_listener)\n\n async def async_update(self):\n \"\"\"Get the latest data and update the states.\"\"\"\n # Remove outdated samples\n if self._sample_duration > 0:\n cutoff = utcnow().timestamp() - self._sample_duration\n while self.samples and self.samples[0][0] < cutoff:\n self.samples.popleft()\n\n if len(self.samples) < 2:\n return\n\n # Calculate gradient of linear trend\n await self.hass.async_add_job(self._calculate_gradient)\n\n # Update state\n self._state = (\n abs(self._gradient) > abs(self._min_gradient) and\n math.copysign(self._gradient, self._min_gradient) == self._gradient\n )\n\n if self._invert:\n self._state = not self._state\n\n def _calculate_gradient(self):\n \"\"\"Compute the linear trend gradient of the current samples.\n\n This need run inside executor.\n \"\"\"\n import numpy as np\n timestamps = np.array([t for t, _ in self.samples])\n values = np.array([s for _, s in self.samples])\n coeffs = np.polyfit(timestamps, values, 1)\n self._gradient = coeffs[0]\n" ]
[ [ "numpy.polyfit", "numpy.array" ] ]
DanialTaheri/KATRec
[ "a54d40196c25a09796665521cf482ba4eada072d" ]
[ "Model/utility/load_data.py" ]
[ "'''\nCreated on Dec 18, 2018\nTensorflow Implementation of Knowledge Graph Attention Network (KGAT) model in:\nWang Xiang et al. KGAT: Knowledge Graph Attention Network for Recommendation. In KDD 2019.\n@author: Xiang Wang (xiangwang@u.nus.edu)\n'''\nimport collections\nimport numpy as np\nimport random as rd\n\nclass Data(object):\n def __init__(self, FLAGS, path):\n self.path = path\n self.FLAGS = FLAGS\n\n\n self.batch_size = FLAGS.batch_size\n\n train_file = path + '/train.txt'\n test_file = path + '/test.txt'\n\n kg_file = path + '/kg_final.txt'\n\n # ----------get number of users and items & then load rating data from train_file & test_file------------.\n self.n_train, self.n_test = 0, 0\n self.n_users, self.n_items = 0, 0\n # Here we get the train_data and create a dictionary with user as the key and purchased items as the values\n #self.train_data is the [[user1, item2], [user1, item3],...]\n self.train_data, self.train_user_dict = self._load_ratings(train_file)\n self.test_data, self.test_user_dict = self._load_ratings(test_file)\n self.exist_users = self.train_user_dict.keys()\n\n self._statistic_ratings()\n\n # ----------get number of entities and relations & then load kg data from kg_file ------------.\n self.n_relations, self.n_entities, self.n_triples = 0, 0, 0\n self.kg_data, self.kg_dict, self.relation_dict = self._load_kg(kg_file)\n\n # ----------print the basic info about the dataset-------------. \n self.batch_size_kg = self.n_triples // FLAGS.n_instances + 1\n# else:\n# self.batch_size_kg = self.n_triples // self.n_train // self.batch_size \n self._print_data_info()\n\n # reading train & test interaction data.\n def _load_ratings(self, file_name):\n user_dict = dict()\n inter_mat = list()\n lines = open(file_name, 'r').readlines()\n for l in lines:\n tmps = l.strip()\n inters = [int(i) for i in tmps.split(' ')]\n\n u_id, pos_ids = inters[0], inters[1:]\n pos_ids = list(set(pos_ids))\n\n for i_id in pos_ids:\n inter_mat.append([u_id, i_id])\n\n if len(pos_ids) > 0:\n user_dict[u_id] = pos_ids\n return np.array(inter_mat), user_dict\n\n def _statistic_ratings(self):\n self.n_users = max(max(self.train_data[:, 0]), max(self.test_data[:, 0])) \n self.n_items = max(max(self.train_data[:, 1]), max(self.test_data[:, 1])) \n self.n_train = len(self.train_data)\n self.n_test = len(self.test_data)\n\n # reading train & test interaction data.\n def _load_kg(self, file_name):\n def _construct_kg(kg_np):\n kg = collections.defaultdict(list)\n rd = collections.defaultdict(list)\n\n for head, relation, tail in kg_np:\n kg[head].append((tail, relation))\n rd[relation].append((head, tail))\n return kg, rd\n\n kg_np = np.loadtxt(file_name, dtype=np.int32)\n kg_np = np.unique(kg_np, axis=0)\n\n # self.n_relations = len(set(kg_np[:, 1]))\n # self.n_entities = len(set(kg_np[:, 0]) | set(kg_np[:, 2]))\n self.n_relations = max(kg_np[:, 1]) \n self.n_entities = max(max(kg_np[:, 0]), max(kg_np[:, 2])) \n self.n_triples = len(kg_np)\n\n kg_dict, relation_dict = _construct_kg(kg_np)\n\n return kg_np, kg_dict, relation_dict\n \n def _print_data_info(self):\n print('[n_users, n_items]=[%d, %d]' % (self.n_users, self.n_items))\n print('[n_train, n_test]=[%d, %d]' % (self.n_train, self.n_test))\n print('[n_entities, n_relations, n_triples]=[%d, %d, %d]' % (self.n_entities, self.n_relations, self.n_triples))\n print('[batch_size, batch_size_kg]=[%d, %d]' % (self.batch_size, self.batch_size_kg))\n # For every batch size, the following function randomly pick a product of every users and put them in a 
pos_items and pick a negative item for every user and put it in the negative items. This way we have users, pos-items and neg_items for every batch\n \"\"\"\n def _generate_train_cf_batch(self): \n if self.batch_size <= self.n_users:\n users = rd.sample(self.exist_users, self.batch_size)\n else:\n users = [rd.choice(self.exist_users) for _ in range(self.batch_size)]\n\n def sample_pos_items_for_u(u, num):\n pos_items = self.train_user_dict[u]\n n_pos_items = len(pos_items)\n pos_batch = []\n while True:\n if len(pos_batch) == num: break\n pos_id = np.random.randint(low=0, high=n_pos_items, size=1)[0]\n pos_i_id = pos_items[pos_id]\n\n if pos_i_id not in pos_batch:\n pos_batch.append(pos_i_id)\n return pos_batch\n\n def sample_neg_items_for_u(u, num):\n neg_items = []\n while True:\n if len(neg_items) == num: break\n neg_i_id = np.random.randint(low=0, high=self.n_items,size=1)[0]\n\n if neg_i_id not in self.train_user_dict[u] and neg_i_id not in neg_items:\n neg_items.append(neg_i_id)\n return neg_items\n\n pos_items, neg_items = [], []\n for u in users:\n pos_items += sample_pos_items_for_u(u, 1)\n neg_items += sample_neg_items_for_u(u, 1)\n return users, pos_items, neg_items\n \n \"\"\"\n def _generate_train_cf_batch(self): \n if self.batch_size <= self.n_users:\n users_in_batch = rd.sample(self.exist_users, self.batch_size)\n else:\n users_in_batch = [rd.choice(self.exist_users) for _ in range(self.batch_size)]\n\n def sample_pos_items_for_u(u, num):\n pos_items = self.train_user_dict[u]\n n_pos_items = len(pos_items)\n pos_batch = []\n while True:\n if len(pos_batch) == num: break\n \n if num > 1:\n pos_batch.extend(pos_items)\n else:\n pos_id = np.random.randint(low=0, high=n_pos_items, size=1)[0]\n pos_i_id = pos_items[pos_id]\n\n if pos_i_id not in pos_batch:\n pos_batch.append(pos_i_id)\n return pos_batch\n\n def sample_neg_items_for_u(u, num):\n neg_items = []\n while True:\n if len(neg_items) == num: break\n neg_i_id = np.random.randint(low=0, high=self.n_items,size=1)[0]\n\n if neg_i_id not in self.train_user_dict[u] and neg_i_id not in neg_items:\n neg_items.append(neg_i_id)\n return neg_items\n\n pos_items, neg_items = [], []\n n_items = 0 # number of items in the batch\n users = [] # we want to put the full sequence of one user and add one item of other users randomly \n # Check if you are in the range of batch_size\n \n while n_items <= self.batch_size:\n u = rd.sample(users_in_batch, 1)[0]\n if u in users:\n break\n else:\n # put the full sequence of the first user\n if len(users) == 0: \n \n users.extend([u for _ in range(len(self.train_user_dict[u]))])\n n_items += len(self.train_user_dict[u])\n pos_items += sample_pos_items_for_u(u, len(self.train_user_dict[u]))\n neg_items += sample_neg_items_for_u(u, len(self.train_user_dict[u]))\n # put one item of other users\n else:\n users.append(u)\n n_items += 1\n pos_items += sample_pos_items_for_u(u, 1)\n neg_items += sample_neg_items_for_u(u, 1)\n\n return users, pos_items, neg_items\n\n\n\n\n def get_sparsity_split(self):\n try:\n split_uids, split_state = [], []\n lines = open(self.path + '/sparsity.split', 'r').readlines()\n\n for idx, line in enumerate(lines):\n if idx % 2 == 0:\n split_state.append(line.strip())\n print(line.strip())\n else:\n split_uids.append([int(uid) for uid in line.strip().split(' ')])\n print('get sparsity split.')\n\n except Exception:\n split_uids, split_state = self.create_sparsity_split()\n f = open(self.path + '/sparsity.split', 'w')\n for idx in range(len(split_state)):\n 
f.write(split_state[idx] + '\\n')\n f.write(' '.join([str(uid) for uid in split_uids[idx]]) + '\\n')\n print('create sparsity split.')\n\n return split_uids, split_state\n\n\n\n def create_sparsity_split(self):\n all_users_to_test = list(self.test_user_dict.keys())\n user_n_iid = dict()\n\n # generate a dictionary to store (key=n_iids, value=a list of uid).\n for uid in all_users_to_test:\n train_iids = self.train_user_dict[uid]\n test_iids = self.test_user_dict[uid]\n\n n_iids = len(train_iids) + len(test_iids)\n\n if n_iids not in user_n_iid.keys():\n user_n_iid[n_iids] = [uid]\n else:\n user_n_iid[n_iids].append(uid)\n split_uids = list()\n\n # split the whole user set into four subset.\n temp = []\n count = 1\n fold = 4\n n_count = (self.n_train + self.n_test)\n n_rates = 0\n\n split_state = []\n for idx, n_iids in enumerate(sorted(user_n_iid)):\n temp += user_n_iid[n_iids]\n n_rates += n_iids * len(user_n_iid[n_iids])\n n_count -= n_iids * len(user_n_iid[n_iids])\n\n if n_rates >= count * 0.25 * (self.n_train + self.n_test):\n split_uids.append(temp)\n\n state = '#inter per user<=[%d], #users=[%d], #all rates=[%d]' %(n_iids, len(temp), n_rates)\n split_state.append(state)\n print(state)\n\n temp = []\n n_rates = 0\n fold -= 1\n\n if idx == len(user_n_iid.keys()) - 1 or n_count == 0:\n split_uids.append(temp)\n\n state = '#inter per user<=[%d], #users=[%d], #all rates=[%d]' % (n_iids, len(temp), n_rates)\n split_state.append(state)\n print(state)\n\n\n return split_uids, split_state" ]
[ [ "numpy.array", "numpy.random.randint", "numpy.loadtxt", "numpy.unique" ] ]
ivarlokhorst/dask-geomodeling
[ "392aac0d7ee271c9e817e9654cf178f75ae76265" ]
[ "dask_geomodeling/geometry/aggregate.py" ]
[ "\"\"\"\nModule containing raster blocks that aggregate rasters.\n\"\"\"\nfrom math import ceil, floor, log, sqrt\nfrom collections import defaultdict\nfrom functools import partial\nimport warnings\n\nfrom scipy import ndimage\nimport numpy as np\nimport geopandas as gpd\n\nfrom dask import config\nfrom dask_geomodeling import measurements\nfrom dask_geomodeling import utils\nfrom dask_geomodeling.raster import RasterBlock\n\nfrom .base import GeometryBlock\n\n__all__ = [\"AggregateRaster\", \"AggregateRasterAboveThreshold\"]\n\n\nclass Bucket:\n \"\"\"\n Track objects in an imaginary grid that may span up to 4 cells.\n \"\"\"\n\n def __init__(self):\n self.cells = set()\n self.indices = []\n\n def __contains__(self, cells):\n \"\"\"\n Return wether any of the cells defined by indices is already occupied.\n \"\"\"\n return bool(self.cells & cells)\n\n def add(self, index, cells):\n \"\"\"\n Update the set of occupied cells with cells and append index to the\n list of indices.\n\n Note that this does not fail if cells intersects with already occupied\n cells.\n \"\"\"\n self.indices.append(index)\n self.cells.update(cells)\n\n\ndef calculate_level_and_cells(bbox):\n \"\"\"\n Return a tuple (level, cells).\n\n :param bboxes: list of (xmin, ymin, xmax, ymax) tuples\n\n The returned cells is a set of indices which represent occupied cells (at\n most 4) in an imaginary sparse grid that has a cellsize defined by the\n integer level. Level 0 corresponds to the unit cell. Each doubling of the\n cellsize level increase corresponds to a doubling of the cellsize of the\n previous level.\n \"\"\"\n x1, y1, x2, y2 = bbox\n level = -ceil(log(max(x2 - x1, y2 - y1), 2))\n\n width = 0.5 ** level\n height = 0.5 ** level\n\n j1 = floor(x1 / width)\n j2 = floor(x2 / width)\n i1 = floor(y1 / height)\n i2 = floor(y2 / height)\n\n return level, {(i1, j1), (i1, j2), (i2, j1), (i2, j2)}\n\n\ndef bucketize(bboxes):\n \"\"\"\n Return list of lists with indices into bboxes.\n\n :param bboxes: list of (xmin, ymin, xmax, ymax) tuples\n\n Each sublist in the returned list points to a subset of disjoint bboxes.\n Instead of aiming for the smallest amount of subsets possible, this\n approach focuses on speed by avoiding costly intersection operations on all\n bboxes in a bucket.\n \"\"\"\n bucket_dict = defaultdict(list)\n\n for index, bbox in enumerate(bboxes):\n level, cells = calculate_level_and_cells(bbox)\n\n # select bucket by feature size\n bucket_list = bucket_dict[level]\n\n for bucket in bucket_list:\n # see if it fits in this bucket\n if cells in bucket:\n continue\n # a suitable bucket has been found, break out of the for loop\n break\n else:\n # no break, no suitable bucket found, assign and append a new one\n bucket = Bucket()\n bucket_list.append(bucket)\n\n # add the item to the bucket\n bucket.add(index=index, cells=cells)\n\n return [\n bucket.indices for bucket_list in bucket_dict.values() for bucket in bucket_list\n ]\n\n\nclass AggregateRaster(GeometryBlock):\n \"\"\"\n Compute a statistics of a raster for each geometry in a geometry source.\n\n A statistic is computed in a specific projection and with a specified cell\n size. 
If ``projection`` or ``pixel_size`` are not given, these default to\n the native projection of the provided raster source.\n\n Should the combination of the requested pixel_size and the extent of the\n source geometry cause the requested raster size to exceed max_pixels, the\n pixel_size is adjusted automatically if ``auto_pixel_size = True``, else\n a RuntimeError is raised.\n\n Please note that for any field operation on the result of the aggregation\n a GetSeriesBlock should be used to retrieve data from the added column. The\n name of the added column is determined by the ``column_name`` parameter.\n\n Args:\n source (GeometryBlock): The geometry source for which the statistics are\n determined.\n raster (RasterBlock): The raster source that is sampled.\n statistic (str): The type of statistical analysis that should be\n performed. The options are: ``{\"sum\", \"count\", \"min\", \"max\", \"mean\",\n \"median\", \"p<percentile>\"}``. Percentiles are provided for example as\n follows: ``\"p50\"``. Default ``\"sum\"``.\n projection (str, optional): Projection to perform the aggregation in, for\n example ``\"EPSG:28992\"``. Defaults to the native projection of the\n supplied raster.\n pixel_size (float, optional): The raster cell size used in the\n aggregation. Defaults to the cell size of the supplied raster.\n max_pixels (int, optional): The maximum number of pixels (cells) in the\n aggregation. Defaults to the ``geomodeling.raster-limit`` setting.\n column_name (str, optional): The name of the column where the result\n should be placed. Defaults to ``\"agg\"``.\n auto_pixel_size (boolean): Determines whether the pixel size is adjusted\n automatically when ``\"max_pixels\"`` is exceeded. Default False.\n\n Returns: \n GeometryBlock with aggregation results in an added column\n\n The global raster-limit setting can be adapted as follows:\n >>> from dask import config\n >>> config.set({\"geomodeling.raster-limit\": 10 ** 9})\n \"\"\"\n\n # extensive (opposite: intensive) means: additive, proportional to size\n STATISTICS = {\n \"sum\": {\"func\": ndimage.sum, \"extensive\": True},\n \"count\": {\"func\": ndimage.sum, \"extensive\": True},\n \"min\": {\"func\": ndimage.minimum, \"extensive\": False},\n \"max\": {\"func\": ndimage.maximum, \"extensive\": False},\n \"mean\": {\"func\": ndimage.mean, \"extensive\": False},\n \"median\": {\"func\": ndimage.median, \"extensive\": False},\n \"percentile\": {\"func\": measurements.percentile, \"extensive\": False},\n }\n\n def __init__(\n self,\n source,\n raster,\n statistic=\"sum\",\n projection=None,\n pixel_size=None,\n max_pixels=None,\n column_name=\"agg\",\n auto_pixel_size=False,\n *args\n ):\n if not isinstance(source, GeometryBlock):\n raise TypeError(\"'{}' object is not allowed\".format(type(source)))\n if not isinstance(raster, RasterBlock):\n raise TypeError(\"'{}' object is not allowed\".format(type(raster)))\n if not isinstance(statistic, str):\n raise TypeError(\"'{}' object is not allowed\".format(type(statistic)))\n statistic = statistic.lower()\n percentile = utils.parse_percentile_statistic(statistic)\n if percentile:\n statistic = \"p{0}\".format(percentile)\n elif statistic not in self.STATISTICS or statistic == \"percentile\":\n raise ValueError(\"Unknown statistic '{}'\".format(statistic))\n\n if projection is None:\n projection = raster.projection\n if not isinstance(projection, str):\n raise TypeError(\"'{}' object is not allowed\".format(type(projection)))\n if pixel_size is None:\n # get the pixel_size from the raster 
geo_transform\n geo_transform = raster.geo_transform\n if geo_transform is None:\n raise ValueError(\n \"Cannot get the pixel_size from the source \"\n \"raster. Please provide a pixel_size.\"\n )\n pixel_size = min(abs(float(geo_transform[1])), abs(float(geo_transform[5])))\n else:\n pixel_size = abs(float(pixel_size))\n if pixel_size == 0.0:\n raise ValueError(\"Pixel size cannot be 0\")\n if max_pixels is not None:\n max_pixels = int(max_pixels)\n if not isinstance(auto_pixel_size, bool):\n raise TypeError(\"'{}' object is not allowed\".format(type(auto_pixel_size)))\n\n super(AggregateRaster, self).__init__(\n source,\n raster,\n statistic,\n projection,\n pixel_size,\n max_pixels,\n column_name,\n auto_pixel_size,\n *args\n )\n\n @property\n def source(self):\n return self.args[0]\n\n @property\n def raster(self):\n return self.args[1]\n\n @property\n def statistic(self):\n return self.args[2]\n\n @property\n def projection(self):\n return self.args[3]\n\n @property\n def pixel_size(self):\n return self.args[4]\n\n @property\n def max_pixels(self):\n return self.args[5]\n\n @property\n def column_name(self):\n return self.args[6]\n\n @property\n def auto_pixel_size(self):\n return self.args[7]\n\n @property\n def columns(self):\n return self.source.columns | {self.column_name}\n\n def get_sources_and_requests(self, **request):\n if request.get(\"mode\") == \"extent\":\n return [(self.source, request), (None, None), ({\"mode\": \"extent\"}, None)]\n\n req_srs = request[\"projection\"]\n agg_srs = self.projection\n\n # acquire the extent of the geometry data\n extent_request = {**request, \"mode\": \"extent\"}\n extent = self.source.get_data(**extent_request)[\"extent\"]\n\n if extent is None:\n # make sources_and_request so that we get an empty result\n return [\n (None, None),\n (None, None),\n ({\"empty\": True, \"projection\": req_srs}, None),\n ]\n\n # transform the extent into the projection in which we aggregate\n x1, y1, x2, y2 = utils.transform_extent(extent, req_srs, agg_srs)\n\n # estimate the amount of required pixels\n required_pixels = int(((x2 - x1) * (y2 - y1)) / (self.pixel_size ** 2))\n\n # in case this request is too large, we adapt pixel size\n max_pixels = self.max_pixels\n if max_pixels is None:\n max_pixels = config.get(\"geomodeling.raster-limit\")\n pixel_size = self.pixel_size\n\n if required_pixels > max_pixels and self.auto_pixel_size:\n # adapt with integer multiples of pixel_size\n pixel_size *= ceil(sqrt(required_pixels / max_pixels))\n elif required_pixels > max_pixels:\n raise RuntimeError(\n \"The required raster size for the aggregation exceeded \"\n \"the maximum ({} > {})\".format(required_pixels, max_pixels)\n )\n\n # snap the extent to (0, 0) to prevent subpixel shifts\n x1 = floor(x1 / pixel_size) * pixel_size\n y1 = floor(y1 / pixel_size) * pixel_size\n x2 = ceil(x2 / pixel_size) * pixel_size\n y2 = ceil(y2 / pixel_size) * pixel_size\n\n # compute the width and height\n width = max(int((x2 - x1) / pixel_size), 1)\n height = max(int((y2 - y1) / pixel_size), 1)\n\n raster_request = {\n \"mode\": \"vals\",\n \"projection\": agg_srs,\n \"start\": request.get(\"start\"),\n \"stop\": request.get(\"stop\"),\n \"aggregation\": None, # TODO\n \"bbox\": (x1, y1, x2, y2),\n \"width\": width,\n \"height\": height,\n }\n\n process_kwargs = {\n \"mode\": request.get(\"mode\", \"intersects\"),\n \"pixel_size\": self.pixel_size,\n \"agg_srs\": agg_srs,\n \"req_srs\": req_srs,\n \"actual_pixel_size\": pixel_size,\n \"statistic\": self.statistic,\n 
\"result_column\": self.column_name,\n \"agg_bbox\": (x1, y1, x2, y2),\n }\n\n return [\n (self.source, request),\n (self.raster, raster_request),\n (process_kwargs, None),\n ]\n\n @staticmethod\n def process(geom_data, raster_data, process_kwargs):\n if process_kwargs.get(\"empty\"):\n return {\n \"features\": gpd.GeoDataFrame([]),\n \"projection\": process_kwargs[\"projection\"],\n }\n elif process_kwargs[\"mode\"] == \"extent\":\n return geom_data\n\n features = geom_data[\"features\"]\n if len(features) == 0:\n return geom_data\n\n result = features.copy()\n\n # transform the features into the aggregation projection\n req_srs = process_kwargs[\"req_srs\"]\n agg_srs = process_kwargs[\"agg_srs\"]\n\n agg_geometries = utils.geoseries_transform(\n features[\"geometry\"], req_srs, agg_srs,\n )\n\n statistic = process_kwargs[\"statistic\"]\n percentile = utils.parse_percentile_statistic(statistic)\n if percentile:\n statistic = \"percentile\"\n agg_func = partial(\n AggregateRaster.STATISTICS[statistic][\"func\"], qval=percentile\n )\n else:\n agg_func = AggregateRaster.STATISTICS[statistic][\"func\"]\n\n extensive = AggregateRaster.STATISTICS[statistic][\"extensive\"]\n result_column = process_kwargs[\"result_column\"]\n\n # this is only there for the AggregateRasterAboveThreshold\n threshold_name = process_kwargs.get(\"threshold_name\")\n\n # investigate the raster data\n if raster_data is None:\n values = no_data_value = None\n else:\n values = raster_data[\"values\"]\n no_data_value = raster_data[\"no_data_value\"]\n if values is None or np.all(values == no_data_value): # skip the rest\n result[result_column] = 0 if extensive else np.nan\n return {\"features\": result, \"projection\": req_srs}\n depth, height, width = values.shape\n\n pixel_size = process_kwargs[\"pixel_size\"]\n actual_pixel_size = process_kwargs[\"actual_pixel_size\"]\n\n # process in groups of disjoint subsets of the features\n agg = np.full((depth, len(features)), np.nan, dtype=\"f4\")\n for select in bucketize(features.bounds.values):\n agg_geometries_bucket = agg_geometries.iloc[select]\n index = features.index[select]\n\n rasterize_result = utils.rasterize_geoseries(\n agg_geometries_bucket,\n process_kwargs[\"agg_bbox\"],\n agg_srs,\n height,\n width,\n values=index,\n )\n labels = rasterize_result[\"values\"][0]\n\n # if there is a threshold, generate a raster with thresholds\n if threshold_name:\n thresholds = features.loc[\n labels.ravel(), threshold_name\n ].values.reshape(labels.shape)\n else:\n thresholds = None\n\n for frame_no, frame in enumerate(values):\n # limit statistics to active pixels\n active = frame != no_data_value\n # if there is a threshold, mask the frame\n if threshold_name:\n valid = ~np.isnan(thresholds) # to suppress warnings\n active[~valid] = False # no threshold -> no aggregation\n active[valid] &= frame[valid] >= thresholds[valid]\n\n # if there is no single active value: do not aggregate\n if not active.any():\n continue\n\n with warnings.catch_warnings():\n # we may get divide by 0 if any geometry does not contain\n # any 'active' values\n warnings.simplefilter(\"ignore\")\n agg[frame_no][select] = agg_func(\n 1 if statistic == \"count\" else frame[active],\n labels=labels[active],\n index=index,\n )\n\n if extensive: # sum and count\n agg[~np.isfinite(agg)] = 0\n # extensive aggregations have to be scaled\n if actual_pixel_size != pixel_size:\n agg *= (actual_pixel_size / pixel_size) ** 2\n else:\n agg[~np.isfinite(agg)] = np.nan # replaces inf by nan\n\n if depth == 1:\n 
result[result_column] = agg[0]\n else:\n # store an array in a dataframe cell: set each cell with [np.array]\n result[result_column] = [[x] for x in agg.T]\n\n return {\"features\": result, \"projection\": req_srs}\n\n\nclass AggregateRasterAboveThreshold(AggregateRaster):\n \"\"\"\n Compute a statistics of a per-feature masked raster for each geometry in a\n geometry source.\n\n Per feature, a threshold can be supplied to mask the raster with. Only\n values that exceed the threshold of a specific feature are included for\n the statistical value of that feature.\n\n See :class:``dask_geomodeling.geometry.aggregate.AggregateRaster`` for\n further information.\n\n Args:\n threshold_name (str): The column that holds the thresholds.\n **kwargs: See :class:``dask_geomodeling.geometry.aggregate.AggregateRaster``\n\n Returns:\n GeometryBlock with aggregation results in an added column\n \"\"\"\n def __init__(\n self,\n source,\n raster,\n statistic=\"sum\",\n projection=None,\n pixel_size=None,\n max_pixels=None,\n column_name=\"agg\",\n auto_pixel_size=False,\n threshold_name=None,\n ):\n if not isinstance(threshold_name, str):\n raise TypeError(\"'{}' object is not allowed\".format(type(threshold_name)))\n if threshold_name not in source.columns:\n raise KeyError(\"Column '{}' is not available\".format(threshold_name))\n super().__init__(\n source,\n raster,\n statistic,\n projection,\n pixel_size,\n max_pixels,\n column_name,\n auto_pixel_size,\n threshold_name,\n )\n\n @property\n def threshold_name(self):\n return self.args[8]\n\n def get_sources_and_requests(self, **request):\n src_and_req = super().get_sources_and_requests(**request)\n process_kwargs = src_and_req[2][0]\n process_kwargs[\"threshold_name\"] = self.threshold_name\n return src_and_req\n" ]
[ [ "numpy.all", "numpy.isnan", "numpy.isfinite" ] ]
TencentYoutuResearch/Pruning-PFF
[ "0ed6a266cfbfdc1858ce475b3b1e78b18eb5cd2b" ]
[ "models/vgg.py" ]
[ "import torch\nimport torch.nn as nn\nfrom .stripe import *\n\n__all__ = ['VGG']\ndefault_cfg = {\n 'VGG11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],\n 'VGG13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],\n 'VGG16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],\n 'VGG19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],\n}\n\n\nclass VGG(nn.Module):\n def __init__(self, num_classes=10, cfg=None):\n super(VGG, self).__init__()\n if cfg is None:\n cfg = default_cfg['VGG16']\n self.features = self._make_layers(cfg)\n self.classifier = Linear(512, num_classes)\n\n def forward(self, x):\n out = self.features(x)\n out = out.view(out.size(0), -1)\n out = self.classifier(out)\n return out\n\n def _make_layers(self, cfg):\n layers = []\n in_channels = 3\n for x in cfg:\n if x == 'M':\n layers += [nn.MaxPool2d(kernel_size=2, stride=2)]\n else:\n layers += [FilterStripe(in_channels, x),\n BatchNorm(x),\n nn.ReLU(inplace=True)]\n in_channels = x\n layers += [nn.AvgPool2d(kernel_size=1, stride=1)]\n return nn.Sequential(*layers)\n\n def update_skeleton(self, sr, threshold):\n for key, m in self.named_modules():\n if isinstance(m, FilterStripe):\n out_mask = m.update_skeleton(sr, threshold)\n elif isinstance(m, BatchNorm):\n m.update_mask(out_mask)\n\n def prune(self, threshold):\n mask_num = 0\n in_mask = torch.ones(3) > 0\n for key, m in self.named_modules():\n if isinstance(m, FilterStripe):\n m.prune_in(in_mask)\n in_mask = m.prune_out(threshold)\n mask_num += m.FilterSkeleton.nelement()\n m._break(threshold)\n if isinstance(m, BatchNorm):\n m.prune(in_mask)\n if isinstance(m, Linear):\n m.prune_in(in_mask)\n return mask_num\n" ]
[ [ "torch.nn.Sequential", "torch.ones", "torch.nn.MaxPool2d", "torch.nn.AvgPool2d", "torch.nn.ReLU" ] ]
hotchya/basic-pytorch
[ "82dca4ee8ccc3596a58d2573f914c756e8740ed9" ]
[ "main.py" ]
[ "import os\nimport argparse\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torchvision import datasets, transforms\nfrom torch.autograd import Variable\n\nimport models\n\ndef save_model(model):\n state = {\n 'acc' : args.acc,\n 'model_state_dict': model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict()\n }\n file_name = '.'.join([args.model, args.dataset, 'pth.tar'])\n torch.save(state, os.path.join('saved_models/', file_name))\n print('save model : {}'.format(file_name))\n \n\ndef train(epoch):\n model.train()\n for batch_idx, (data, target) in enumerate(train_loader):\n if args.cuda:\n data, target = data.cuda(), target.cuda()\n data, target = Variable(data), Variable(target)\n optimizer.zero_grad()\n output = model(data)\n loss = criterion(output, target)\n loss.backward()\n optimizer.step()\n if batch_idx % args.log_interval == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n epoch, batch_idx * len(data), len(train_loader.dataset),\n 100. * batch_idx / len(train_loader), loss.data))\n return\n\ndef test():\n model.eval()\n test_loss = 0\n correct = 0\n with torch.no_grad():\n for data, target in test_loader:\n if args.cuda:\n data, target = data.cuda(), target.cuda()\n data, target = Variable(data), Variable(target)\n output = model(data)\n test_loss += criterion(output, target).data\n pred = output.data.max(1, keepdim=True)[1]\n correct += pred.eq(target.data.view_as(pred)).cpu().sum()\n acc = 100. * float(correct) / len(test_loader.dataset)\n\n if acc > args.acc and not args.evaluate:\n args.acc = acc\n save_model(model)\n\n test_loss /= len(test_loader.dataset)\n print('\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%), Best Acc : {}'.format(\n test_loss * args.batch_size, correct, len(test_loader.dataset),\n 100. 
* float(correct) / len(test_loader.dataset), args.acc))\n return\n\nif __name__=='__main__':\n # argument\n parser = argparse.ArgumentParser(description='Basic Pytorch')\n parser.add_argument('--dataset', action='store', default='MNIST',\n help='dataset: MNIST |')\n parser.add_argument('--batch-size', type=int, default=256, metavar='N',\n help='input batch size for training (default: 256)')\n parser.add_argument('--test-batch-size', type=int, default=256, metavar='N',\n help='input batch size for testing (default: 256)')\n parser.add_argument('--model', action='store', default='LeNet5',\n help='model: LeNet5 |')\n parser.add_argument('--lr', type=float, default=0.1, metavar='LR',\n help='learning rate (default: 0.1)')\n parser.add_argument('--momentum', type=float, default=0.9, metavar='M',\n help='SGD momentum (default: 0.9)')\n parser.add_argument('--weight-decay', '--wd', default=5e-4, type=float,\n metavar='W', help='weight decay (default: 5e-4)')\n parser.add_argument('--epochs', type=int, default=200, metavar='N',\n help='number of epochs to train (default: 200)')\n parser.add_argument('--log-interval', type=int, default=100, metavar='N',\n help='how many batches to wait before logging training status')\n parser.add_argument('--evaluate', action=\"store_true\",\n help='evaluate model')\n parser.add_argument('--pretrained', action='store_true',\n help='pretrained')\n\n args = parser.parse_args()\n\n ## accuracy state\n args.acc = 0.0\n\n ## cuda flag\n args.cuda = torch.cuda.is_available()\n print(args.cuda)\n\n ## control random seed [torch, cuda, cuDnn]\n torch.manual_seed(1)\n\n if args.cuda:\n torch.cuda.manual_seed(1)\n ## There is a problem that the computation processing speed is reduced\n torch.backends.cudnn.deterministic=True\n torch.backends.cudnn.benchmark = False\n\n\n ## load dataset\n if args.dataset == 'MNIST':\n transform = transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,)) ])\n train_data = datasets.MNIST('data', train=True, download=True, transform=transform)\n test_data = datasets.MNIST('data', train=False, download=True, transform=transform)\n\n kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}\n train_loader = torch.utils.data.DataLoader(train_data, batch_size=args.batch_size, shuffle=True, **kwargs)\n test_loader = torch.utils.data.DataLoader(test_data, batch_size=args.test_batch_size, shuffle=False, **kwargs)\n\n num_classes = 10\n \n\n ## load model\n if args.model == 'LeNet5':\n model = models.LeNet5()\n args.momentum = 0\n args.weight_decay = 0\n \n if args.cuda:\n model.cuda()\n \n ## optimizer & criterion\n optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)\n criterion = nn.CrossEntropyLoss()\n\n\n ## load pretrained model\n if args.pretrained:\n file_name = '.'.join([args.model, args.dataset, 'pth.tar'])\n pretrained_model = torch.load('saved_models/'+file_name)\n args.acc = pretrained_model['acc']\n model.load_state_dict(pretrained_model['model_state_dict'])\n optimizer.load_state_dict(pretrained_model['optimizer_state_dict'])\n\n # print the number of model parameters\n model_parameters = filter(lambda p: p.requires_grad, model.parameters())\n params = sum([np.prod(p.size()) for p in model_parameters])\n print('Total parameter number:', params, '\\n')\n\n\n ## test\n if args.evaluate:\n test()\n exit()\n\n ## train\n for epoch in range(1, args.epochs + 1):\n train(epoch)\n test()" ]
[ [ "torch.nn.CrossEntropyLoss", "torch.cuda.manual_seed", "torch.load", "torch.manual_seed", "torch.utils.data.DataLoader", "torch.no_grad", "torch.cuda.is_available", "torch.autograd.Variable" ] ]
bmathias12/model-analysis
[ "87bedbcf817557a40aaaf7bad614763f8713e5b3" ]
[ "binaryclassifier/plotting/prediction_density.py" ]
[ "import matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.stats import gaussian_kde\n\ndef plot_prediction_density(\n y_true, scores, figsize=(8,5),\n title='Prediction Density Plot',\n colors=['red', 'blue']):\n\n class_set = sorted(set(y_true))\n\n x_grid = np.linspace(0, 1, 1000)\n\n fig, ax = plt.subplots(figsize=figsize)\n for i, value in enumerate(class_set):\n arr = scores[y_true == value]\n kernel = gaussian_kde(arr, bw_method='scott')\n kde = kernel.evaluate(x_grid)\n ax.plot(x_grid, kde, linewidth=2.5, label='Target = {}'.format(value),\n color=colors[i])\n ax.fill_between(x_grid, kde, alpha=0.6, color=colors[i])\n plt.title(title)\n plt.xlabel('Model Score')\n plt.ylabel('Kernel Density')\n plt.legend()\n plt.close(fig)\n return fig" ]
[ [ "matplotlib.pyplot.legend", "numpy.linspace", "matplotlib.pyplot.title", "matplotlib.pyplot.subplots", "scipy.stats.gaussian_kde", "matplotlib.pyplot.close", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.ylabel" ] ]
waddupitzme/graph-neural-pde
[ "004a30c9e838866ac8b78d14b7414224a24014a5" ]
[ "src/function_transformer_attention.py" ]
[ "import torch\nfrom torch import nn\nfrom torch_geometric.utils import softmax\nimport torch_sparse\nfrom torch_geometric.utils.loop import add_remaining_self_loops\nimport numpy as np\nfrom data import get_dataset\nfrom utils import MaxNFEException, squareplus\nfrom base_classes import ODEFunc\n\n\nclass ODEFuncTransformerAtt(ODEFunc):\n\n def __init__(self, in_features, out_features, opt, data, device):\n super(ODEFuncTransformerAtt, self).__init__(opt, data, device)\n\n if opt['self_loop_weight'] > 0:\n self.edge_index, self.edge_weight = add_remaining_self_loops(data.edge_index, data.edge_attr,\n fill_value=opt['self_loop_weight'])\n else:\n self.edge_index, self.edge_weight = data.edge_index, data.edge_attr\n self.multihead_att_layer = SpGraphTransAttentionLayer(in_features, out_features, opt,\n device, edge_weights=self.edge_weight).to(device)\n\n def multiply_attention(self, x, attention, v=None):\n # todo would be nice if this was more efficient\n if self.opt['mix_features']:\n vx = torch.mean(torch.stack(\n [torch_sparse.spmm(self.edge_index, attention[:, idx], v.shape[0], v.shape[0], v[:, :, idx]) for idx in\n range(self.opt['heads'])], dim=0),\n dim=0)\n ax = self.multihead_att_layer.Wout(vx)\n else:\n mean_attention = attention.mean(dim=1)\n ax = torch_sparse.spmm(self.edge_index, mean_attention, x.shape[0], x.shape[0], x)\n return ax\n\n def forward(self, t, x): # t is needed when called by the integrator\n if self.nfe > self.opt[\"max_nfe\"]:\n raise MaxNFEException\n\n self.nfe += 1\n attention, values = self.multihead_att_layer(x, self.edge_index)\n ax = self.multiply_attention(x, attention, values)\n\n if not self.opt['no_alpha_sigmoid']:\n alpha = torch.sigmoid(self.alpha_train)\n else:\n alpha = self.alpha_train\n f = alpha * (ax - x)\n if self.opt['add_source']:\n f = f + self.beta_train * self.x0\n return f\n\n def __repr__(self):\n return self.__class__.__name__ + ' (' + str(self.in_features) + ' -> ' + str(self.out_features) + ')'\n\n\nclass SpGraphTransAttentionLayer(nn.Module):\n \"\"\"\n Sparse version GAT layer, similar to https://arxiv.org/abs/1710.10903\n \"\"\"\n\n def __init__(self, in_features, out_features, opt, device, concat=True, edge_weights=None):\n super(SpGraphTransAttentionLayer, self).__init__()\n self.in_features = in_features\n self.out_features = out_features\n self.alpha = opt['leaky_relu_slope']\n self.concat = concat\n self.device = device\n self.opt = opt\n self.h = int(opt['heads'])\n self.edge_weights = edge_weights\n\n try:\n self.attention_dim = opt['attention_dim']\n except KeyError:\n self.attention_dim = out_features\n\n assert self.attention_dim % self.h == 0, \"Number of heads ({}) must be a factor of the dimension size ({})\".format(\n self.h, self.attention_dim)\n self.d_k = self.attention_dim // self.h\n\n if self.opt['beltrami'] and self.opt['attention_type'] == \"exp_kernel\":\n self.output_var_x = nn.Parameter(torch.ones(1))\n self.lengthscale_x = nn.Parameter(torch.ones(1))\n self.output_var_p = nn.Parameter(torch.ones(1))\n self.lengthscale_p = nn.Parameter(torch.ones(1))\n self.Qx = nn.Linear(opt['hidden_dim']-opt['pos_enc_hidden_dim'], self.attention_dim)\n self.init_weights(self.Qx)\n self.Vx = nn.Linear(opt['hidden_dim']-opt['pos_enc_hidden_dim'], self.attention_dim)\n self.init_weights(self.Vx)\n self.Kx = nn.Linear(opt['hidden_dim']-opt['pos_enc_hidden_dim'], self.attention_dim)\n self.init_weights(self.Kx)\n\n self.Qp = nn.Linear(opt['pos_enc_hidden_dim'], self.attention_dim)\n self.init_weights(self.Qp)\n self.Vp = 
nn.Linear(opt['pos_enc_hidden_dim'], self.attention_dim)\n self.init_weights(self.Vp)\n self.Kp = nn.Linear(opt['pos_enc_hidden_dim'], self.attention_dim)\n self.init_weights(self.Kp)\n\n else:\n self.Q = nn.Linear(in_features, self.attention_dim)\n self.init_weights(self.Q)\n\n self.V = nn.Linear(in_features, self.attention_dim)\n self.init_weights(self.V)\n\n self.K = nn.Linear(in_features, self.attention_dim)\n self.init_weights(self.K)\n\n self.activation = nn.Sigmoid() # nn.LeakyReLU(self.alpha)\n\n self.Wout = nn.Linear(self.d_k, in_features)\n self.init_weights(self.Wout)\n\n def init_weights(self, m):\n if type(m) == nn.Linear:\n # nn.init.xavier_uniform_(m.weight, gain=1.414)\n # m.bias.data.fill_(0.01)\n nn.init.constant_(m.weight, 1e-5)\n\n def forward(self, x, edge):\n \"\"\"\n x might be [features, augmentation, positional encoding, labels]\n \"\"\"\n # if self.opt['beltrami'] and self.opt['attention_type'] == \"exp_kernel\":\n if self.opt['beltrami'] and self.opt['attention_type'] == \"exp_kernel\":\n label_index = self.opt['feat_hidden_dim'] + self.opt['pos_enc_hidden_dim']\n p = x[:, self.opt['feat_hidden_dim']: label_index]\n x = torch.cat((x[:, :self.opt['feat_hidden_dim']], x[:, label_index:]), dim=1)\n\n qx = self.Qx(x)\n kx = self.Kx(x)\n vx = self.Vx(x)\n # perform linear operation and split into h heads\n kx = kx.view(-1, self.h, self.d_k)\n qx = qx.view(-1, self.h, self.d_k)\n vx = vx.view(-1, self.h, self.d_k)\n # transpose to get dimensions [n_nodes, attention_dim, n_heads]\n kx = kx.transpose(1, 2)\n qx = qx.transpose(1, 2)\n vx = vx.transpose(1, 2)\n src_x = qx[edge[0, :], :, :]\n dst_x = kx[edge[1, :], :, :]\n\n qp = self.Qp(p)\n kp = self.Kp(p)\n vp = self.Vp(p)\n # perform linear operation and split into h heads\n kp = kp.view(-1, self.h, self.d_k)\n qp = qp.view(-1, self.h, self.d_k)\n vp = vp.view(-1, self.h, self.d_k)\n # transpose to get dimensions [n_nodes, attention_dim, n_heads]\n kp = kp.transpose(1, 2)\n qp = qp.transpose(1, 2)\n vp = vp.transpose(1, 2)\n src_p = qp[edge[0, :], :, :]\n dst_p = kp[edge[1, :], :, :]\n\n prods = self.output_var_x ** 2 * torch.exp(\n -torch.sum((src_x - dst_x) ** 2, dim=1) / (2 * self.lengthscale_x ** 2)) \\\n * self.output_var_p ** 2 * torch.exp(\n -torch.sum((src_p - dst_p) ** 2, dim=1) / (2 * self.lengthscale_p ** 2))\n\n v = None\n\n else:\n q = self.Q(x)\n k = self.K(x)\n v = self.V(x)\n\n # perform linear operation and split into h heads\n\n k = k.view(-1, self.h, self.d_k)\n q = q.view(-1, self.h, self.d_k)\n v = v.view(-1, self.h, self.d_k)\n\n # transpose to get dimensions [n_nodes, attention_dim, n_heads]\n\n k = k.transpose(1, 2)\n q = q.transpose(1, 2)\n v = v.transpose(1, 2)\n\n src = q[edge[0, :], :, :]\n dst_k = k[edge[1, :], :, :]\n\n if self.opt['attention_type'] == \"scaled_dot\":\n prods = torch.sum(src * dst_k, dim=1) / np.sqrt(self.d_k)\n\n elif self.opt['attention_type'] == \"cosine_sim\":\n cos = torch.nn.CosineSimilarity(dim=1, eps=1e-5)\n prods = cos(src, dst_k)\n\n elif self.opt['attention_type'] == \"pearson\":\n src_mu = torch.mean(src, dim=1, keepdim=True)\n dst_mu = torch.mean(dst_k, dim=1, keepdim=True)\n src = src - src_mu\n dst_k = dst_k - dst_mu\n cos = torch.nn.CosineSimilarity(dim=1, eps=1e-5)\n prods = cos(src, dst_k)\n\n if self.opt['reweight_attention'] and self.edge_weights is not None:\n prods = prods * self.edge_weights.unsqueeze(dim=1)\n if self.opt['square_plus']:\n attention = squareplus(prods, edge[self.opt['attention_norm_idx']])\n else:\n attention = softmax(prods, 
edge[self.opt['attention_norm_idx']])\n return attention, (v, prods)\n\n def __repr__(self):\n return self.__class__.__name__ + ' (' + str(self.in_features) + ' -> ' + str(self.out_features) + ')'\n\n\nif __name__ == '__main__':\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n opt = {'dataset': 'Cora', 'self_loop_weight': 1, 'leaky_relu_slope': 0.2, 'heads': 2, 'K': 10,\n 'attention_norm_idx': 0, 'add_source': False,\n 'alpha_dim': 'sc', 'beta_dim': 'sc', 'max_nfe': 1000, 'mix_features': False\n }\n dataset = get_dataset(opt, '../data', False)\n t = 1\n func = ODEFuncTransformerAtt(dataset.data.num_features, 6, opt, dataset.data, device)\n out = func(t, dataset.data.x)\n" ]
[ [ "torch.mean", "torch.sigmoid", "torch.ones", "numpy.sqrt", "torch.cat", "torch.nn.init.constant_", "torch.sum", "torch.nn.CosineSimilarity", "torch.nn.Sigmoid", "torch.nn.Linear", "torch.cuda.is_available" ] ]
ScripteJunkie/T3
[ "f0f205f39bf3dc23c2dc13d0037fbae6ac296874" ]
[ "python/HSVImageCalibration.py" ]
[ "import cv2\nimport numpy as np\n\ndef nothing(x):\n pass\n\n# Load image\nimage = cv2.imread('/python/Unused Images/Screenshot 2022-04-02 01-28-33.png')\n\nwidth = int(image.shape[1] / 2)\nheight = int(image.shape[0] / 2)\ndim = (width, height)\n\nimage = cv2.resize(image, dim)\n\n# Create a window\ncv2.namedWindow('image')\n\n# Create trackbars for color change\n# Hue is from 0-179 for Opencv\ncv2.createTrackbar('HMin', 'image', 0, 179, nothing)\ncv2.createTrackbar('SMin', 'image', 0, 255, nothing)\ncv2.createTrackbar('VMin', 'image', 0, 255, nothing)\ncv2.createTrackbar('HMax', 'image', 0, 179, nothing)\ncv2.createTrackbar('SMax', 'image', 0, 255, nothing)\ncv2.createTrackbar('VMax', 'image', 0, 255, nothing)\n\n# Set default value for Max HSV trackbars\ncv2.setTrackbarPos('HMax', 'image', 179)\ncv2.setTrackbarPos('SMax', 'image', 255)\ncv2.setTrackbarPos('VMax', 'image', 255)\n\n# Initialize HSV min/max values\nhMin = sMin = vMin = hMax = sMax = vMax = 0\nphMin = psMin = pvMin = phMax = psMax = pvMax = 0\n\nwhile(1):\n # Get current positions of all trackbars\n hMin = cv2.getTrackbarPos('HMin', 'image')\n sMin = cv2.getTrackbarPos('SMin', 'image')\n vMin = cv2.getTrackbarPos('VMin', 'image')\n hMax = cv2.getTrackbarPos('HMax', 'image')\n sMax = cv2.getTrackbarPos('SMax', 'image')\n vMax = cv2.getTrackbarPos('VMax', 'image')\n\n # Set minimum and maximum HSV values to display\n lower = np.array([hMin, sMin, vMin])\n upper = np.array([hMax, sMax, vMax])\n\n # Convert to HSV format and color threshold\n hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n mask = cv2.inRange(hsv, lower, upper)\n result = cv2.bitwise_and(image, image, mask=mask)\n\n # Print if there is a change in HSV value\n if((phMin != hMin) | (psMin != sMin) | (pvMin != vMin) | (phMax != hMax) | (psMax != sMax) | (pvMax != vMax) ):\n print(\"(hMin = %d , sMin = %d, vMin = %d), (hMax = %d , sMax = %d, vMax = %d)\" % (hMin , sMin , vMin, hMax, sMax , vMax))\n phMin = hMin\n psMin = sMin\n pvMin = vMin\n phMax = hMax\n psMax = sMax\n pvMax = vMax\n\n # Display result image\n cv2.imshow('image', result)\n if cv2.waitKey(10) & 0xFF == ord('q'):\n break\n\ncv2.destroyAllWindows()" ]
[ [ "numpy.array" ] ]
azzatha/RTG-Simulation-tool
[ "b9001876e79d2e0c5936df073fd16dec4c52e4a8" ]
[ "calculate_prob_cdf.py" ]
[ "import pandas as pd\nfrom sklearn import preprocessing\nimport numpy as np\nimport glob\nimport math\n\n# Function to Convert the cM to probability of recombination\ndef probability (genaticMaps):\n prob = (1.0 - math.exp(-genaticMaps[\"cM\"] / 50)) / 2.0\n return prob\n\n\n# Read the files in the current directory that contains all the genetic map\nfor filename in glob.glob('*.txt'):\n genaticMaps = pd.read_csv(filename, sep=\"\\t\")\n\n # 1. Convert the cM to probability of recombination\n genaticMaps[\"Recmb_Prob\"] = genaticMaps.apply(probability, axis=1)\n\n # 2. Normalize the probability\n X_normalized = preprocessing.normalize(genaticMaps[\"Recmb_Prob\"], norm='l1')\n df_normalized = pd.DataFrame(X_normalized)\n df1_transposed = df_normalized.T\n df1_transposed['chr'] = genaticMaps[\"chr\"]\n df1_transposed['pos'] = genaticMaps[\"pos\"]\n df1_transposed['prob'] = df1_transposed[0]\n del df1_transposed[0]\n\n #3. Calculate CDF\n df1_transposed['cdf'] = np.cumsum(df1_transposed[\"prob\"])\n\n # Save the final result to a new file\n df1_transposed.to_csv(\"./genetic_map/CDF_\"+filename, sep=\"\\t\", index=False)\n" ]
[ [ "sklearn.preprocessing.normalize", "pandas.read_csv", "numpy.cumsum", "pandas.DataFrame" ] ]
pchaos/quanttesting
[ "98331670547e8a45ba93b49f3e9c660495645114" ]
[ "testing/examples/talib-macd.py" ]
[ "'''\nTa-lib计算MACD\n'''\nimport pandas as pd\nimport numpy as np\nimport talib as ta\nimport tushare as ts\nfrom matplotlib import rc\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nrc('mathtext', default='regular')\nsns.set_style('white')\n# %matplotlib\nplt.rcParams[\"figure.figsize\"] = (20, 10)\n\ndw = ts.get_k_data(\"600600\")\nclose = dw.close.values\ndw['macd'], dw['macdsignal'], dw['macdhist'] = ta.MACD(close, fastperiod=12, slowperiod=26, signalperiod=9)\ndw[['close','macd','macdsignal','macdhist']].plot()\nplt.show()" ]
[ [ "matplotlib.pyplot.show", "matplotlib.rc" ] ]
fountain-y/ranksortloss
[ "e9c4ba360b2dd905babb0ed1a445fca588c5ecec" ]
[ "mmdet/core/evaluation/mean_ap.py" ]
[ "from multiprocessing import Pool\n\nimport mmcv\nimport numpy as np\nfrom mmcv.utils import print_log\nfrom terminaltables import AsciiTable\n\nfrom .bbox_overlaps import bbox_overlaps\nfrom .class_names import get_classes\n\nimport pandas as pd\n\n\ndef average_precision(recalls, precisions, mode='area'):\n \"\"\"Calculate average precision (for single or multiple scales).\n\n Args:\n recalls (ndarray): shape (num_scales, num_dets) or (num_dets, )\n precisions (ndarray): shape (num_scales, num_dets) or (num_dets, )\n mode (str): 'area' or '11points', 'area' means calculating the area\n under precision-recall curve, '11points' means calculating\n the average precision of recalls at [0, 0.1, ..., 1]\n\n Returns:\n float or ndarray: calculated average precision\n \"\"\"\n no_scale = False\n if recalls.ndim == 1:\n no_scale = True\n recalls = recalls[np.newaxis, :]\n precisions = precisions[np.newaxis, :]\n assert recalls.shape == precisions.shape and recalls.ndim == 2\n num_scales = recalls.shape[0]\n ap = np.zeros(num_scales, dtype=np.float32)\n if mode == 'area':\n zeros = np.zeros((num_scales, 1), dtype=recalls.dtype)\n ones = np.ones((num_scales, 1), dtype=recalls.dtype)\n mrec = np.hstack((zeros, recalls, ones))\n mpre = np.hstack((zeros, precisions, zeros))\n for i in range(mpre.shape[1] - 1, 0, -1):\n mpre[:, i - 1] = np.maximum(mpre[:, i - 1], mpre[:, i])\n for i in range(num_scales):\n ind = np.where(mrec[i, 1:] != mrec[i, :-1])[0]\n ap[i] = np.sum(\n (mrec[i, ind + 1] - mrec[i, ind]) * mpre[i, ind + 1])\n elif mode == '11points':\n for i in range(num_scales):\n for thr in np.arange(0, 1 + 1e-3, 0.1):\n precs = precisions[i, recalls[i, :] >= thr]\n prec = precs.max() if precs.size > 0 else 0\n ap[i] += prec\n ap /= 11\n else:\n raise ValueError(\n 'Unrecognized mode, only \"area\" and \"11points\" are supported')\n if no_scale:\n ap = ap[0]\n return ap\n\n\ndef tpfp_imagenet(det_bboxes,\n gt_bboxes,\n gt_bboxes_ignore=None,\n default_iou_thr=0.5,\n area_ranges=None):\n \"\"\"Check if detected bboxes are true positive or false positive.\n\n Args:\n det_bbox (ndarray): Detected bboxes of this image, of shape (m, 5).\n gt_bboxes (ndarray): GT bboxes of this image, of shape (n, 4).\n gt_bboxes_ignore (ndarray): Ignored gt bboxes of this image,\n of shape (k, 4). Default: None\n default_iou_thr (float): IoU threshold to be considered as matched for\n medium and large bboxes (small ones have special rules).\n Default: 0.5.\n area_ranges (list[tuple] | None): Range of bbox areas to be evaluated,\n in the format [(min1, max1), (min2, max2), ...]. Default: None.\n\n Returns:\n tuple[np.ndarray]: (tp, fp) whose elements are 0 and 1. The shape of\n each array is (num_scales, m).\n \"\"\"\n # an indicator of ignored gts\n gt_ignore_inds = np.concatenate(\n (np.zeros(gt_bboxes.shape[0], dtype=np.bool),\n np.ones(gt_bboxes_ignore.shape[0], dtype=np.bool)))\n # stack gt_bboxes and gt_bboxes_ignore for convenience\n gt_bboxes = np.vstack((gt_bboxes, gt_bboxes_ignore))\n\n num_dets = det_bboxes.shape[0]\n num_gts = gt_bboxes.shape[0]\n if area_ranges is None:\n area_ranges = [(None, None)]\n num_scales = len(area_ranges)\n # tp and fp are of shape (num_scales, num_gts), each row is tp or fp\n # of a certain scale.\n tp = np.zeros((num_scales, num_dets), dtype=np.float32)\n fp = np.zeros((num_scales, num_dets), dtype=np.float32)\n if gt_bboxes.shape[0] == 0:\n if area_ranges == [(None, None)]:\n fp[...] 
= 1\n else:\n det_areas = (det_bboxes[:, 2] - det_bboxes[:, 0]) * (\n det_bboxes[:, 3] - det_bboxes[:, 1])\n for i, (min_area, max_area) in enumerate(area_ranges):\n fp[i, (det_areas >= min_area) & (det_areas < max_area)] = 1\n return tp, fp\n ious = bbox_overlaps(det_bboxes, gt_bboxes - 1)\n gt_w = gt_bboxes[:, 2] - gt_bboxes[:, 0]\n gt_h = gt_bboxes[:, 3] - gt_bboxes[:, 1]\n iou_thrs = np.minimum((gt_w * gt_h) / ((gt_w + 10.0) * (gt_h + 10.0)),\n default_iou_thr)\n # sort all detections by scores in descending order\n sort_inds = np.argsort(-det_bboxes[:, -1])\n for k, (min_area, max_area) in enumerate(area_ranges):\n gt_covered = np.zeros(num_gts, dtype=bool)\n # if no area range is specified, gt_area_ignore is all False\n if min_area is None:\n gt_area_ignore = np.zeros_like(gt_ignore_inds, dtype=bool)\n else:\n gt_areas = gt_w * gt_h\n gt_area_ignore = (gt_areas < min_area) | (gt_areas >= max_area)\n for i in sort_inds:\n max_iou = -1\n matched_gt = -1\n # find best overlapped available gt\n for j in range(num_gts):\n # different from PASCAL VOC: allow finding other gts if the\n # best overlaped ones are already matched by other det bboxes\n if gt_covered[j]:\n continue\n elif ious[i, j] >= iou_thrs[j] and ious[i, j] > max_iou:\n max_iou = ious[i, j]\n matched_gt = j\n # there are 4 cases for a det bbox:\n # 1. it matches a gt, tp = 1, fp = 0\n # 2. it matches an ignored gt, tp = 0, fp = 0\n # 3. it matches no gt and within area range, tp = 0, fp = 1\n # 4. it matches no gt but is beyond area range, tp = 0, fp = 0\n if matched_gt >= 0:\n gt_covered[matched_gt] = 1\n if not (gt_ignore_inds[matched_gt]\n or gt_area_ignore[matched_gt]):\n tp[k, i] = 1\n elif min_area is None:\n fp[k, i] = 1\n else:\n bbox = det_bboxes[i, :4]\n area = (bbox[2] - bbox[0]) * (bbox[3] - bbox[1])\n if area >= min_area and area < max_area:\n fp[k, i] = 1\n return tp, fp\n\n\ndef tpfp_default(det_bboxes,\n gt_bboxes,\n gt_bboxes_ignore=None,\n iou_thr=0.5,\n area_ranges=None):\n \"\"\"Check if detected bboxes are true positive or false positive.\n\n Args:\n det_bbox (ndarray): Detected bboxes of this image, of shape (m, 5).\n gt_bboxes (ndarray): GT bboxes of this image, of shape (n, 4).\n gt_bboxes_ignore (ndarray): Ignored gt bboxes of this image,\n of shape (k, 4). Default: None\n iou_thr (float): IoU threshold to be considered as matched.\n Default: 0.5.\n area_ranges (list[tuple] | None): Range of bbox areas to be evaluated,\n in the format [(min1, max1), (min2, max2), ...]. Default: None.\n\n Returns:\n tuple[np.ndarray]: (tp, fp) whose elements are 0 and 1. The shape of\n each array is (num_scales, m).\n \"\"\"\n # an indicator of ignored gts\n gt_ignore_inds = np.concatenate(\n (np.zeros(gt_bboxes.shape[0], dtype=np.bool),\n np.ones(gt_bboxes_ignore.shape[0], dtype=np.bool)))\n # stack gt_bboxes and gt_bboxes_ignore for convenience\n gt_bboxes = np.vstack((gt_bboxes, gt_bboxes_ignore))\n\n num_dets = det_bboxes.shape[0]\n num_gts = gt_bboxes.shape[0]\n if area_ranges is None:\n area_ranges = [(None, None)]\n num_scales = len(area_ranges)\n # tp and fp are of shape (num_scales, num_gts), each row is tp or fp of\n # a certain scale\n tp = np.zeros((num_scales, num_dets), dtype=np.float32)\n fp = np.zeros((num_scales, num_dets), dtype=np.float32)\n\n # if there is no gt bboxes in this image, then all det bboxes\n # within area range are false positives\n if gt_bboxes.shape[0] == 0:\n if area_ranges == [(None, None)]:\n fp[...] 
= 1\n else:\n det_areas = (det_bboxes[:, 2] - det_bboxes[:, 0]) * (\n det_bboxes[:, 3] - det_bboxes[:, 1])\n for i, (min_area, max_area) in enumerate(area_ranges):\n fp[i, (det_areas >= min_area) & (det_areas < max_area)] = 1\n return tp, fp\n\n ious = bbox_overlaps(det_bboxes, gt_bboxes)\n # for each det, the max iou with all gts\n ious_max = ious.max(axis=1)\n # for each det, which gt overlaps most with it\n ious_argmax = ious.argmax(axis=1)\n # sort all dets in descending order by scores\n sort_inds = np.argsort(-det_bboxes[:, -1])\n for k, (min_area, max_area) in enumerate(area_ranges):\n gt_covered = np.zeros(num_gts, dtype=bool)\n # if no area range is specified, gt_area_ignore is all False\n if min_area is None:\n gt_area_ignore = np.zeros_like(gt_ignore_inds, dtype=bool)\n else:\n gt_areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0]) * (\n gt_bboxes[:, 3] - gt_bboxes[:, 1])\n gt_area_ignore = (gt_areas < min_area) | (gt_areas >= max_area)\n for i in sort_inds:\n if ious_max[i] >= iou_thr:\n matched_gt = ious_argmax[i]\n if not (gt_ignore_inds[matched_gt]\n or gt_area_ignore[matched_gt]):\n if not gt_covered[matched_gt]:\n gt_covered[matched_gt] = True\n tp[k, i] = 1\n else:\n fp[k, i] = 1\n # otherwise ignore this detected bbox, tp = 0, fp = 0\n elif min_area is None:\n fp[k, i] = 1\n else:\n bbox = det_bboxes[i, :4]\n area = (bbox[2] - bbox[0]) * (bbox[3] - bbox[1])\n if area >= min_area and area < max_area:\n fp[k, i] = 1\n return tp, fp\n\n\ndef get_cls_results(det_results, annotations, class_id):\n \"\"\"Get det results and gt information of a certain class.\n\n Args:\n det_results (list[list]): Same as `eval_map()`.\n annotations (list[dict]): Same as `eval_map()`.\n class_id (int): ID of a specific class.\n\n Returns:\n tuple[list[np.ndarray]]: detected bboxes, gt bboxes, ignored gt bboxes\n \"\"\"\n cls_dets = [img_res[class_id] for img_res in det_results]\n cls_gts = []\n cls_gts_ignore = []\n for ann in annotations:\n gt_inds = ann['labels'] == class_id\n cls_gts.append(ann['bboxes'][gt_inds, :])\n\n if ann.get('labels_ignore', None) is not None:\n ignore_inds = ann['labels_ignore'] == class_id\n cls_gts_ignore.append(ann['bboxes_ignore'][ignore_inds, :])\n else:\n cls_gts_ignore.append(np.empty((0, 4), dtype=np.float32))\n\n return cls_dets, cls_gts, cls_gts_ignore\n\n\ndef eval_map(det_results,\n annotations,\n scale_ranges=None,\n iou_thr=0.5,\n dataset=None,\n logger=None,\n tpfp_fn=None,\n nproc=4):\n \"\"\"Evaluate mAP of a dataset.\n\n Args:\n det_results (list[list]): [[cls1_det, cls2_det, ...], ...].\n The outer list indicates images, and the inner list indicates\n per-class detected bboxes.\n annotations (list[dict]): Ground truth annotations where each item of\n the list indicates an image. Keys of annotations are:\n\n - `bboxes`: numpy array of shape (n, 4)\n - `labels`: numpy array of shape (n, )\n - `bboxes_ignore` (optional): numpy array of shape (k, 4)\n - `labels_ignore` (optional): numpy array of shape (k, )\n scale_ranges (list[tuple] | None): Range of scales to be evaluated,\n in the format [(min1, max1), (min2, max2), ...]. A range of\n (32, 64) means the area range between (32**2, 64**2).\n Default: None.\n iou_thr (float): IoU threshold to be considered as matched.\n Default: 0.5.\n dataset (list[str] | str | None): Dataset name or dataset classes,\n there are minor differences in metrics for different datsets, e.g.\n \"voc07\", \"imagenet_det\", etc. Default: None.\n logger (logging.Logger | str | None): The way to print the mAP\n summary. 
See `mmcv.utils.print_log()` for details. Default: None.\n tpfp_fn (callable | None): The function used to determine true/\n false positives. If None, :func:`tpfp_default` is used as default\n unless dataset is 'det' or 'vid' (:func:`tpfp_imagenet` in this\n case). If it is given as a function, then this function is used\n to evaluate tp & fp. Default None.\n nproc (int): Processes used for computing TP and FP.\n Default: 4.\n\n Returns:\n tuple: (mAP, [dict, dict, ...])\n \"\"\"\n assert len(det_results) == len(annotations)\n\n num_imgs = len(det_results)\n num_scales = len(scale_ranges) if scale_ranges is not None else 1\n num_classes = len(det_results[0]) # positive class num\n area_ranges = ([(rg[0]**2, rg[1]**2) for rg in scale_ranges]\n if scale_ranges is not None else None)\n\n pool = Pool(nproc)\n eval_results = []\n for i in range(num_classes):\n # get gt and det bboxes of this class\n cls_dets, cls_gts, cls_gts_ignore = get_cls_results(\n det_results, annotations, i)\n # choose proper function according to datasets to compute tp and fp\n if tpfp_fn is None:\n if dataset in ['det', 'vid']:\n tpfp_fn = tpfp_imagenet\n else:\n tpfp_fn = tpfp_default\n if not callable(tpfp_fn):\n raise ValueError(\n f'tpfp_fn has to be a function or None, but got {tpfp_fn}')\n\n # compute tp and fp for each image with multiple processes\n tpfp = pool.starmap(\n tpfp_fn,\n zip(cls_dets, cls_gts, cls_gts_ignore,\n [iou_thr for _ in range(num_imgs)],\n [area_ranges for _ in range(num_imgs)]))\n tp, fp = tuple(zip(*tpfp))\n # calculate gt number of each scale\n # ignored gts or gts beyond the specific scale are not counted\n num_gts = np.zeros(num_scales, dtype=int)\n for j, bbox in enumerate(cls_gts):\n if area_ranges is None:\n num_gts[0] += bbox.shape[0]\n else:\n gt_areas = (bbox[:, 2] - bbox[:, 0]) * (\n bbox[:, 3] - bbox[:, 1])\n for k, (min_area, max_area) in enumerate(area_ranges):\n num_gts[k] += np.sum((gt_areas >= min_area)\n & (gt_areas < max_area))\n # sort all det bboxes by score, also sort tp and fp\n cls_dets = np.vstack(cls_dets)\n num_dets = cls_dets.shape[0]\n sort_inds = np.argsort(-cls_dets[:, -1])\n tp = np.hstack(tp)[:, sort_inds]\n fp = np.hstack(fp)[:, sort_inds]\n # calculate recall and precision with tp and fp\n tp = np.cumsum(tp, axis=1)\n fp = np.cumsum(fp, axis=1)\n eps = np.finfo(np.float32).eps\n recalls = tp / np.maximum(num_gts[:, np.newaxis], eps)\n precisions = tp / np.maximum((tp + fp), eps)\n # calculate AP\n if scale_ranges is None:\n recalls = recalls[0, :]\n precisions = precisions[0, :]\n num_gts = num_gts.item()\n mode = 'area' if dataset != 'voc07' else '11points'\n ap = average_precision(recalls, precisions, mode)\n eval_results.append({\n 'num_gts': num_gts,\n 'num_dets': num_dets,\n 'recall': recalls,\n 'precision': precisions,\n 'ap': ap\n })\n pool.close()\n if scale_ranges is not None:\n # shape (num_classes, num_scales)\n all_ap = np.vstack([cls_result['ap'] for cls_result in eval_results])\n all_num_gts = np.vstack(\n [cls_result['num_gts'] for cls_result in eval_results])\n mean_ap = []\n for i in range(num_scales):\n if np.any(all_num_gts[:, i] > 0):\n mean_ap.append(all_ap[all_num_gts[:, i] > 0, i].mean())\n else:\n mean_ap.append(0.0)\n else:\n aps = []\n for cls_result in eval_results:\n if cls_result['num_gts'] > 0:\n aps.append(cls_result['ap'])\n mean_ap = np.array(aps).mean().item() if aps else 0.0\n\n print_map_summary(\n mean_ap, eval_results, dataset, area_ranges, logger=logger)\n\n return mean_ap, eval_results\n\n\ndef 
print_map_summary(mean_ap,\n results,\n dataset=None,\n scale_ranges=None,\n logger=None):\n \"\"\"Print mAP and results of each class.\n\n A table will be printed to show the gts/dets/recall/AP of each class and\n the mAP.\n\n Args:\n mean_ap (float): Calculated from `eval_map()`.\n results (list[dict]): Calculated from `eval_map()`.\n dataset (list[str] | str | None): Dataset name or dataset classes.\n scale_ranges (list[tuple] | None): Range of scales to be evaluated.\n logger (logging.Logger | str | None): The way to print the mAP\n summary. See `mmcv.utils.print_log()` for details. Default: None.\n \"\"\"\n\n if logger == 'silent':\n return\n\n if isinstance(results[0]['ap'], np.ndarray):\n num_scales = len(results[0]['ap'])\n else:\n num_scales = 1\n\n if scale_ranges is not None:\n assert len(scale_ranges) == num_scales\n\n num_classes = len(results)\n\n recalls = np.zeros((num_scales, num_classes), dtype=np.float32)\n precisions = np.zeros((num_scales, num_classes), dtype=np.float32)\n aps = np.zeros((num_scales, num_classes), dtype=np.float32)\n num_gts = np.zeros((num_scales, num_classes), dtype=int)\n for i, cls_result in enumerate(results):\n if cls_result['recall'].size > 0:\n recalls[:, i] = np.array(cls_result['recall'], ndmin=2)[:, -1]\n if cls_result['precision'].size > 0:\n precisions[:, i] = np.array(cls_result['precision'], ndmin=2)[:, -1]\n aps[:, i] = cls_result['ap']\n num_gts[:, i] = cls_result['num_gts']\n\n if dataset is None:\n label_names = [str(i) for i in range(num_classes)]\n elif mmcv.is_str(dataset):\n label_names = get_classes(dataset)\n else:\n label_names = dataset\n\n if not isinstance(mean_ap, list):\n mean_ap = [mean_ap]\n\n # header = ['class', 'gts', 'dets', 'recall', 'ap']\n header = ['class', 'gts', 'dets', 'recall', 'precision', 'ap']\n\n df = pd.DataFrame(columns=header)\n\n for i in range(num_scales):\n if scale_ranges is not None:\n print_log(f'Scale range {scale_ranges[i]}', logger=logger)\n table_data = [header]\n tpcnt = 0\n detcnt = 0\n gtcnt = 0\n for j in range(num_classes):\n row_data = [\n label_names[j], \n num_gts[i, j], \n results[j]['num_dets'],\n f'{recalls[i, j]:.3f}', \n f'{precisions[i, j]:.3f}',\n f'{aps[i, j]:.3f}'\n ]\n table_data.append(row_data)\n\n df = df.append({\n 'class':label_names[j],\n 'gts':num_gts[i, j],\n 'dets':results[j]['num_dets'],\n 'recall':f'{recalls[i, j]:.4f}',\n 'precision':f'{precisions[i, j]:.4f}',\n 'ap':f'{aps[i, j]:.4f}',\n },\n ignore_index=True)\n\n tpcnt += num_gts[i, j] * recalls[i, j]\n gtcnt += num_gts[i, j]\n detcnt += results[j]['num_dets']\n \n # table_data.append(['mAP', '', '', '', f'{mean_ap[i]:.3f}'])\n table_data.append(['mAP', '', '', '', '', f'{mean_ap[i]:.3f}'])\n table = AsciiTable(table_data)\n table.inner_footing_row_border = True\n print_log('\\n' + table.table, logger=logger)\n\n print('\\ntp:', tpcnt)\n print('fp:', detcnt - tpcnt)\n print('fn:', gtcnt - tpcnt)\n print('det:', detcnt)\n print('gt:', gtcnt)\n\n print('mean recall:', np.mean(recalls[i]))\n print('mean precision:', np.mean(precisions[i]))\n\n df.to_csv('result.csv', sep=',', index=False, encoding='utf_8_sig')" ]
[ [ "numpy.hstack", "numpy.minimum", "numpy.maximum", "numpy.arange", "numpy.cumsum", "pandas.DataFrame", "numpy.ones", "numpy.finfo", "numpy.empty", "numpy.zeros_like", "numpy.any", "numpy.mean", "numpy.where", "numpy.argsort", "numpy.array", "numpy.zeros", "numpy.sum", "numpy.vstack" ] ]
dilberdillu/graphcu
[ "eaf05d1b287beab594a48c2b2a777f60a5815b4b" ]
[ "graphcu/graph.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Dec 19 15:06:21 2018\n\n@author: dillu\n\"\"\"\n\nfrom networkx.exception import NetworkXError\nimport matplotlib.pyplot as plt\n\nclass Graph(object):\n \n node_dict_factory = dict\n node_attr_dict_factory = dict\n adjlist_outer_dict_factory = dict\n adjlist_inner_dict_factory = dict\n edge_attr_dict_factory = dict\n graph_attr_dict_factory = dict\n \n \n \n def __init__(self, incoming_graph_data=None, **attr):\n self.graph_attr_dict_factory = self.graph_attr_dict_factory\n self.node_dict_factory = self.node_dict_factory\n self.node_attr_dict_factory = self.node_attr_dict_factory\n self.adjlist_outer_dict_factory = self.adjlist_outer_dict_factory\n self.adjlist_inner_dict_factory = self.adjlist_inner_dict_factory\n self.edge_attr_dict_factory = self.edge_attr_dict_factory\n \n self.graph = self.graph_attr_dict_factory()\n self._node = self.node_dict_factory()\n self._adj = self.adjlist_outer_dict_factory()\n \n '''\n if incoming_graph_data is not None:\n convert.to_networkx_graph(incoming_graph_data, create_using=self)\n \n self.graph.update(attr)\n '''\n \n \n def add_node(self, node_for_adding, **attr):\n #addnode\n if node_for_adding not in self._node:\n self._adj[node_for_adding] = self.adjlist_inner_dict_factory()\n attr_dict = self._node[node_for_adding] = self.node_attr_dict_factory()\n attr_dict.update(attr)\n else:\n self._node[node_for_adding].update(attr)\n \n def update(self, edges=None, nodes=None):\n \n if edges is not None:\n if nodes is not None:\n self.add_nodes_from(nodes)\n self.add_edges_from(edges)\n else:\n try:\n graph_nodes = edges.nodes\n graph_edges = edges.edges\n except AttributeError:\n self.add_edges_from(edges)\n else:\n self.add_nodes_from(graph_nodes.data())\n self.add_edges_from(graph_edges.data())\n self.graph.update(edges.graph)\n elif nodes is not None:\n self.add_nodes_from(nodes)\n else:\n raise NetworkXError(\"update needs nodes or edges input\")\n \n \n def add_nodes_from(self, nodes_for_adding, **attr):\n #add nodes from\n for n in nodes_for_adding:\n try:\n if n not in self._node:\n self._adj[n] = self.adjlist_inner_dict_factory()\n attr_dict = self._node[n] = self.node_attr_dict_factory()\n attr_dict.update(attr)\n else:\n self._node[n].update(attr)\n except TypeError:\n nn, ndict = n\n if nn not in self._node:\n self.adj[nn] = self.adjlist_inner_dict_factory()\n newdict = attr.copy()\n newdict.update(newdict)\n attr_dict = self._node[nn] = self.node_attr_dict_factory()\n attr_dict.update(newdict)\n else:\n olddict = self._node[nn]\n olddict.update(attr)\n olddict.update(ndict)\n \n \n \n def add_edges_from(self, ebunch_to_add, **attr):\n for e in ebunch_to_add:\n ne = len(e)\n if ne == 3:\n u, v, dd = e\n elif ne == 2:\n u, v = e\n dd = {}\n else:\n raise NetworkXError(\"Egde tuple %s must be a 2-tuple or 3tuple\"%(e,))\n if u not in self._node:\n self._adj[u] = self.adjlist_inner_dict_factory()\n self._node[u] = self.node_attr_dict_factory()\n if v not in self._node:\n self._adj[v] = self.adjlist_inner_dict_factory()\n self._node[v] = self.node_attr_dict_factory()\n datadict = self._adj[u].get(v, self.edge_attr_dict_factory())\n datadict.update(attr)\n datadict.update(dd)\n self._adj[u][v] = datadict\n self._adj[v][u] = datadict\n \n \n\ng = Graph()\nmylist = tuple([(1,2), (2,6), (2,3), (3,4), (3,5)])\ng.add_edges_from(mylist)\n\nh = plt.draw(g)\n\n" ]
[ [ "matplotlib.pyplot.draw" ] ]
avinesh09/mnist-deepml
[ "ffbdd2d58e47d704d7a05aaf6e109ac56a15a99d" ]
[ "mnist-saved-model.py" ]
[ "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n#!/usr/bin/env python2.7\nr\"\"\"Train and export a simple Softmax Regression TensorFlow model.\nThe model is from the TensorFlow \"MNIST For ML Beginner\" tutorial. This program\nsimply follows all its training instructions, and uses TensorFlow SavedModel to\nexport the trained model with proper signatures that can be loaded by standard\ntensorflow_model_server.\nUsage: mnist_saved_model.py [--training_iteration=x] [--model_version=y] \\\n export_dir\n\"\"\"\n\nfrom __future__ import print_function\n\nimport os\nimport sys\n\n# This is a placeholder for a Google-internal import.\n\nimport tensorflow as tf\n\nfrom tensorflow.contrib import predictor\n\n\nimport mnist_input_data\n\ntf.app.flags.DEFINE_integer('training_iteration', 10000,\n 'number of training iterations.')\ntf.app.flags.DEFINE_integer('model_version', 1, 'version number of the model.')\ntf.app.flags.DEFINE_string('data_dir', '/tmp/model/mnist/data', 'Working directory.')\ntf.app.flags.DEFINE_string('model_dir', '/opt/mnist/model', 'export model directory.')\ntf.app.flags.DEFINE_string('summary_dir', '/opt/mnist/summaries', 'summaries directory.')\nFLAGS = tf.app.flags.FLAGS\n\n\ndef variable_summaries(var):\n \"\"\"Attach a lot of summaries to a Tensor (for TensorBoard visualization).\"\"\"\n with tf.name_scope('summaries'):\n mean = tf.reduce_mean(var)\n tf.summary.scalar('mean', mean)\n with tf.name_scope('stddev'):\n stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n tf.summary.scalar('stddev', stddev)\n tf.summary.scalar('max', tf.reduce_max(var))\n tf.summary.scalar('min', tf.reduce_min(var))\n tf.summary.histogram('histogram', var)\n\ndef main(_):\n '''\n if len(sys.argv) < 2 or sys.argv[-1].startswith('-'):\n print('Usage: mnist_export.py [--training_iteration=x] '\n '[--model_version=y] export_dir')\n sys.exit(-1)\n if FLAGS.training_iteration <= 0:\n print('Please specify a positive value for training iteration.')\n sys.exit(-1)\n if FLAGS.model_version <= 0:\n print('Please specify a positive value for version number.')\n sys.exit(-1)\n if FLAGS.model_version <= 0:\n print('Please specify a positive value for version number.')\n sys.exit(-1)\n '''\n\n #FLAGS.model_version = 4\n #FLAGS.model_dir = \"model\"\n #FLAGS.data_dir = \"model/data\"\n #FLAGS.summary_dir = \"model/summ\"\n MODEL_EXPORT_PATH = FLAGS.model_dir\n MODEL_SUMMARY_DIR = FLAGS.summary_dir\n\n # Train model\n print('Training model...')\n mnist = mnist_input_data.read_data_sets(FLAGS.data_dir, one_hot=True)\n sess = tf.InteractiveSession()\n serialized_tf_example = tf.placeholder(tf.string, name='tf_example')\n feature_configs = {'x': tf.FixedLenFeature(shape=[784], dtype=tf.float32),}\n tf_example = tf.parse_example(serialized_tf_example, feature_configs)\n x = tf.identity(tf_example['x'], name='x') # use tf.identity() to assign name\n y_ = 
tf.placeholder('float', shape=[None, 10])\n w = tf.Variable(tf.zeros([784, 10]))\n b = tf.Variable(tf.zeros([10]))\n variable_summaries(w)\n variable_summaries(b)\n sess.run(tf.global_variables_initializer())\n y = tf.nn.softmax(tf.matmul(x, w) + b, name='y')\n cross_entropy = -tf.reduce_sum(y_ * tf.log(y))\n train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)\n values, indices = tf.nn.top_k(y, 10)\n table = tf.contrib.lookup.index_to_string_table_from_tensor(\n tf.constant([str(i) for i in xrange(10)]))\n prediction_classes = table.lookup(tf.to_int64(indices))\n\n merged = tf.summary.merge_all()\n train_writer = tf.summary.FileWriter(MODEL_SUMMARY_DIR + '/log' + '/train', sess.graph)\n test_writer = tf.summary.FileWriter(MODEL_SUMMARY_DIR + '/log' + '/test')\n\n for _ in range(FLAGS.training_iteration):\n if _%200 == 0:\n print(\"training model with batch \",_)\n batch = mnist.train.next_batch(100)\n train_step.run(feed_dict={x: batch[0], y_: batch[1]})\n print(\"training done, performing other functions.\")\n correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))\n summaries, _, train_accuracy = sess.run([merged, train_step, accuracy],\n feed_dict={\n x: mnist.test.images,\n y_: mnist.test.labels\n })\n train_writer.add_summary(summaries)\n print('training accuracy %g' % train_accuracy)\n print('Done training!')\n\n # Export model\n # WARNING(break-tutorial-inline-code): The following code snippet is\n # in-lined in tutorials, please update tutorial documents accordingly\n # whenever code changes.\n # export_path_base = sys.argv[-1]\n export_path = os.path.join(\n tf.compat.as_bytes(MODEL_EXPORT_PATH),\n tf.compat.as_bytes(str(FLAGS.model_version)))\n print('Exporting trained model to', export_path)\n builder = tf.saved_model.builder.SavedModelBuilder(export_path)\n\n # Build the signature_def_map.\n classification_inputs = tf.saved_model.utils.build_tensor_info(\n serialized_tf_example)\n classification_outputs_classes = tf.saved_model.utils.build_tensor_info(\n prediction_classes)\n classification_outputs_scores = tf.saved_model.utils.build_tensor_info(values)\n\n classification_signature = (\n tf.saved_model.signature_def_utils.build_signature_def(\n inputs={\n tf.saved_model.signature_constants.CLASSIFY_INPUTS:\n classification_inputs\n },\n outputs={\n tf.saved_model.signature_constants.CLASSIFY_OUTPUT_CLASSES:\n classification_outputs_classes,\n tf.saved_model.signature_constants.CLASSIFY_OUTPUT_SCORES:\n classification_outputs_scores\n },\n method_name=tf.saved_model.signature_constants.CLASSIFY_METHOD_NAME))\n\n tensor_info_x = tf.saved_model.utils.build_tensor_info(x)\n tensor_info_y = tf.saved_model.utils.build_tensor_info(y)\n\n prediction_signature = (\n tf.saved_model.signature_def_utils.build_signature_def(\n inputs={'images': tensor_info_x},\n outputs={'scores': tensor_info_y},\n method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME))\n\n legacy_init_op = tf.group(tf.tables_initializer(), name='legacy_init_op')\n builder.add_meta_graph_and_variables(\n sess, [tf.saved_model.tag_constants.SERVING],\n signature_def_map={\n 'predict_images':\n prediction_signature,\n tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:\n classification_signature,\n },\n legacy_init_op=legacy_init_op)\n\n builder.save()\n\n print('Done exporting!')\n\n\ndef load(session, tag_constants, export_dir):\n return tf.saved_model.loader.load(session, tag_constants, 
export_dir)\n\nif __name__ == '__main__':\n main(sys.argv)\n\nif __name__ == '__mainold__':\n\n with tf.Session(graph=tf.Graph()) as sess:\n '''\n serialized_tf_example = tf.placeholder(tf.string, name='tf_example')\n feature_configs = {'x': tf.FixedLenFeature(shape=[784], dtype=tf.float32), }\n tf_example = tf.parse_example(serialized_tf_example, feature_configs)\n x = tf.identity(tf_example['x'], name='x') # use tf.identity() to assign name\n y_ = tf.placeholder('float', shape=[None, 10])\n w = tf.Variable(tf.zeros([784, 10]))\n b = tf.Variable(tf.zeros([10]))\n y = tf.nn.softmax(tf.matmul(x, w) + b, name='y')\n\n correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))\n\n load(sess, [tf.saved_model.tag_constants.SERVING], \"model/1\")\n\n test_data_set = mnist_input_data.read_data_sets(\"model/data\").test\n\n print (len(test_data_set.images))\n print(len(test_data_set.labels))\n\n predictor = tf.argmax(y, 1)\n\n acc, predictions = sess.run([accuracy, predictor], feed_dict={\n x: test_data_set.images\n })\n\n print('Accuracy', acc)\n print('Predictions', predictions)\n '''\n from tensorflow.core.framework import types_pb2, tensor_shape_pb2\n test_data_set = mnist_input_data.read_data_sets(\"model/data\").test\n inputs = {'x': tf.TensorInfo(\n name='x:0',\n dtype=types_pb2.DT_FLOAT,\n tensor_shape=tensor_shape_pb2.TensorShapeProto())}\n outputs = {'y': tf.TensorInfo(\n name='y:0',\n dtype=types_pb2.DT_FLOAT)}\n signature_def = tf.saved_model.signature_def_utils.build_signature_def(\n inputs=inputs,\n outputs=outputs,\n method_name='tensorflow/serving/predict')\n\n\n saved_model_predictor = predictor.from_saved_model(export_dir=\"model/4\", signature_def=signature_def)\n\n #print (\"test data : \", test_data_set.images[0:5])\n output_dict = saved_model_predictor({'x': test_data_set.images[0:20]})\n\n from matplotlib import pyplot as plt\n import numpy as np\n\n def input_image(arr):\n two_d = (np.reshape(arr, (28, 28)) * 255).astype(np.uint8)\n plt.imshow(two_d, interpolation='nearest')\n return plt\n\n def output_image(arr):\n plt.imshow(arr, interpolation='nearest')\n return plt\n\n out_arr = output_dict[\"y\"]\n for i in range(10):\n input_image(test_data_set.images[i]).show()\n arr = [out_arr[i]]\n output_image(arr).show()\n\n print(output_dict)\n output_tensor_name = saved_model_predictor\n\n print(len(output_dict[\"y\"]))\n" ]
[ [ "matplotlib.pyplot.imshow", "tensorflow.FixedLenFeature", "tensorflow.zeros", "tensorflow.cast", "tensorflow.app.flags.DEFINE_string", "tensorflow.summary.scalar", "tensorflow.to_int64", "tensorflow.Graph", "numpy.reshape", "tensorflow.app.flags.DEFINE_integer", "tensorflow.nn.top_k", "tensorflow.core.framework.tensor_shape_pb2.TensorShapeProto", "tensorflow.name_scope", "tensorflow.square", "tensorflow.argmax", "tensorflow.contrib.predictor.from_saved_model", "tensorflow.matmul", "tensorflow.InteractiveSession", "tensorflow.parse_example", "tensorflow.saved_model.builder.SavedModelBuilder", "tensorflow.identity", "tensorflow.placeholder", "tensorflow.compat.as_bytes", "tensorflow.global_variables_initializer", "tensorflow.train.GradientDescentOptimizer", "tensorflow.summary.merge_all", "tensorflow.TensorInfo", "tensorflow.saved_model.utils.build_tensor_info", "tensorflow.summary.histogram", "tensorflow.reduce_max", "tensorflow.summary.FileWriter", "tensorflow.saved_model.signature_def_utils.build_signature_def", "tensorflow.reduce_mean", "tensorflow.reduce_min", "tensorflow.log", "tensorflow.tables_initializer", "tensorflow.saved_model.loader.load" ] ]
KalcMatej99/DQN-Trading
[ "cc28827fded23e088fd524d7989f12c38dbbe06f" ]
[ "Main.py" ]
[ "# Importing DataLoaders for each model. These models include rule-based, vanilla DQN and encoder-decoder DQN.\nfrom tokenize import String\nfrom xmlrpc.client import Boolean\nfrom DataLoader.DataLoader import YahooFinanceDataLoader\nfrom DataLoader.DataForPatternBasedAgent import DataForPatternBasedAgent\nfrom DataLoader.DataAutoPatternExtractionAgent import DataAutoPatternExtractionAgent\nfrom DataLoader.DataSequential import DataSequential\n\nfrom DeepRLAgent.MLPEncoder.Train import Train as SimpleMLP\nfrom DeepRLAgent.SimpleCNNEncoder.Train import Train as SimpleCNN\nfrom EncoderDecoderAgent.GRU.Train import Train as GRU\nfrom EncoderDecoderAgent.CNN.Train import Train as CNN\nfrom EncoderDecoderAgent.CNN2D.Train import Train as CNN2d\nfrom EncoderDecoderAgent.CNNAttn.Train import Train as CNN_ATTN\nfrom EncoderDecoderAgent.CNN_GRU.Train import Train as CNN_GRU\n\n# Imports for Deep RL Agent\nfrom DeepRLAgent.VanillaInput.Train import Train as DeepRL\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport pandas as pd\nimport torch\nimport argparse\nfrom tqdm import tqdm\nimport os\nfrom utils import save_pkl, load_pkl\n\nparser = argparse.ArgumentParser(description='DQN-Trader arguments')\nparser.add_argument('--dataset-name', default=\"BTC_USDT_1m\",\n help='Name of the data inside the Data folder')\nparser.add_argument('--nep', type=int, default=50,\n help='Number of episodes')\nparser.add_argument('--window_size', type=int, default=3,\n help='Window size for sequential models')\nparser.add_argument('--cuda', action=\"store_true\",\n help='run on CUDA (default: False)')\nparser.add_argument('--load_dataset_from_file', type=Boolean, default=False,\n help='run from csv or prepared data set')\nparser.add_argument('--use_patterns', type=Boolean, default=False,\n help='run also pattern models')\nparser.add_argument('--begin_date', type=str, default=\"2020-01-01 00:00:00.000\",\n help='start date of data frame')\nparser.add_argument('--split_point', type=str, default='2021-06-12 00:00:00.000',\n help='split point in data frame')\nargs = parser.parse_args()\n\nDATA_LOADERS = {\n f'{args.dataset_name}': YahooFinanceDataLoader(f'{args.dataset_name}',\n split_point=args.split_point,\n load_from_file=args.load_dataset_from_file,\n load_patterns = args.use_patterns,\n begin_date=args.begin_date)\n}\n\n\nclass SensitivityRun:\n def __init__(self,\n dataset_name,\n gamma,\n batch_size,\n replay_memory_size,\n feature_size,\n target_update,\n n_episodes,\n n_step,\n window_size,\n device,\n evaluation_parameter='gamma',\n transaction_cost=0.02):\n \"\"\"\n\n @param data_loader:\n @param dataset_name:\n @param gamma:\n @param batch_size:\n @param replay_memory_size:\n @param feature_size:\n @param target_update:\n @param n_episodes:\n @param n_step:\n @param window_size:\n @param device:\n @param evaluation_parameter: shows which parameter are we evaluating and can be: 'gamma', 'batch size', 'n_step'\n or 'replay memory size'\n @param transaction_cost:\n \"\"\"\n self.data_loader = DATA_LOADERS[dataset_name]\n self.dataset_name = dataset_name\n self.gamma = gamma\n self.batch_size = batch_size\n self.replay_memory_size = replay_memory_size\n self.feature_size = feature_size\n self.target_update = target_update\n self.n_episodes = n_episodes\n self.n_step = n_step\n self.transaction_cost = transaction_cost\n self.window_size = window_size\n self.device = device\n self.evaluation_parameter = evaluation_parameter\n # The state mode is only for autoPatternExtractionAgent. 
Therefore, for pattern inputs, the state mode would be\n # set to None, because it can be recovered from the name of the data loader (e.g. dataTrain_patternBased).\n\n self.STATE_MODE_WINDOWED = 5 # window with k candles inside + the trend of those candles\n\n self.dataTrain_autoPatternExtractionAgent = None\n self.dataTest_autoPatternExtractionAgent = None\n self.dataTrain_patternBased = None\n self.dataTest_patternBased = None\n self.dataTrain_autoPatternExtractionAgent_candle_rep = None\n self.dataTest_autoPatternExtractionAgent_candle_rep = None\n self.dataTrain_autoPatternExtractionAgent_windowed = None\n self.dataTest_autoPatternExtractionAgent_windowed = None\n self.dataTrain_sequential = None\n self.dataTest_sequential = None\n self.dqn_windowed = None\n self.mlp_windowed = None\n self.experiment_path = os.path.join(os.path.abspath(os.path.dirname(__file__)),\n 'Results/' + self.evaluation_parameter + '/')\n if not os.path.exists(self.experiment_path):\n os.makedirs(self.experiment_path)\n\n self.reset()\n self.test_portfolios = {'MLP-windowed': {}}\n\n\n def reset(self):\n self.load_data()\n self.load_agents()\n\n def load_data(self):\n self.dataTrain_autoPatternExtractionAgent_windowed = \\\n DataAutoPatternExtractionAgent(self.data_loader.data_train,\n self.STATE_MODE_WINDOWED,\n 'action_auto_extraction_windowed',\n self.device,\n self.gamma, self.n_step,\n self.batch_size,\n self.window_size,\n self.transaction_cost)\n self.dataTest_autoPatternExtractionAgent_windowed = \\\n DataAutoPatternExtractionAgent(self.data_loader.data_test,\n self.STATE_MODE_WINDOWED,\n 'action_auto_extraction_windowed',\n self.device,\n self.gamma, self.n_step,\n self.batch_size,\n self.window_size,\n self.transaction_cost)\n\n def load_agents(self):\n self.mlp_windowed = SimpleMLP(self.data_loader,\n self.dataTrain_autoPatternExtractionAgent_windowed,\n self.dataTest_autoPatternExtractionAgent_windowed,\n self.dataset_name,\n self.STATE_MODE_WINDOWED,\n self.window_size,\n self.transaction_cost,\n self.feature_size,\n BATCH_SIZE=self.batch_size,\n GAMMA=self.gamma,\n ReplayMemorySize=self.replay_memory_size,\n TARGET_UPDATE=self.target_update,\n n_step=self.n_step)\n\n def train(self):\n self.mlp_windowed.train(self.n_episodes) \n\n def evaluate_sensitivity(self):\n key = None\n if self.evaluation_parameter == 'gamma':\n key = self.gamma\n elif self.evaluation_parameter == 'batch size':\n key = self.batch_size\n elif self.evaluation_parameter == 'replay memory size':\n key = self.replay_memory_size\n elif self.evaluation_parameter == 'n_step':\n key = self.n_step\n elif self.evaluation_parameter == 'window_size':\n key = self.window_size\n\n self.test_portfolios['MLP-windowed'][key] = self.mlp_windowed.test().get_daily_portfolio_value()\n\n self.mlp_windowed.test().evaluate()\n\n def plot_and_save_sensitivity(self):\n plot_path = os.path.join(self.experiment_path, 'plots')\n if not os.path.exists(plot_path):\n os.makedirs(plot_path)\n\n sns.set(rc={'figure.figsize': (15, 7)})\n sns.set_palette(sns.color_palette(\"Paired\", 15))\n\n for model_name in self.test_portfolios.keys():\n first = True\n ax = None\n for gamma in self.test_portfolios[model_name]:\n profit_percentage = [\n (self.test_portfolios[model_name][gamma][i] - self.test_portfolios[model_name][gamma][0]) /\n self.test_portfolios[model_name][gamma][0] * 100\n for i in range(len(self.test_portfolios[model_name][gamma]))]\n\n difference = len(self.test_portfolios[model_name][gamma]) - len(self.data_loader.data_test_with_date)\n df = 
pd.DataFrame({'date': self.data_loader.data_test_with_date.index,\n 'portfolio': profit_percentage[difference:]})\n if not first:\n df.plot(ax=ax, x='date', y='portfolio', label=gamma)\n else:\n ax = df.plot(x='date', y='portfolio', label=gamma)\n first = False\n\n ax.set(xlabel='Time', ylabel='%Rate of Return')\n ax.set_title(f'Analyzing the sensitivity of {model_name} to {self.evaluation_parameter}')\n plt.legend()\n fig_file = os.path.join(plot_path, f'{model_name}.jpg')\n plt.savefig(fig_file, dpi=300)\n\n def save_portfolios(self):\n path = os.path.join(self.experiment_path, 'portfolios.pkl')\n save_pkl(path, self.test_portfolios)\n\n def save_experiment(self):\n self.plot_and_save_sensitivity()\n self.save_portfolios()\n\n\nif __name__ == '__main__':\n gamma_list = [0.9, 0.8, 0.7]\n batch_size_list = [16, 64, 256]\n replay_memory_size_list = [16, 64, 256]\n n_step_list = [1, 5, 10, 20, 60, 120, 5 * 60, 24 * 60]\n window_size = args.window_size\n dataset_name = args.dataset_name\n n_episodes = args.nep\n device = torch.device(\"cuda\" if args.cuda and torch.cuda.is_available() else \"cpu\")\n feature_size = 64\n target_update = 5\n\n gamma_default = 0.8\n batch_size_default = 16\n replay_memory_size_default = 32\n n_step_default = 8\n\n pbar = tqdm(len(n_step_list) + len(gamma_list) + len(batch_size_list) + len(replay_memory_size_list))\n\n\n run = SensitivityRun(\n dataset_name,\n gamma_default,\n batch_size_default,\n replay_memory_size_default,\n feature_size,\n target_update,\n n_episodes,\n n_step_default,\n window_size,\n device,\n evaluation_parameter='n_step',\n transaction_cost=0.001)\n \n for n_step in n_step_list:\n run.n_step = n_step\n run.reset()\n run.train()\n run.evaluate_sensitivity()\n pbar.update(1)\n\n run.save_experiment()\n\n run = SensitivityRun(\n dataset_name,\n gamma_default,\n batch_size_default,\n replay_memory_size_default,\n feature_size,\n target_update,\n n_episodes,\n n_step_default,\n window_size,\n device,\n evaluation_parameter='gamma',\n transaction_cost=0.001)\n\n for gamma in gamma_list:\n run.gamma = gamma\n run.reset()\n run.train()\n run.evaluate_sensitivity()\n pbar.update(1)\n\n run.save_experiment()\n\n # test batch-size\n run = SensitivityRun(\n dataset_name,\n gamma_default,\n batch_size_default,\n replay_memory_size_default,\n feature_size,\n target_update,\n n_episodes,\n n_step_default,\n window_size,\n device,\n evaluation_parameter='batch size',\n transaction_cost=0.001)\n\n for batch_size in batch_size_list:\n run.batch_size = batch_size\n run.reset()\n run.train()\n run.evaluate_sensitivity()\n pbar.update(1)\n\n run.save_experiment()\n\n # test replay memory size\n run = SensitivityRun(\n dataset_name,\n gamma_default,\n batch_size_default,\n replay_memory_size_default,\n feature_size,\n target_update,\n n_episodes,\n n_step_default,\n window_size,\n device,\n evaluation_parameter='replay memory size',\n transaction_cost=0.001)\n\n for replay_memory_size in replay_memory_size_list:\n run.replay_memory_size = replay_memory_size\n run.reset()\n run.train()\n run.evaluate_sensitivity()\n pbar.update(1)\n\n run.save_experiment()\n \n pbar.close()\n" ]
[ [ "matplotlib.pyplot.legend", "torch.cuda.is_available", "matplotlib.pyplot.savefig", "pandas.DataFrame" ] ]
dHannasch/beluga
[ "519e1ca2a43a86bc47737c45484288b2bacc1338" ]
[ "examples/Astrodynamics/Detumble/plotresults.py" ]
[ "from beluga.utils import load\nimport matplotlib.pyplot as plt\n\ndata = load('data.blg')\nsol_set = data['solutions']\n\nsol = sol_set[-1][-1]\n\nplt.figure()\nplt.plot(sol.t, sol.y[:,0], color='b', label='$\\omega_1$')\nplt.plot(sol.t, sol.y[:,1], color='r', label='$\\omega_2$')\nplt.plot(sol.t, sol.y[:,2], color='g', label='$\\omega_3$')\nplt.title('Rotation Rates')\nplt.ylabel('Angular Rates [rad/s]')\nplt.xlabel('Time [s]')\nplt.legend()\nplt.grid(True)\n\nplt.figure()\nplt.plot([sol.t[0], sol.t[-1]], [1, 1], color='k', linestyle='--', linewidth=3)\nplt.plot([sol.t[0], sol.t[-1]], [-1, -1], color='k', linestyle='--', linewidth=3)\nplt.plot(sol.t, sol.u[:,0], color='b', label='$u_1$')\nplt.plot(sol.t, sol.u[:,1], color='r', label='$u_2$')\nplt.plot(sol.t, sol.u[:,2], color='g', label='$u_3$')\nplt.title('Control History')\nplt.ylabel('Control [rad/s^2]')\nplt.xlabel('Time [s]')\nplt.legend()\nplt.grid(True)\n\nplt.show()\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.title", "matplotlib.pyplot.figure", "matplotlib.pyplot.plot", "matplotlib.pyplot.grid", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
sethdp/sethdp.github.io
[ "dc4e871f669db37df8bad890fc817f2764a96d55" ]
[ "assets/blog/dmd/dmd.py" ]
[ "# %%\nimport math\nimport matplotlib as mpl\nimport matplotlib.animation as animation\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.axes_grid1 import ImageGrid\nfrom mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable\nimport numpy as np\nimport pydmd\n\nmpl.rcParams.update({\n 'figure.dpi': 240,\n 'image.cmap': 'PiYG'\n})\n\ndef make_unit_circle_plot(ax):\n ax.add_artist(plt.Circle((0, 0), 1, fill=False))\n ax.set_aspect('equal')\n ax.set_xticks([-1, 0, 1])\n ax.set_yticks([-1, 0, 1])\n ax.set_xlim(-1.25, 1.25)\n ax.set_ylim(-1.25, 1.25)\n ax.set_xlabel('Real')\n ax.set_ylabel('Imaginary')\n\ndef make_animation(f, axes, image_fn, frames, filename):\n images = image_fn(0)\n im = [ax.imshow(images[i], vmin=-1, vmax=1) for i, ax in enumerate(axes)]\n\n f.savefig(filename + '.png')\n\n def update(i):\n images = image_fn(i)\n for j, img in enumerate(images):\n im[j].set_array(img)\n return im\n\n anim = animation.FuncAnimation(f, update, frames, blit=True)\n anim.save(filename + '.mp4')\n\n# %%\n# One-Dimensional Input\n\n# We will create a synthetic example of a 1D array changing in time\n# This will create a 2D input matrix from 2 dynamic modes,\n# where r is the index in the array and c is the timestep\n\nsteps = 20\nangle = 2 * math.pi / steps\nmode1 = complex(math.cos(angle), math.sin(angle))\nmode2 = mode1.conjugate()\nmodes = np.vander([mode1, mode2], steps, increasing=True).T / 2\n\neig_radius = .9\neig1 = eig_radius * complex(math.cos(angle), math.sin(angle))\neig2 = eig1.conjugate()\neigs = np.array([eig1, eig2])\ndynamics = np.vander(eigs, steps, increasing=True)\n\nX = modes.dot(dynamics).real\n\n# Create figures for input\n\n# Input array changing in time, video\n\nsubplot_kw = {\n 'frame_on': False,\n 'xticks': [],\n 'yticks': []\n}\nf, ax = plt.subplots(subplot_kw=subplot_kw)\ndef get_input_image(i):\n return [X[:, i, np.newaxis].T]\nmake_animation(f, [ax], get_input_image, steps, 'input')\n\n# Input matrix, image\n\nsubplot_kw = {\n 'xticks': range(0, steps, 5),\n 'yticks': range(0, steps, 5),\n 'xlabel': 'Frame',\n 'ylabel': 'Index'\n}\nf, ax = plt.subplots(figsize=(6, 5), subplot_kw=subplot_kw)\nim = ax.imshow(X)\nf.colorbar(im, shrink=.8)\nf.suptitle('Input')\nf.savefig('input_matrix.png')\n\n#%%\n# One-Dimensional Reconstruction\n\ndef plot_recon(ax, mode, dynamic, vmax, text):\n divider = make_axes_locatable(ax)\n\n aMode = divider.append_axes('left', '5%')\n aMode.set_ylim(-.5, 19.5)\n aMode.invert_yaxis()\n aMode.set_xticks([])\n aMode.set_yticks([])\n aMode.imshow(mode.reshape(20, 1), vmax=vmax, vmin=-vmax)\n aMode.set_ylabel('Mode ' + text)\n\n aDyn = divider.append_axes('bottom', '20%')\n aDyn.set_xlim(-.5, 19.5)\n aDyn.set_xticks([])\n aDyn.set_yticks([])\n aDyn.plot(dynamic)\n aDyn.set_xlabel('Dynamic ' + text)\n\n Xr = mode.reshape(20, 1).dot(dynamic.reshape(1, 20)).real\n ax.imshow(Xr, vmax=vmax, vmin=-vmax)\n ax.set_xticks([])\n ax.set_yticks([])\n\nf, ax = plt.subplots(2, 2, figsize=(6, 6))\nplot_recon(ax[0, 0], modes[:, 0].real, dynamics[0, :].real, vmax, '1, Real')\n# TODO wrong because colors modes\nplot_recon(ax[1, 0], -modes[:, 0].imag, dynamics[0].imag, vmax, '1, Imag')\nplot_recon(ax[0, 1], modes[:, 1].real, dynamics[1].real, vmax, '2, Real')\nplot_recon(ax[1, 1], -modes[:, 1].imag, dynamics[1].imag, vmax, '2, Imag')\nf.colorbar(im, ax=ax, shrink=.8)\nf.suptitle('Real Components of Reconstruction')\nf.savefig('recon_comp.png')\n\nrecon1 = modes[:, 0].real.reshape(20, 1).dot(dynamics[0, :].real.reshape(1, 20))\nrecon2 = -modes[:, 
0].imag.reshape(20, 1).dot(dynamics[0].imag.reshape(1, 20))\nrecon3 = modes[:, 0].real.reshape(20, 1).dot(dynamics[0, :].imag.reshape(1, 20))\nrecon4 = modes[:, 0].imag.reshape(20, 1).dot(dynamics[0].real.reshape(1, 20))\n\nf = plt.figure()\ngrid = ImageGrid(f, 111, (4, 1), share_all=True, axes_pad=0)\ngrid[0].set_xticks([])\ngrid[0].set_yticks([])\nfor ax in grid:\n ax.set_frame_on(False)\n\ndef get_recon_image(i):\n return [\n recon1[:, i, np.newaxis].T,\n recon2[:, i, np.newaxis].T,\n recon3[:, i, np.newaxis].T,\n recon4[:, i, np.newaxis].T\n ]\n\nmake_animation(f, [ax for ax in grid], get_recon_image, steps, 'recon')\n\nf, ax = plt.subplots()\nmake_unit_circle_plot(ax)\nax.plot(eigs.real, eigs.imag, '.')\n\nf.savefig('recon_eigs.png')\n\n# %%\n# Background Subtraction\n\n# Create a simple gradient background\n\nframes = 30\ndims = (90, 160)\nbg = np.zeros(dims)\n\nfor r in range(dims[0]):\n for c in range(dims[1]):\n bg[r, c] = r / dims[0] * c / dims[1]\n\nX = np.ndarray((bg.size, frames))\n\n# Create a square that moves from left to right\n\nradius = 8\nrow = 40\ncol_start = 20\ncol_step = 4\nfor i in range(frames):\n frame = bg.copy()\n col = col_start + col_step * i\n for r in range(row - radius, row + radius):\n for c in range(col - radius, col + radius):\n frame[r, c] = 1\n \n X[:, i] = frame.flatten()\n\nvmax = np.max(np.abs(X))\n\n# DMD\n\ndmd = pydmd.DMD()\ndmd.fit(X)\n\n# Low-rank / Background\n\n# Note: generally it's better to use a threshold; there may be multiple modes\n# constituting the background\nabs_log_eigs = np.abs(np.log(dmd.eigs))\nmin_idx = np.argmin(abs_log_eigs)\nmode = dmd.modes[:, min_idx, np.newaxis]\ndynamic = dmd.dynamics[np.newaxis, min_idx, :]\nL = mode.dot(dynamic).real\n\nprint(\"abs log eigs: {}\".format(abs_log_eigs))\nprint(\"min: {} = {}\".format(min_idx, abs_log_eigs[min_idx]))\n\n# Sparse / Foreground\n\nS = X - L\n\n# Background subtraction figures\n\n# Input matrix with foreground/background, video\n\nf = plt.figure(figsize=(8, 4.5))\ngrid = ImageGrid(f, 111, (2, 2), share_all=True, axes_pad=0)\nfor ax in grid:\n ax.set_frame_on(False)\ngrid[0].set_xticks([])\ngrid[0].set_yticks([])\nkwargs = {'fontsize': 18}\ngrid[0].text(10, 20, 'Input', **kwargs)\ngrid[2].text(10, 20, 'Foreground', **kwargs)\ngrid[3].text(10, 20, 'Background', **kwargs)\ndef get_back_sub_images(i):\n return [A[:, i].reshape(dims) for A in [X, S, L]]\nmake_animation(f, [grid[0], grid[2], grid[3]], get_back_sub_images, frames, 'back_sub')\n\n# Eigenvalues on unit circle and sorted by abs log value, image\n\nidxs = np.argsort(abs_log_eigs)\neigs_sorted = dmd.eigs[idxs]\nabs_log_eigs_sorted = abs_log_eigs[idxs]\n\nf, ax = plt.subplots(1, 2)\nmake_unit_circle_plot(ax[0])\nax[0].set_xlabel('Index')\nax[1].set_ylabel('$|\\log \\lambda|$')\nax[0].plot(eigs_sorted.real[np.newaxis], eigs_sorted.imag[np.newaxis], '.')\nax[1].plot(np.arange(abs_log_eigs.size)[np.newaxis], abs_log_eigs_sorted[np.newaxis], '.')\nf.savefig('back_sub_eigs.png')\n\n# %%\n# Eigenvalues v Time\n\n# Create three eigenvalues of different amplitudes and angular velocities\n\nradii = np.array([.95, 1.0, 1.01])\nangles_inv = np.array([20, 20, 40])\nsteps = 20\n\nangles = 2 * math.pi / angles_inv\neigs = radii * (np.cos(angles) + 1j * np.sin(angles))\ndynamics = np.vander(eigs, steps, increasing=True)\n\n# Plot dynamics\n\nf = plt.figure(figsize=(6, 3))\ngs = f.add_gridspec(2, 2, width_ratios=(2, 3), hspace=0)\nf.suptitle('DMD Eigenvalues v Time')\n\nlabels = [r'${}, 2\\pi/{}$'.format(radii[i], angles_inv[i]) for i 
in range(len(radii))]\n\nax_cir = f.add_subplot(gs[:, 0])\nmake_unit_circle_plot(ax_cir)\nfor i in range(len(dynamics)):\n ax_cir.plot(dynamics[i].real, dynamics[i].imag, '-.', label=labels[i])\nf.legend(loc='lower right', ncol=3, title=\"$|\\lambda|, \\\\varphi$\", fontsize='x-small')\n\nax_real = f.add_subplot(gs[0, 1])\nax_imag = f.add_subplot(gs[1, 1])\nfor i in range(len(dynamics)):\n ax_real.plot(dynamics[i].real, '-.')\n ax_imag.plot(dynamics[i].imag, '-.')\nax_real.set_xticks([])\nax_real.set_ylabel('Real')\nax_imag.set_xlabel('Time (steps)')\nax_imag.set_ylabel('Imaginary')\n\nf.tight_layout()\nf.savefig('dynamics.png')\n\n# %%\n# DMD v FFT\nN = 64\nwave = [.95 ** i * math.sin(2 * np.pi * i / 32) for i in range(N)]\n\nwave_fft = np.fft.fft(wave)\nwave_fft_expanded = np.zeros((2 * N,), np.complex128)\nwave_fft_expanded[::2] = 2 * wave_fft\nwave_fft_shifted = np.fft.fftshift(wave_fft)\nrecon_fft = np.fft.ifft(wave_fft_expanded).real\n\ndmd = pydmd.HODMD(0, 0, True, True, 16)\ndmd.fit(wave)\ndynamics = np.vander(dmd.eigs, 2 * N, True)\nrecon_dmd = dmd.modes.dot((dynamics.T * dmd._b).T)[0].real\n\ndmd_eig_phase = 2 * np.pi / 32\ndmd_eig = .95 * (np.cos(dmd_eig_phase) + 1j * np.sin(dmd_eig_phase))\ndmd_eigs_orig = np.asarray([dmd_eig, dmd_eig.conjugate()])\n\n\nf, ax = plt.subplots(2, sharex=True, sharey=True, gridspec_kw={'hspace': 0})\n\nax[0].plot(wave, 'y')\nax[0].set_xlabel('$t$')\nax[0].set_ylabel('$f(t)$')\nax[0].legend(['Original Input'])\n\nax[1].plot(recon_fft, 'c')\nax[1].plot(recon_dmd, 'm')\nax[1].set_xlabel('$t$')\nax[1].set_ylabel('f(t)')\nax[1].legend(['FFT', 'DMD'])\nf.suptitle('FFT v DMD\\nReconstruction & Extrapolation')\nf.savefig('fft_recon.png')\n\nf, ax = plt.subplots(1, 2)\n\nax[0].plot(wave_fft_shifted.real, 'cx', wave_fft_shifted.imag, 'm+')\nax[0].set_title('FFT Frequencies')\nax[0].set_xlabel('$k$')\nax[0].set_ylabel('$A_k$')\nax[0].legend(['Real', 'Imag'])\n\nax[1].plot(dmd.eigs.real, dmd.eigs.imag, 'c.')\nax[1].set_title('DMD Eigenvalues')\nmake_unit_circle_plot(ax[1])\n\nf.savefig('fft_comp.png')\n\n# %%\n" ]
[ [ "numpy.ndarray", "numpy.fft.fftshift", "numpy.argmin", "numpy.vander", "numpy.arange", "numpy.sin", "matplotlib.pyplot.Circle", "numpy.zeros", "matplotlib.pyplot.figure", "numpy.log", "numpy.fft.ifft", "matplotlib.rcParams.update", "matplotlib.animation.FuncAnimation", "numpy.argsort", "numpy.array", "numpy.abs", "numpy.fft.fft", "matplotlib.pyplot.subplots", "numpy.cos" ] ]
kikacaty/c_study
[ "b0556603f638f616a75cc8460c424eb002904549" ]
[ "eval_ensemble_arch.py" ]
[ "'''\nDescription: \nAutor: Jiachen Sun\nDate: 2021-08-05 13:18:39\nLastEditors: Jiachen Sun\nLastEditTime: 2021-08-05 14:10:13\n'''\nfrom __future__ import print_function\nimport os\nimport argparse\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.optim.lr_scheduler import CosineAnnealingLR, ExponentialLR, StepLR, MultiStepLR, ReduceLROnPlateau\nfrom data import PCData_SSL, PCData, PCData_Jigsaw\nfrom model_finetune import PointNet_Rotation, DGCNN_Rotation, PointNet_Jigsaw, PointNet, DGCNN, PointNet_Simple, Pct, DeepSym\nimport numpy as np\nfrom torch.utils.data import DataLoader\nimport sys\nsys.path.append(\"./emd/\")\nimport emd_module\nfrom util import cal_loss, IOStream, cross_entropy_with_probs,trades_loss\nimport sklearn.metrics as metrics\nimport attack\nimport time\nimport model_combine\n# EPS=0.05\n# ALPHA=0.01\n# TRAIN_ITER=7\n# TEST_ITER=7\n\ndef _init_():\n if not os.path.exists(args.pre_path +'finetune_checkpoints'):\n os.makedirs(args.pre_path +'finetune_checkpoints')\n if not os.path.exists(args.pre_path +'finetune_checkpoints/'+args.exp_name):\n os.makedirs(args.pre_path +'finetune_checkpoints/'+args.exp_name)\n if not os.path.exists(args.pre_path +'finetune_checkpoints/'+args.exp_name+'/'+'models'):\n os.makedirs(args.pre_path +'finetune_checkpoints/'+args.exp_name+'/'+'models')\n os.system('cp finetune_main.py '+args.pre_path+'finetune_checkpoints'+'/'+args.exp_name+'/'+'finetune_main.py.backup')\n os.system('cp model_finetune.py '+args.pre_path+'finetune_checkpoints' + '/' + args.exp_name + '/' + 'model_finetune.py.backup')\n os.system('cp util.py '+args.pre_path+'finetune_checkpoints' + '/' + args.exp_name + '/' + 'util.py.backup')\n os.system('cp data.py '+args.pre_path+'finetune_checkpoints' + '/' + args.exp_name + '/' + 'data.py.backup')\n os.system('cp attack.py '+args.pre_path+'finetune_checkpoints' + '/' + args.exp_name + '/' + 'attack.py.backup')\n np.random.seed(args.seed)\n torch.cuda.manual_seed_all(args.seed)\n torch.manual_seed(args.seed)\n torch.backends.cudnn.deterministic=True\n torch.backends.cudnn.benchmark = False\n\n \ndef adversarial(args,io,model=None, dataloader=None):\n\n if dataloader == None:\n test_loader = DataLoader(PCData(name=args.dataset,partition='test', num_points=args.num_points), num_workers=8,\n batch_size=args.test_batch_size, shuffle=False, drop_last=False)\n else:\n test_loader = dataloader\n\n device = torch.device(\"cuda\" if args.cuda else \"cpu\")\n\n if args.dataset == 'modelnet40':\n output_channel = 40\n elif args.dataset == 'modelnet10':\n output_channel = 10\n elif args.dataset == 'scanobjectnn':\n output_channel = 15\n elif args.dataset == 'shapenet':\n output_channel = 57\n #Try to load models\n if model is None:\n # if args.model == 'dgcnn':\n # model1 = DGCNN(args,output_channels=output_channel).to(device)\n # model2 = DGCNN(args,output_channels=output_channel).to(device)\n # model3 = DGCNN(args,output_channels=output_channel).to(device)\n # elif args.model == 'pointnet_simple':\n # model1 = PointNet_Simple(args,output_channels=output_channel).to(device)\n # model2 = PointNet_Simple(args,output_channels=output_channel).to(device)\n # model3 = PointNet_Simple(args,output_channels=output_channel).to(device)\n # elif args.model == 'pct':\n # model1 = Pct(args,output_channels=output_channel).to(device)\n # model2 = Pct(args,output_channels=output_channel).to(device)\n # model3 = Pct(args,output_channels=output_channel).to(device)\n\n # else:\n # raise 
Exception(\"Not implemented\")\n model1 = PointNet_Simple(args,output_channels=output_channel).to(device)\n model2 = DGCNN(args,output_channels=output_channel).to(device)\n model3 = Pct(args,output_channels=output_channel).to(device)\n\n\n model1 = nn.DataParallel(model1)\n model1.load_state_dict(torch.load(args.model_path1 + '/model_epoch' + str(args.epochs) + '.t7'))\n model2 = nn.DataParallel(model2)\n model2.load_state_dict(torch.load(args.model_path2 + '/model_epoch' + str(args.epochs) + '.t7'))\n model3 = nn.DataParallel(model3)\n model3.load_state_dict(torch.load(args.model_path3 + '/model_epoch' + str(args.epochs) + '.t7'))\n\n model1 = model1.eval()\n model2 = model2.eval()\n model3 = model3.eval()\n\n # if args.attack == 'apgd':\n # apgd = attack.APGDAttack(model1,n_iter=args.test_iter,eps=args.eps,seed=args.seed)\n # elif args.attack == 'apgd_margin':\n # apgd = attack.APGDAttack(model1,n_iter=args.test_iter,loss='ce_margin',eps=args.eps,seed=args.seed)\n\n test_true = []\n test_pred = []\n\n total = args.total\n counter = 0\n \n for data, label,_,_ in test_loader:\n data, label = data.to(device).float(), label.to(device).long().squeeze()\n data = data.permute(0, 2, 1)\n batch_size = data.size()[0]\n\n if args.attack == 'pgd':\n adv_data = attack.pgd_attack_ensemble(model1,model2,model3,data,label,eps=args.eps,alpha=args.alpha,iters=args.test_iter,repeat=1,mixup=False)\n # elif args.attack == 'pgd_margin':\n # adv_data = attack.pgd_attack_margin(model1,data,label,eps=args.eps,alpha=args.alpha,iters=args.test_iter,repeat=1,mixup=False)\n # elif args.attack == 'nattack':\n # adv_data = attack.nattack(model1,data,label,eps=args.eps,alpha=args.alpha,iters=args.test_iter,variance=0.1,samples=args.samples)\n # elif args.attack == 'spsa':\n # adv_data = attack.spsa(model1,data,label,eps=args.eps,alpha=args.alpha,iters=args.test_iter,samples=args.samples)\n # elif args.attack == 'nes':\n # adv_data = attack.nes(model1,data,label,eps=args.eps,alpha=args.alpha,iters=args.test_iter,variance=0.001,samples=args.samples)\n # elif args.attack == 'evolution':\n # adv_data = attack.evolution(model1,data,label,eps=args.eps,iters=args.test_iter,variance=0.005,samples=args.samples,k=args.samples // 4)\n # elif args.attack == 'apgd' or args.attack == 'apgd_margin':\n # _,adv_data = apgd.perturb(data,label)\n # elif args.attack == 'mim':\n # adv_data = attack.mim(model1,data,label,eps=args.eps,alpha=args.alpha,iters=args.test_iter,repeat=1,mixup=False)\n # elif args.attack == 'mim_margin':\n # adv_data = attack.mim_margin(model1,data,label,eps=args.eps,alpha=args.alpha,iters=args.test_iter,repeat=1,mixup=False)\n \n logits1,_,_ = model1(adv_data)\n logits2,_,_ = model2(adv_data)\n logits3,_,_ = model3(adv_data)\n\n # logits1 = logits1.max(dim = 1)[1]\n # logits2 = logits2.max(dim = 1)[1]\n # logits3 = logits3.max(dim = 1)[1]\n\n # tmax1 = logits1.max(-1, keepdim=True)[0]\n # tmax2 = logits2.max(-1, keepdim=True)[0]\n # tmax3 = logits3.max(-1, keepdim=True)[0]\n\n # output1 = torch.where(logits1 == tmax1, 1, 0)\n # output2 = torch.where(logits2 == tmax2, 1, 0)\n # output3 = torch.where(logits3 == tmax3, 1, 0)\n\n # logits = output1 + output2 + output3\n\n logits = torch.stack([logits1,logits2,logits3])\n logits = torch.max(logits,dim=0)[0]\n\n val,preds = logits.max(dim=1)\n # if val == 1:\n # logits = logits1 + logits2 + logits3\n # val,preds = logits.max(dim=1) \n\n counter += batch_size\n test_true.append(label.cpu().numpy())\n test_pred.append(preds.detach().cpu().numpy())\n if counter >= total:\n 
break\n test_true = np.concatenate(test_true)\n test_pred = np.concatenate(test_pred)\n test_acc = metrics.accuracy_score(test_true, test_pred)\n\n # avg_per_class_acc = metrics.balanced_accuracy_score(test_true, test_pred1)\n outstr = ' Adversarial :: ADV_test acc: %.6f ' %(test_acc)\n io.cprint(args.attack + outstr)\n return \n\nif __name__ == \"__main__\":\n # Training settings\n parser = argparse.ArgumentParser(description='Point Cloud Recognition')\n parser.add_argument('--exp_name', type=str, default='exp', metavar='N',\n help='Name of the experiment')\n parser.add_argument('--model', type=str, default='dgcnn', metavar='N',\n choices=['pointnet', 'dgcnn', 'pointnet_simple', 'pct', 'deepsym'],\n help='Model to use, [pointnet, dgcnn pointnet_simple]')\n parser.add_argument('--pre_path', type=str, default='./', metavar='N',\n help='Name of the pre path')\n parser.add_argument('--dataset', type=str, default='modelnet40', metavar='N')\n parser.add_argument('--batch_size', type=int, default=32, metavar='batch_size',\n help='Size of batch)')\n parser.add_argument('--test_batch_size', type=int, default=32, metavar='batch_size',\n help='Size of batch)')\n parser.add_argument('--epochs', type=int, default=250, metavar='N',\n help='which epoch to evaluate')\n parser.add_argument('--no_cuda', type=bool, default=False,\n help='enables CUDA training')\n parser.add_argument('--seed', type=int, default=1, metavar='S',\n help='random seed (default: 0)')\n parser.add_argument('--num_points', type=int, default=1024,\n help='num of points to use')\n parser.add_argument('--dropout', type=float, default=0.5,\n help='dropout rate')\n parser.add_argument('--emb_dims', type=int, default=1024, metavar='N',\n help='Dimension of embeddings')\n parser.add_argument('--k', type=int, default=20, metavar='N',\n help='Num of nearest neighbors to use')\n parser.add_argument('--p', type=str, default='', metavar='N',\n help='Pretrained model path')\n parser.add_argument('--eps',type=float,default=0.05,\n help=\"Maximum allowed L_inf Perturbation for training\")\n parser.add_argument('--alpha',type=float,default=0.005,\n help=\"Adversarial training perturbation step size\")\n parser.add_argument('--test_iter',type=int,default=200,\n help=\"Number of steps taken to create adversarial test inputs\")\n parser.add_argument('--total',type=int,default=1000,\n help=\"Number of samples to evaluate\")\n parser.add_argument('--adversarial',type=bool,default=False,\n help=\"Whether to use adversarial examples\")\n parser.add_argument('--gpu',type=str,default='0',\n help=\"Which gpu to use\")\n parser.add_argument('--model_path1', type=str, default='', metavar='N',\n help='Pretrained model path 1')\n parser.add_argument('--model_path2', type=str, default='', metavar='N',\n help='Pretrained model path 2')\n parser.add_argument('--model_path3', type=str, default='', metavar='N',\n help='Pretrained model path 3')\n parser.add_argument('--attack', type=str, default='pgd', metavar='N',\n help='Attack method')\n parser.add_argument('--samples', type=int, default=64, \n help='black box samples')\n\n args = parser.parse_args()\n\n os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu\n _init_()\n\n print(args.adversarial)\n io = IOStream(args.pre_path+'finetune_checkpoints/' + args.exp_name + '/run_' + str(args.epochs) + '_' + args.attack + '_' + str(args.test_iter) + '.log')\n io.cprint(str(args))\n\n args.cuda = not args.no_cuda and torch.cuda.is_available()\n if args.cuda:\n io.cprint(\n 'Using GPU : ' + str(torch.cuda.current_device()) + ' from 
' + str(torch.cuda.device_count()) + ' devices')\n torch.cuda.manual_seed(args.seed)\n else:\n io.cprint('Using CPU')\n\n adversarial(args=args,io=io)" ]
[ [ "torch.max", "numpy.random.seed", "torch.cuda.manual_seed", "torch.cuda.current_device", "torch.manual_seed", "torch.cuda.device_count", "torch.stack", "numpy.concatenate", "torch.cuda.manual_seed_all", "torch.cuda.is_available", "torch.device", "torch.nn.DataParallel", "sklearn.metrics.accuracy_score" ] ]
aaronsnoswell/UniModal-IRL
[ "02ea22b47d50ff1234c25b2293db01162112d534" ]
[ "unimodal_irl/sw_maxent_irl.py" ]
[ "\"\"\"Implements Exact Maximum Entropy IRL from my thesis\"\"\"\n\nimport numpy as np\nfrom numba import jit\nfrom numba import types\nfrom numba.typed import Dict, List\n\nfrom scipy.optimize import minimize\n\n\nfrom mdp_extras import (\n Linear,\n Disjoint,\n trajectory_reward,\n DiscreteExplicitExtras,\n DiscreteImplicitExtras,\n)\n\n\n# Placeholder for 'negative infinity' which doesn't cause NaN in log-space operations\n_NINF = np.finfo(np.float64).min\n\n\n@jit(nopython=True)\ndef nb_forward_pass_log(p0s, L, t_mat, gamma=1.0, rs=None, rsa=None, rsas=None):\n \"\"\"Compute *forward* message passing variable in log-space\n\n This is denoted 'alpha' in the paper/thesis\n\n Args:\n p0s (numpy array): Starting state probabilities\n L (int): Maximum path length\n t_mat (numpy array): |S|x|A|x|S| transition matrix\n\n gamma (float): Discount factor\n rs (numpy array): |S| array of linear state reward weights\n rsa (numpy array): |S|x|A| array of linear state-action reward weights\n rsas (numpy array): |S|x|A|x|S| array of linear state-action-state reward weights\n\n Returns:\n (numpy array): |S|xL array of forward message values in log space\n \"\"\"\n\n if rs is None:\n rs = np.zeros(t_mat.shape[0])\n if rsa is None:\n rsa = np.zeros(t_mat.shape[0:2])\n if rsas is None:\n rsas = np.zeros(t_mat.shape[0:3])\n\n alpha = np.zeros((t_mat.shape[0], L))\n alpha[:, 0] = np.log(p0s) + rs\n for t in range(L - 1):\n for s2 in range(t_mat.shape[2]):\n # Find maximum value among all parents of s2\n m_t = _NINF\n for s1 in range(t_mat.shape[0]):\n for a in range(t_mat.shape[1]):\n if t_mat[s1, a, s2] == 0:\n continue\n m_t = max(\n m_t,\n (\n alpha[s1, t]\n + np.log(t_mat[s1, a, s2])\n + gamma ** ((t + 1) - 1) * (rsa[s1, a] + rsas[s1, a, s2])\n ),\n )\n m_t += (gamma ** (t + 1)) * rs[s2]\n\n # Compute next column of alpha in log-space\n for s1 in range(t_mat.shape[0]):\n for a in range(t_mat.shape[1]):\n if t_mat[s1, a, s2] == 0:\n continue\n alpha[s2, t + 1] += t_mat[s1, a, s2] * np.exp(\n alpha[s1, t]\n + gamma ** ((t + 1) - 1) * (rsa[s1, a] + rsas[s1, a, s2])\n + (gamma ** (t + 1)) * rs[s2]\n - m_t\n )\n alpha[s2, t + 1] = m_t + np.log(alpha[s2, t + 1])\n\n return alpha\n\n\n@jit(nopython=True)\ndef nb_forward_pass_log_deterministic_stateonly(\n p0s, L, parents, rs, gamma=1.0, padded=False\n):\n \"\"\"Compute *forward* message passing variable in log-space\n\n This is denoted 'alpha' in the paper/thesis\n\n This version of the forward pass function makes extra assumptions so we can handle\n some much larger problems\n - Dynamics are deterministic\n - Rewards are state-only\n\n Args:\n p0s (numpy array): Starting state probabilities\n L (int): Maximum path length\n parents (numpy array): Fixed-size parents array. Rows indices correspond to\n states, and the first X elements of each row contain the parent state IDs\n for that state. Any remaining elements of that row are then -1.\n rs (numpy array): |S| array of linear state reward weights\n\n gamma (float): Discount factor\n padded (bool): Is this MDP padded? 
In which case, we need to handle the parents\n array with extra caution (it won't have the auxiliary state/action included)\n\n Returns:\n (numpy array): |S|xL array of forward message values in log space\n \"\"\"\n num_states = len(p0s)\n\n alpha = np.zeros((num_states, L))\n alpha[:, 0] = np.log(p0s) + rs\n\n # Handle padded MDP - the auxiliary state has every state (including itself)\n # as a parent, but this isn't reflected in the parents fixed size array\n num_aux_states = 1 if padded else 0\n\n for t in range(L - 1):\n for s2 in range(num_states - num_aux_states):\n # Find maximum value among all parents of s2\n m_t = _NINF\n for s1 in parents[s2, :]:\n if s1 < 0:\n # s2 has no more parents\n break\n m_t = max(m_t, alpha[s1, t])\n m_t += (gamma ** (t + 1)) * rs[s2]\n\n # Compute next column of alpha in log-space\n for s1 in parents[s2, :]:\n if s1 < 0:\n # s2 has no more parents\n break\n alpha[s2, t + 1] += 1.0 * np.exp(\n alpha[s1, t] + (gamma ** (t + 1)) * rs[s2] - m_t\n )\n alpha[s2, t + 1] = m_t + np.log(alpha[s2, t + 1])\n\n if padded:\n # Handle auxiliary state as a special case - every state (including itself\n # is a parent state)\n s_aux = num_states - 1\n s_aux_parents = list(range(num_states))\n\n # Find maximum value among all parents of s2\n m_t = _NINF\n for s1 in s_aux_parents:\n m_t = max(m_t, alpha[s1, t])\n m_t += (gamma ** (t + 1)) * rs[s_aux]\n\n # Compute next column of alpha in log-space\n for s1 in s_aux_parents:\n alpha[s_aux, t + 1] += 1.0 * np.exp(\n alpha[s1, t] + (gamma ** (t + 1)) * rs[s_aux] - m_t\n )\n alpha[s_aux, t + 1] = m_t + np.log(alpha[s_aux, t + 1])\n\n return alpha\n\n\n@jit(nopython=True)\ndef nb_backward_pass_log(L, t_mat, gamma=1.0, rs=None, rsa=None, rsas=None):\n \"\"\"Compute *backward* message passing variable in log space\n\n This is denoted 'beta' in the paper/thesis\n\n Args:\n L (int): Maximum path length\n t_mat (numpy array): |S|x|A|x|S| transition matrix\n children (dict): Dictionary mapping states to (a, s') child tuples\n\n gamma (float): Discount factor\n rs (numpy array): Linear state reward weights\n rsa (numpy array): Linear state-action reward weights\n rsas (numpy array): Linear state-action-state reward weights\n\n Returns:\n (numpy array): |S| x L array of backward message values in log space\n \"\"\"\n\n if rs is None:\n rs = np.zeros(t_mat.shape[0])\n if rsa is None:\n rsa = np.zeros(t_mat.shape[0:2])\n if rsas is None:\n rsas = np.zeros(t_mat.shape[0:3])\n\n beta = np.zeros((t_mat.shape[0], L))\n beta[:, 0] = gamma ** (L - 1) * rs\n for t in range(L - 1):\n for s1 in range(t_mat.shape[0]):\n # Find maximum value among children of s1\n m_t = _NINF\n for a in range(t_mat.shape[1]):\n for s2 in range(t_mat.shape[2]):\n if t_mat[s1, a, s2] == 0:\n continue\n m_t = max(\n m_t,\n (\n np.log(t_mat[s1, a, s2])\n + gamma ** (L - (t + 1) - 1)\n * (rsa[s1, a] + rsas[s1, a, s2])\n + beta[s2, t]\n ),\n )\n m_t += gamma ** (L - (t + 1) - 1) * rs[s1]\n\n # Compute next column of beta in log-space\n for a in range(t_mat.shape[1]):\n for s2 in range(t_mat.shape[2]):\n if t_mat[s1, a, s2] == 0:\n continue\n beta[s1, t + 1] += t_mat[s1, a, s2] * np.exp(\n gamma ** (L - (t + 1) - 1)\n * (rs[s1] + rsa[s1, a] + rsas[s1, a, s2])\n + beta[s2, t]\n - m_t\n )\n beta[s1, t + 1] = m_t + np.log(beta[s1, t + 1])\n\n return beta\n\n\n@jit(nopython=True)\ndef nb_backward_pass_log_deterministic_stateonly(L, children, rs, gamma=1.0):\n \"\"\"Compute *backward* message passing variable in log space\n\n This is denoted 'beta' in the paper/thesis\n\n 
This version of the backward pass function makes extra assumptions so we can handle\n some much larger problems\n - Dynamics are deterministic\n - Rewards are state-only\n\n Args:\n L (int): Maximum path length\n children (numpy array): Fixed-size children array. Rows indices correspond to\n states, and the first X elements of each row contain the child state IDs\n for that state. Any remaining elements of that row are then -1.\n rs (numpy array): Linear state\n\n gamma (float): Discount factor\n\n Returns:\n (numpy array): |S| x L array of backward message values in log space\n \"\"\"\n num_states = len(children)\n\n beta = np.zeros((num_states, L))\n beta[:, 0] = gamma ** (L - 1) * rs\n for t in range(L - 1):\n for s1 in range(num_states):\n\n # Find maximum value among children of s1\n m_t = _NINF\n for s2 in children[s1, :]:\n if s2 < 0:\n # s1 has no more children\n break\n m_t = max(m_t, beta[s2, t])\n m_t += gamma ** (L - (t + 1) - 1) * rs[s1]\n\n # Compute next column of beta in log-space\n for s2 in children[s1, :]:\n if s2 < 0:\n # s1 has no more children\n break\n beta[s1, t + 1] += np.exp(\n gamma ** (L - (t + 1) - 1) * rs[s1] + beta[s2, t] - m_t\n )\n beta[s1, t + 1] = m_t + np.log(beta[s1, t + 1])\n\n return beta\n\n\n@jit(nopython=True)\ndef nb_marginals_log(\n L, t_mat, alpha_log, beta_log, Z_theta_log, gamma=1.0, rsa=None, rsas=None\n):\n \"\"\"Compute marginal terms\n\n Args:\n L (int): Maximum path length\n t_mat (numpy array): |S|x|A|x|S| transition matrix\n alpha_log (numpy array): |S|xL array of *forward* message values in log space\n beta_log (numpy array): |S|xL array of *backward* message values in log space\n Z_theta_log (float): Partition value in log space\n\n gamma (float): Discount factor\n rsa (numpy array): |S|x|A| array of linear state-action reward weights\n rsas (numpy array): |S|x|A|x|S| array of linear state-action-state reward\n weights\n\n Returns:\n (numpy array): |S|xL array of state marginals in log space\n (numpy array): |S|x|A|x(L-1) array of state-action marginals in log space\n (numpy array): |S|x|A|x|S|x(L-1) array of state-action-state marginals in log space\n \"\"\"\n\n if rsa is None:\n rsa = np.zeros((t_mat.shape[0], t_mat.shape[1]))\n if rsas is None:\n rsas = np.zeros((t_mat.shape[0], t_mat.shape[1], t_mat.shape[2]))\n\n pts = np.zeros((t_mat.shape[0], L))\n ptsa = np.zeros((t_mat.shape[0], t_mat.shape[1], L - 1))\n ptsas = np.zeros((t_mat.shape[0], t_mat.shape[1], t_mat.shape[2], L - 1))\n\n for t in range(L - 1):\n\n for s1 in range(t_mat.shape[0]):\n\n # if np.isneginf(alpha_log[s1, t]):\n if np.exp(alpha_log[s1, t]) == 0:\n # Catch edge case where the forward message value is zero to prevent\n # floating point error\n pts[s1, t] = -np.inf\n ptsa[s1, :, t] = -np.inf\n ptsas[s1, :, :, t] = -np.inf\n else:\n # Compute max value\n m_t = _NINF\n for a in range(t_mat.shape[1]):\n for s2 in range(t_mat.shape[2]):\n if t_mat[s1, a, s2] != 0:\n m_t = max(\n m_t,\n (\n np.log(t_mat[s1, a, s2])\n + gamma ** ((t + 1) - 1)\n * (rsa[s1, a] + rsas[s1, a, s2])\n + beta_log[s2, L - (t + 1) - 1]\n ),\n )\n m_t += alpha_log[s1, t] - Z_theta_log\n\n # Compute state marginals in log space\n for a in range(t_mat.shape[1]):\n for s2 in range(t_mat.shape[2]):\n contrib = t_mat[s1, a, s2] * np.exp(\n alpha_log[s1, t]\n + gamma ** ((t + 1) - 1) * (rsa[s1, a] + rsas[s1, a, s2])\n + beta_log[s2, L - (t + 1) - 1]\n - Z_theta_log\n - m_t\n )\n pts[s1, t] += contrib\n ptsa[s1, a, t] += contrib\n if contrib == 0:\n ptsas[s1, a, s2, t] = -np.inf\n else:\n ptsas[s1, 
a, s2, t] = m_t + np.log(contrib)\n if ptsa[s1, a, t] == 0:\n ptsa[s1, a, t] = -np.inf\n else:\n ptsa[s1, a, t] = m_t + np.log(ptsa[s1, a, t])\n if pts[s1, t] == 0:\n pts[s1, t] = -np.inf\n else:\n pts[s1, t] = m_t + np.log(pts[s1, t])\n\n # Compute final column of pts\n pts[:, L - 1] = alpha_log[:, L - 1] - Z_theta_log\n\n return pts, ptsa, ptsas\n\n\n@jit(nopython=True)\ndef nb_marginals_log_deterministic_stateonly(\n L, children, alpha_log, beta_log, Z_theta_log\n):\n \"\"\"Compute marginal terms\n\n This version of the marginal function makes extra assumptions so we can handle\n some much larger problems\n - Dynamics are deterministic\n - Rewards are state-only\n\n Args:\n L (int): Maximum path length\n children (numpy array): Fixed-size children array. Rows indices correspond to\n states, and the first X elements of each row contain the child state IDs\n for that state. Any remaining elements of that row are then -1.\n alpha_log (numpy array): |S|xL array of *forward* message values in log space\n beta_log (numpy array): |S|xL array of *backward* message values in log space\n Z_theta_log (float): Partition value in log space\n\n Returns:\n (numpy array): |S|xL array of state marginals in log space\n \"\"\"\n\n num_states = len(children)\n\n pts = np.zeros((num_states, L))\n\n for t in range(L - 1):\n\n for s1 in range(num_states):\n\n # if np.isneginf(alpha_log[s1, t]):\n if np.exp(alpha_log[s1, t]) == 0:\n # Catch edge case where the forward message value is zero to prevent\n # floating point error\n pts[s1, t] = -np.inf\n else:\n # Compute max value\n m_t = _NINF\n for s2 in children[s1, :]:\n if s2 < 0:\n # s1 has no more children\n break\n m_t = max(m_t, beta_log[s2, L - (t + 1) - 1])\n m_t += alpha_log[s1, t] - Z_theta_log\n\n # Compute state marginals in log space\n for s2 in children[s1, :]:\n if s2 < 0:\n # s1 has no more children\n break\n contrib = np.exp(\n alpha_log[s1, t]\n + beta_log[s2, L - (t + 1) - 1]\n - Z_theta_log\n - m_t\n )\n pts[s1, t] += contrib\n\n if pts[s1, t] == 0:\n pts[s1, t] = -np.inf\n else:\n pts[s1, t] = m_t + np.log(pts[s1, t])\n\n # Compute final column of pts\n pts[:, L - 1] = alpha_log[:, L - 1] - Z_theta_log\n\n return pts\n\n\ndef log_partition(L, alpha_log, padded=True):\n \"\"\"Compute the log partition function\n\n Args:\n L (int): Maximum path length\n alpha_log (numpy array): |S|xL *forward* message variable in log space\n\n padded (bool): If true, the final row of the alpha matrix corresponds\n to a dummy state which is used for MDP padding\n\n Returns:\n (float): Partition function value\n \"\"\"\n\n # If the dummy state is included, don't include it in the partition\n if padded:\n alpha_log = alpha_log[0:-1, :]\n\n # Find maximum value\n m = np.max(alpha_log[:, 0:L])\n\n # Compute partition in log space\n return m + np.log(np.sum(np.exp(alpha_log[:, 0:L] - m)))\n\n\ndef maxent_log_likelihood(xtr, phi, reward, rollouts, weights=None):\n \"\"\"\n Find the average log likelihood of a set of paths under a MaxEnt model\n\n That is,\n\n \\hat{\\ell}(\\theta) = \\E_{\\Data}[ \\log p(\\tau \\mid \\theta)\n\n To get the total log-likelihood of the dataset (i.e. 
gets larger as you add more\n data), multiply the value returned by this function with len(rollouts).\n\n To get the total data likelihood, take the exponent of that value.\n\n Args:\n xtr (mdp_extras.DiscreteExplicitExtras): MDP extras\n phi (mdp_extras.FeatureFunction): Feature function to use with linear reward\n parameters.\n reward (mdp_extras.RewardFunction): Reward function\n rollouts (list): List of rollouts, each a list of (s, a) tuples\n\n weights (numpy array): Optional vector of path weights for weighted IRL problems\n\n Returns:\n (float): Average log-likelihood of the paths in rollouts under the given reward\n \"\"\"\n return np.average(\n maxent_path_logprobs(xtr, phi, reward, rollouts, xtr.is_padded),\n weights=weights,\n )\n\n\ndef maxent_path_logprobs(xtr, phi, reward, rollouts):\n \"\"\"Efficiently compute log probability of a set of paths\n\n Args:\n xtr (mdp_extras.DiscreteExplicitExtras): MDP extras\n phi (mdp_extras.FeatureFunction): Feature function to use with linear reward\n parameters.\n reward (mdp_extras.RewardFunction): Reward function\n rollouts (list): List of rollouts, each a list of (s, a) tuples\n\n Returns:\n (list): List of log-probabilities under a MaxEnt model of paths\n \"\"\"\n\n # Find max path length\n if len(rollouts) == 1:\n max_path_length = len(rollouts[0])\n else:\n max_path_length = max(*[len(r) for r in rollouts])\n\n if isinstance(xtr, DiscreteExplicitExtras):\n # Process tabular MDP\n\n # Catch float overflow as an error - reward magnitude is too large for\n # exponentiation with this max path length\n with np.errstate(over=\"raise\"):\n alpha_log = nb_forward_pass_log(\n xtr.p0s,\n max_path_length,\n xtr.t_mat,\n xtr.gamma,\n *reward.structured(xtr, phi),\n )\n\n elif isinstance(xtr, DiscreteImplicitExtras):\n # Handle Implicit dynamics MDP\n\n # Only supports state features - otherwise we run out of memory\n assert (\n phi.type == phi.Type.OBSERVATION\n ), \"For DiscreteImplicit MPDs only state-based rewards are supported\"\n\n # Only supports deterministic transitions\n assert (\n xtr.is_deterministic\n ), \"For DiscreteImplicit MPDs only deterministic dynamics are supported\"\n\n rs = np.array([reward(phi(s)) for s in xtr.states])\n\n # Catch float overflow as an error - reward magnitude is too large for\n # exponentiation with this max path length\n with np.errstate(over=\"raise\"):\n # Compute alpha_log\n alpha_log = nb_forward_pass_log_deterministic_stateonly(\n xtr.p0s,\n max_path_length,\n xtr.parents_fixedsize,\n rs,\n gamma=xtr.gamma,\n padded=xtr.is_padded,\n )\n else:\n raise ValueError(f\"Unknown MDP class {xtr}\")\n\n with np.errstate(over=\"raise\"):\n # Compute partition value\n Z_theta_log = log_partition(max_path_length, alpha_log, padded=xtr.is_padded)\n\n path_log_probs = (\n np.array(\n [\n xtr.path_log_probability(p) + trajectory_reward(xtr, phi, reward, p)\n for p in rollouts\n ]\n )\n - Z_theta_log\n )\n\n return path_log_probs\n\n\ndef maxent_ml_path(xtr, phi, reward, start, goal, max_path_length):\n \"\"\"Find the ML path from s1 to sg under a MaxEnt model\n\n If transitions can incur +ve rewards the returned paths may contain loops\n\n NB ajs 14/Jan/2020 The log likelihood of the path that we compute internally\n is fine for doing viterbi ML path inference, but it's not the actual path\n log likelihood - it's missing the partition function, and the gamma time offset\n is incorrect (depending on what start time the Viterbi alg picks).\n\n Args:\n xtr (DiscreteExplicitExtras): MDP Extras object\n phi 
(FeatureFunction): MDP Featrure function\n reward (Linear): Linear reward function\n start (int): Starting state\n goal (int): End state\n max_path_length (int): Maximum allowable path length to search\n\n Returns:\n (list): Maximum Likelihood path from start to goal under the given MaxEnt reward\n function, or None if no path is possible\n \"\"\"\n\n rs, rsa, rsas = reward.structured(xtr, phi)\n\n # We check if the rewards are all \\le 0 - this allows a sp\n all_negative_rewards = (\n np.max(rs) <= 0 and np.max(rsa.flat) <= 0 and np.max(rsas.flat) <= 0\n )\n\n # Initialize an SxA LL Viterbi trellis\n sa_lls = np.zeros((len(xtr.states), len(xtr.actions), max_path_length)) - np.inf\n sa_lls[goal, :, :] = xtr.gamma ** np.arange(max_path_length) * rs[goal]\n\n # Supress divide by zero - we take logs of many zeroes here\n with np.errstate(divide=\"ignore\"):\n\n # Walk backward to propagate the maximum LL\n for t in range(max_path_length - 2, -1, -1):\n\n # Max-Reduce over actions to compute state LLs\n # (it's a max because we get to choose our actions)\n s_lls = np.max(sa_lls, axis=1)\n\n if not np.any(np.isneginf((s_lls[:, t + 1]))):\n # The backward message has propagated to every state one step in the future\n if all_negative_rewards:\n # In this context we are after a shortest path\n # Drop any earlier times from the trellis and early stop the backward pass\n sa_lls = sa_lls[:, :, t + 1 :]\n break\n\n # Sweep end states\n for s2 in xtr.states:\n\n if np.isneginf(s_lls[s2, t + 1]):\n # Skip this state - it hasn't been reached by probability messages yet\n continue\n\n # Sweep actions\n for a in xtr.actions:\n\n # Sweep starting states\n for s1 in xtr.states:\n\n if xtr.terminal_state_mask[s1]:\n # We can't step forward from terminal states - skip this one\n continue\n\n transition_ll = (\n xtr.gamma ** (t - 1) * rs[s1]\n + xtr.gamma ** (t) * rsa[s1, a]\n + xtr.gamma ** (t) * rsas[s1, a, s2]\n # + xtr.gamma ** (t + 1) * rs[s2]\n + np.log(xtr.t_mat[s1, a, s2])\n )\n\n if np.isneginf(transition_ll):\n # This transition is impossible - skip\n continue\n\n # Store the max because we're after the maximum likelihood path\n sa_lls[s1, a, t] = max(\n sa_lls[s1, a, t], transition_ll + s_lls[s2, t + 1]\n )\n\n # Max-reduce to get state/action ML trellises for convenience\n s_lls = np.max(sa_lls, axis=1)\n\n # Identify our starting time\n if np.isneginf(np.max(s_lls[start])):\n # There is no feasible path from s1 to sg less or equal to than max_path_length\n return None\n start_time = np.argmax(s_lls[start, :])\n\n # Walk forward from start state, start time to re-construct path\n state = start\n time = start_time\n ml_path = []\n while state != goal:\n action = np.argmax(sa_lls[state, :, time])\n ml_path.append((state, action))\n successor_states = [s for (a, s) in xtr.children[state] if a == action]\n\n # Choose successor state with highest log likelihood at time + 1\n ml = -np.inf\n next_state = None\n for s2 in successor_states:\n s2_ll = s_lls[s2, time + 1]\n if s2_ll >= ml:\n next_state = s2\n ml = s2_ll\n\n state = next_state\n time = time + 1\n\n # Add final (goal) state\n ml_path.append((state, None))\n\n return ml_path\n\n\ndef sw_maxent_irl(x, xtr, phi, phi_bar, max_path_length, nll_only=False):\n \"\"\"Maximum Entropy IRL using our exact algorithm\n\n Returns NLL and NLL gradient of the demonstration data under the proposed reward\n parameters x.\n\n N.b. 
the computed NLL here doesn't include the contribution from the MDP dynamics\n for each path - this term is independent of the parameter x, so doesn't affect the\n optimization result.\n\n Args:\n x (numpy array): Current reward function parameter vector estimate\n xtr (mdp_extras.BaseExtras): Extras object for the MDP being\n optimized\n phi (mdp_extras.FeatureFunction): Feature function to use with linear reward\n parameters. We require len(phi) == len(x).\n phi_bar (numpy array): Feature expectation. N.b. if using a weighted feature\n expectation, it is very important to make sure the weights you used\n sum to 1.0!\n max_path_length (int): Maximum path length\n nll_only (bool): If true, only return NLL\n\n Returns:\n (float): Negative Log Likelihood of a MaxEnt model with x as the reward\n parameters and the given feature expectation\n (numpy array): Downhill gradient of negative log likelihood at the given point\n\n \"\"\"\n\n # Store current argument guess\n r_linear = Linear(x)\n\n if isinstance(xtr, DiscreteExplicitExtras):\n # Process tabular MDP\n\n # Explode reward function to indicator arrays\n rs, rsa, rsas = r_linear.structured(xtr, phi)\n\n # Catch float overflow as an error - reward magnitude is too large for\n # exponentiation with this max path length\n with np.errstate(over=\"raise\"):\n\n # Compute forward message\n alpha_log = nb_forward_pass_log(\n xtr.p0s,\n max_path_length,\n xtr.t_mat,\n gamma=xtr.gamma,\n rs=rs,\n rsa=rsa,\n rsas=rsas,\n )\n\n # Compute partition value\n Z_theta_log = log_partition(\n max_path_length, alpha_log, padded=xtr.is_padded\n )\n\n # Compute NLL\n nll = Z_theta_log - x @ phi_bar\n\n if nll_only:\n return nll\n else:\n\n # Compute gradient\n with np.errstate(over=\"raise\"):\n\n # Compute backward message\n beta_log = nb_backward_pass_log(\n max_path_length,\n xtr.t_mat,\n gamma=xtr.gamma,\n rs=rs,\n rsa=rsa,\n rsas=rsas,\n )\n\n # Compute transition marginals\n pts_log, ptsa_log, ptsas_log = nb_marginals_log(\n max_path_length,\n xtr.t_mat,\n alpha_log,\n beta_log,\n Z_theta_log,\n gamma=xtr.gamma,\n rsa=rsa,\n rsas=rsas,\n )\n\n # Compute gradient based on feature type\n if phi.type == Disjoint.Type.OBSERVATION:\n\n s_counts = np.sum(np.exp(pts_log), axis=-1)\n efv_s = np.sum([s_counts[s] * phi(s) for s in xtr.states], axis=0)\n nll_grad = efv_s - phi_bar\n\n elif phi.type == Disjoint.Type.OBSERVATION_ACTION:\n\n sa_counts = np.sum(np.exp(ptsa_log), axis=-1)\n efv_sa = np.sum(\n [\n sa_counts[s1, a] * phi(s1, a)\n for s1 in xtr.states\n for a in xtr.actions\n ],\n axis=0,\n )\n nll_grad = efv_sa - phi_bar\n\n elif phi.type == Disjoint.Type.OBSERVATION_ACTION_OBSERVATION:\n\n sas_counts = np.sum(np.exp(ptsas_log), axis=-1)\n efv_sas = np.sum(\n [\n sas_counts[s1, a, s2] * phi(s1, a, s2)\n for s1 in xtr.states\n for a in xtr.actions\n for s2 in xtr.states\n ],\n axis=0,\n )\n nll_grad = efv_sas - phi_bar\n\n else:\n raise ValueError\n\n return nll, nll_grad\n\n elif isinstance(xtr, DiscreteImplicitExtras):\n # Handle Implicit dynamics MDP\n\n # Only supports state features - otherwise we run out of memory\n assert (\n phi.type == phi.Type.OBSERVATION\n ), \"For DiscreteImplicit MPDs only state-based rewards are supported\"\n\n # Only supports deterministic transitions\n assert (\n xtr.is_deterministic\n ), \"For DiscreteImplicit MPDs only deterministic dynamics are supported\"\n\n rs = np.array([r_linear(phi(s)) for s in xtr.states])\n\n # Catch float overflow as an error - reward magnitude is too large for\n # exponentiation with this max 
path length\n with np.errstate(over=\"raise\"):\n # Compute alpha_log\n alpha_log = nb_forward_pass_log_deterministic_stateonly(\n xtr.p0s,\n max_path_length,\n xtr.parents_fixedsize,\n rs,\n gamma=xtr.gamma,\n padded=xtr.is_padded,\n )\n\n # Compute partition value\n Z_theta_log = log_partition(\n max_path_length, alpha_log, padded=xtr.is_padded\n )\n\n # Compute NLL\n nll = Z_theta_log - x @ phi_bar\n\n if nll_only:\n return nll\n else:\n\n # Compute NLL gradient as well\n with np.errstate(over=\"raise\"):\n # Compute beta_log\n beta_log = nb_backward_pass_log_deterministic_stateonly(\n max_path_length, xtr.children_fixedsize, rs, gamma=xtr.gamma\n )\n\n # Compute transition marginals pts_log (not ptsa, ptsas)\n pts_log = nb_marginals_log_deterministic_stateonly(\n max_path_length,\n xtr.children_fixedsize,\n alpha_log,\n beta_log,\n Z_theta_log,\n )\n\n # Compute gradient\n s_counts = np.sum(np.exp(pts_log), axis=-1)\n efv_s = np.sum([s_counts[s] * phi(s) for s in xtr.states], axis=0)\n nll_grad = efv_s - phi_bar\n return nll, nll_grad\n\n else:\n # Unknown MDP type\n raise ValueError(f\"Unknown MDP class {xtr}\")\n\n\ndef main():\n \"\"\"Main function\"\"\"\n\n import gym\n import random\n import itertools as it\n\n from mdp_extras import vi, OptimalPolicy, Indicator\n from mdp_extras.envs import nchain_extras\n\n n = 2\n env = gym.make(\"NChain-v0\", n=n)\n xtr, phi, reward_gt = nchain_extras(env, gamma=1.0)\n\n # Change to a uniform MDP\n xtr._t_mat = np.ones_like(xtr.t_mat) / len(xtr.states)\n xtr._p0s = np.ones(len(xtr.states)) / len(xtr.states)\n reward_gt.theta = np.zeros_like(reward_gt.theta)\n\n # Change phi to state-based indicator\n phi = Indicator(Indicator.Type.OBSERVATION, xtr)\n\n # Solve the MDP\n max_path_length = n\n # num_demos = 10000\n # demo_star = []\n # for _ in range(num_demos):\n # demo = []\n # # Start state\n # s = random.choice(xtr.states)\n # for t in it.count():\n # a = random.choice(xtr.actions)\n # demo.append((s, a))\n # s = random.choice(xtr.states)\n # if t == max_path_length - 2:\n # break\n # demo.append((s, None))\n # demo_star.append(demo)\n # phi_bar_star = phi.demo_average(demo_star)\n phi_bar_star = np.ones(len(phi)) / len(phi)\n\n x = np.zeros(len(phi))\n nll = sw_maxent_irl(x, xtr, phi, phi_bar_star, max_path_length, nll_only=True)\n\n pass\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.log", "numpy.ones_like", "numpy.arange", "numpy.finfo", "numpy.isneginf", "numpy.max", "numpy.argmax", "numpy.zeros_like", "numpy.errstate", "numpy.exp", "numpy.zeros" ] ]
rakshithb/FFF-Sensing
[ "13ee0724e97b0142766cd858102b5cce00c798cd" ]
[ "FFF_SysID_Vision_Analysis.py" ]
[ "# import required libraries\r\nimport numpy as np\r\nimport cv2\r\nprint('OpenCV version: '+cv2.__version__)\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nimport datetime\r\nimport os\r\nfrom collections import Counter\r\n\r\n# Set source folder\r\nSRC_FOLDER = \"C:/Users/raksh/OneDrive - The Pennsylvania State University/PhD Research/Paper-4/SysID Experiment/OL Test 3/\"\r\n\r\n# open and read file containing start and end timestamps of the videos\r\ndf_vidTimes = pd.read_excel(SRC_FOLDER + \"Video_Timestamps_1.xlsx\")\r\ndf_vidTimes.drop(df_vidTimes.columns[0],axis=1,inplace=True)\r\n\r\n################ ALL FUNCTIONS DEFINITIONS ################\r\n\r\ndef perspCorrection(img,pt1,pt2,pt3,pt4,scale_width,scale_height):\r\n \r\n # Create a copy of the image\r\n img_copy = np.copy(img)\r\n\r\n # Convert to RGB so as to display via matplotlib\r\n # Using Matplotlib we can easily find the coordinates of the 4 points that is essential for finding then transformation matrix\r\n #img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)\r\n \r\n # to calculate the transformation matrix\r\n input_pts = np.float32([pt1,pt2,pt3,pt4])\r\n output_pts = np.float32([[0,0],[scale_width-1,0],[0,scale_height-1],[scale_width-1,scale_height-1]])\r\n\r\n # Compute the perspective transform M\r\n M = cv2.getPerspectiveTransform(input_pts,output_pts)\r\n \r\n # Apply the perspective transformation to the image\r\n imgPersp = cv2.warpPerspective(img,M,(scale_width, scale_height)) #,flags=cv2.INTER_LINEAR) cv2.INTER_CUBIC is also an option\r\n imgGrayPersp = cv2.cvtColor(imgPersp, cv2.COLOR_BGR2GRAY) \r\n \r\n # visulaize corners using cv2 circles\r\n for x in range (0,4):\r\n cv2.circle(img_copy,(round(input_pts[x][0]),round(input_pts[x][1])),5,(0,0,255),cv2.FILLED) \r\n \r\n return [img_copy,imgPersp,imgGrayPersp]\r\n \r\ndef extractTopBottom(img,tStart,tEnd,bStart,bEnd):\r\n img_top = img[tStart[1]:tEnd[1],tStart[0]:tEnd[0]]\r\n img_bottom = img[bStart[1]:bEnd[1],bStart[0]:bEnd[0]] \r\n \r\n return [img_top,img_bottom]\r\n \r\ndef gaussianBlur(img,fsize):\r\n \r\n # gaussian blur\r\n gblur = cv2.GaussianBlur(img,(fsize,fsize),0)\r\n \r\n return gblur\r\n\r\ndef medianBlur(img,fsize=3):\r\n \r\n # median blur - effective at removing salt and pepper noise\r\n mblur = cv2.medianBlur(img,fsize)\r\n \r\n return mblur\r\n \r\ndef bilateralFilter(img):\r\n \r\n # Bilateral filter preserves edges while removing noise\r\n bfblur = cv2.bilateralFilter(img,9,75,75)\r\n \r\n return bfblur\r\n \r\ndef gAdaptiveThresholding(img):\r\n \r\n # median filtering\r\n adaptive_gaussian = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\\\r\n cv2.THRESH_BINARY,11,2)\r\n\r\n return adaptive_gaussian\r\n\r\ndef morphOps(img,kernel1,kernel2,k1_num_passes=2): \r\n \r\n # Closing = Dilation + Erosion\r\n # dilation\r\n mask_dil = cv2.dilate(img,kernel1,iterations = k1_num_passes)\r\n \r\n # erosion\r\n mask_erode = cv2.erode(mask_dil,kernel2,iterations = 1)\r\n \r\n return mask_erode \r\n\r\ndef computeW_Rev(img,img_debug): \r\n \r\n avg_num_pixels = 159\r\n scaling_factor = 1.0\r\n mm_per_pixel = ((1/32)*25.4)/(scaling_factor*avg_num_pixels)\r\n edge_length_threshold = 55\r\n min_L_edge_threshold = False\r\n min_R_edge_threshold = False\r\n \r\n # Predefine arrays for data storage\r\n approx_edges = 10\r\n num_edges = np.zeros(img.shape[0]) #,dtype=np.uint16) \r\n edge_start = np.zeros([img.shape[0],approx_edges])#,dtype=np.uint16)\r\n edge_end = np.zeros([img.shape[0],approx_edges])#,dtype=np.uint16)\r\n \r\n 
edge_count = 0\r\n k=0\r\n\r\n sse = False\r\n tse = False\r\n\r\n # start scanning from (0,0) until black pixel is found \r\n # go across columns first\r\n\r\n for i in range(img.shape[0]):\r\n\r\n found_edge = False\r\n temp_edge_count = 0\r\n k=0 \r\n\r\n for j in range(img.shape[1]):\r\n\r\n if(img[i,j]<=50):\r\n # Black pixel found - edge\r\n if(found_edge==False):\r\n found_edge = True\r\n temp_edge_count += 1\r\n num_edges[i] = temp_edge_count\r\n edge_start[i][k] = j\r\n k += 1\r\n\r\n else:\r\n if(found_edge):\r\n edge_end[i][k-1] = j-1\r\n found_edge = False \r\n \r\n x = Counter(num_edges)\r\n y = {z:count for z, count in x.items() if count >= edge_length_threshold and z > 1}\r\n #print(y)\r\n if(len(y)!=0):\r\n edge_condition = sorted(y,key=y.get)[0]\r\n \r\n else:\r\n print('num_edges > 1 and length(num_edges) >= threshold not satisfied . . . Lowering threshold to identify matches')\r\n w = {z:count for z, count in x.items() if count < edge_length_threshold and z > 1}\r\n if(len(w)!=0):\r\n print('Found num_edges > 1 and length(num_edges) < threshold!')\r\n edge_condition = sorted(w,key=w.get)[0]\r\n else:\r\n print('Unable to find edge condition . . . check image')\r\n edge_condition = -1\r\n\r\n if img_debug:\r\n print('edge condition: ' + str(edge_condition))\r\n \r\n if edge_condition == 2: #max(num_edges)==2:\r\n\r\n # max num_edges = 2\r\n \r\n L1_edge_start = edge_start[:,0][np.argwhere(num_edges==2)][np.logical_and(edge_start[:,0][np.argwhere(num_edges==2)]>60,edge_start[:,0][np.argwhere(num_edges==2)]<300)]\r\n L1_edge_end = edge_end[:,0][np.argwhere(num_edges==2)][np.logical_and(edge_end[:,0][np.argwhere(num_edges==2)]>60,edge_end[:,0][np.argwhere(num_edges==2)]<300)]\r\n\r\n if(np.max(L1_edge_start)-np.min(L1_edge_start)>13):\r\n L1_edge_start = L1_edge_start[L1_edge_start >= (np.max(L1_edge_start)-10)]\r\n\r\n if(np.max(L1_edge_end)-np.min(L1_edge_end)>15):\r\n L1_edge_end = L1_edge_end[L1_edge_end >= (np.max(L1_edge_end)-10)]\r\n\r\n trueLedge_start = L1_edge_start\r\n trueLedge_end = L1_edge_end\r\n\r\n R1_edge_start = edge_start[:,1][np.argwhere(num_edges==2)][edge_start[:,1][np.argwhere(num_edges==2)]>350]\r\n R1_edge_end = edge_end[:,1][np.argwhere(num_edges==2)][edge_end[:,1][np.argwhere(num_edges==2)]>350]\r\n\r\n if(np.max(R1_edge_start)-np.min(R1_edge_start)>13):\r\n R1_edge_start = R1_edge_start[R1_edge_start <= (np.min(R1_edge_start)+10)]\r\n\r\n if(np.max(R1_edge_end)-np.min(R1_edge_end)>13):\r\n R1_edge_end = R1_edge_end[R1_edge_end <= (np.min(R1_edge_end)+10)]\r\n\r\n trueRedge_start = R1_edge_start\r\n trueRedge_end = R1_edge_end\r\n\r\n if(len(trueLedge_start)>len(trueLedge_end)):\r\n trueLedge_start = np.array([trueLedge_start[i] for i in range(len(trueLedge_end))])\r\n \r\n if(len(trueLedge_start)<len(trueLedge_end)):\r\n trueLedge_end = np.array([trueLedge_end[i] for i in range(len(trueLedge_start))])\r\n\r\n if(len(trueRedge_start)>len(trueRedge_end)):\r\n trueRedge_start = np.array([trueRedge_start[i] for i in range(len(trueRedge_end))])\r\n \r\n if(len(trueRedge_start)<len(trueRedge_end)):\r\n trueRedge_end = np.array([trueRedge_end[i] for i in range(len(trueRedge_start))])\r\n\r\n line1_start = (round(np.mean((trueLedge_start+trueLedge_end)/2)),0) \r\n line1_end = (round(np.mean((trueLedge_start+trueLedge_end)/2)),img.shape[0])\r\n\r\n line2_start = (round(np.mean((trueRedge_start+trueRedge_end)/2)),0)\r\n line2_end = (round(np.mean((trueRedge_start+trueRedge_end)/2)),img.shape[0])\r\n\r\n edge_count = 2\r\n case_cond = 1\r\n \r\n elif 
edge_condition == 3: #max(num_edges)==3: \r\n \r\n # max num_edges = 3\r\n \r\n # logic for finding true left edge \r\n L2_edge_start = edge_start[:,1][np.argwhere(num_edges==3)][edge_start[:,1][np.argwhere(num_edges==3)]<250]\r\n if(len(L2_edge_start)>=edge_length_threshold):\r\n trueLedge_start = L2_edge_start\r\n trueLedge_end = edge_end[:,1][np.argwhere(num_edges==3)][edge_end[:,1][np.argwhere(num_edges==3)]<250]\r\n else:\r\n if(len(edge_start[:,0][np.argwhere(num_edges==3)][np.logical_and(edge_start[:,0][np.argwhere(num_edges==3)]<250,edge_start[:,0][np.argwhere(num_edges==3)]>60)])!=0): \r\n L1_edge_start = edge_start[:,0][np.argwhere(num_edges==3)][np.logical_and(edge_start[:,0][np.argwhere(num_edges==3)]<250,edge_start[:,0][np.argwhere(num_edges==3)]>60)] \r\n \r\n if(len(L2_edge_start)!=0):\r\n L1_edge_start = np.hstack((L1_edge_start,L2_edge_start))\r\n\r\n if(np.max(L1_edge_start)-np.min(L1_edge_start)>13):\r\n L1_edge_start = L1_edge_start[L1_edge_start >= (np.max(L1_edge_start)-10)]\r\n\r\n else:\r\n L1_edge_start = edge_start[:,0][np.argwhere(num_edges==2)][edge_start[:,0][np.argwhere(num_edges==2)]<250]\r\n\r\n if(len(L1_edge_start)>=edge_length_threshold):\r\n trueLedge_start = L1_edge_start\r\n\r\n if(len(edge_start[:,0][np.argwhere(num_edges==3)][np.logical_and(edge_start[:,0][np.argwhere(num_edges==3)]<250,edge_start[:,0][np.argwhere(num_edges==3)]>60)])!=0):\r\n trueLedge_end = edge_end[:,0][np.argwhere(num_edges==3)][np.logical_and(edge_end[:,0][np.argwhere(num_edges==3)]<250,edge_end[:,0][np.argwhere(num_edges==3)]>60)] \r\n \r\n if(len(L2_edge_start)!=0):\r\n trueLedge_end = np.hstack((trueLedge_end,edge_end[:,1][np.argwhere(num_edges==3)][edge_end[:,1][np.argwhere(num_edges==3)]<250]))\r\n\r\n if(np.max(trueLedge_end)-np.min(trueLedge_end)>13):\r\n trueLedge_end = trueLedge_end[trueLedge_end >= (np.max(trueLedge_end)-10)]\r\n\r\n else:\r\n trueLedge_end = edge_end[:,0][np.argwhere(num_edges==2)][edge_end[:,0][np.argwhere(num_edges==2)]<250]\r\n \r\n elif(len(L1_edge_start)!=0 and len(L1_edge_start)<edge_length_threshold):\r\n \r\n trueLedge_start = L1_edge_start\r\n\r\n trueLedge_end = edge_end[:,0][np.argwhere(num_edges==3)][edge_end[:,0][np.argwhere(num_edges==3)]<250]\r\n trueLedge_end = np.hstack((trueLedge_end,edge_end[:,0][np.argwhere(num_edges==2)][edge_end[:,0][np.argwhere(num_edges==2)]<250]))\r\n \r\n min_L_edge_threshold = True\r\n else:\r\n print('max(num_edges)=3 invalid true left edge condition encountered . . . 
check code')\r\n\r\n # logic for finding true right edge\r\n R2_edge_start = edge_start[:,1][np.argwhere(num_edges==3)][edge_start[:,1][np.argwhere(num_edges==3)]>350]\r\n if(len(R2_edge_start)>=edge_length_threshold):\r\n trueRedge_start = R2_edge_start\r\n trueRedge_end = edge_end[:,1][np.argwhere(num_edges==3)][edge_end[:,1][np.argwhere(num_edges==3)]>350]\r\n else:\r\n R1_edge_start = edge_start[:,1][np.argwhere(num_edges==2)][edge_start[:,1][np.argwhere(num_edges==2)]>350]\r\n\r\n if(len(R1_edge_start)==0):\r\n # three definite edges\r\n trueRedge_start = edge_start[:,2][np.argwhere(num_edges==3)][edge_start[:,2][np.argwhere(num_edges==3)]>350]\r\n trueRedge_end = edge_end[:,2][np.argwhere(num_edges==3)][edge_end[:,2][np.argwhere(num_edges==3)]>350]\r\n \r\n elif(len(R1_edge_start)>=edge_length_threshold):\r\n trueRedge_start = R1_edge_start\r\n trueRedge_end = edge_end[:,1][np.argwhere(num_edges==2)][edge_end[:,1][np.argwhere(num_edges==2)]>350] \r\n\r\n elif(len(R1_edge_start)!=0 and len(R1_edge_start)<edge_length_threshold):\r\n # there are some elements but edge length is minimal\r\n trueRedge_start = R1_edge_start\r\n trueRedge_end = edge_end[:,1][np.argwhere(num_edges==2)][edge_end[:,1][np.argwhere(num_edges==2)]>350]\r\n\r\n min_R_edge_threshold = True\r\n\r\n else:\r\n print('max(num_edges)=3 invalid true right edge condition encountered . . . check code') \r\n\r\n \r\n if(np.max(trueRedge_start)-np.min(trueRedge_start)>13):\r\n trueRedge_start = trueRedge_start[trueRedge_start <= (np.min(trueRedge_start)+10)]\r\n\r\n if(np.max(trueRedge_end)-np.min(trueRedge_end)>13):\r\n trueRedge_end = trueRedge_end[trueRedge_end <= (np.min(trueRedge_end)+10)]\r\n\r\n \r\n if(len(trueLedge_start)>len(trueLedge_end)):\r\n trueLedge_start = np.array([trueLedge_start[i] for i in range(len(trueLedge_end))])\r\n \r\n if(len(trueLedge_start)<len(trueLedge_end)):\r\n trueLedge_end = np.array([trueLedge_end[i] for i in range(len(trueLedge_start))])\r\n\r\n if(len(trueRedge_start)>len(trueRedge_end)):\r\n trueRedge_start = np.array([trueRedge_start[i] for i in range(len(trueRedge_end))])\r\n \r\n if(len(trueRedge_start)<len(trueRedge_end)):\r\n trueRedge_end = np.array([trueRedge_end[i] for i in range(len(trueRedge_start))])\r\n\r\n if(len(trueLedge_start)<edge_length_threshold):\r\n min_L_edge_threshold = True\r\n\r\n if(len(trueRedge_start)<edge_length_threshold):\r\n min_R_edge_threshold = True\r\n\r\n if(min_L_edge_threshold or min_R_edge_threshold):\r\n line1_start = (round(np.mean((trueLedge_start + trueLedge_end)/2)),0) \r\n line1_end = (round(np.mean((trueLedge_start + trueLedge_end)/2)),img.shape[0])\r\n\r\n line2_start = (round(np.mean((trueRedge_start + trueRedge_end)/2)),0)\r\n line2_end = (round(np.mean((trueRedge_start + trueRedge_end)/2)),img.shape[0])\r\n \r\n edge_count = 3\r\n case_cond = 2 \r\n\r\n elif(np.logical_and(len(trueLedge_start)>=edge_length_threshold,len(trueRedge_start)>=edge_length_threshold)):\r\n \r\n line1_start = (round(np.mean((trueLedge_start + trueLedge_end)/2)),0) \r\n line1_end = (round(np.mean((trueLedge_start + trueLedge_end)/2)),img.shape[0])\r\n\r\n line2_start = (round(np.mean((trueRedge_start + trueRedge_end)/2)),0)\r\n line2_end = (round(np.mean((trueRedge_start + trueRedge_end)/2)),img.shape[0])\r\n \r\n edge_count = 3\r\n case_cond = 3 \r\n \r\n else:\r\n print('max(num_edges)=3 with no matching condition reached . . . 
check code') \r\n \r\n \r\n elif edge_condition == 4: #max(num_edges)==4: \r\n \r\n # max num_edges = 4\r\n # logic for finding true left edge \r\n L3_edge_start = edge_start[:,2][np.argwhere(num_edges==4)][edge_start[:,2][np.argwhere(num_edges==4)]<250]\r\n if(len(L3_edge_start)>=edge_length_threshold):\r\n trueLedge_start = L3_edge_start\r\n trueLedge_end = edge_end[:,2][np.argwhere(num_edges==4)][edge_end[:,2][np.argwhere(num_edges==4)]<250]\r\n else:\r\n L2_edge_start = edge_start[:,1][np.argwhere(num_edges==4)][np.logical_and(edge_start[:,1][np.argwhere(num_edges==4)]<250,edge_start[:,1][np.argwhere(num_edges==4)]>60)]\r\n L2_edge_start = np.hstack((L2_edge_start,edge_start[:,1][np.argwhere(num_edges==3)][edge_start[:,1][np.argwhere(num_edges==3)]<250]))\r\n if(len(L2_edge_start)>=edge_length_threshold):\r\n trueLedge_start = L2_edge_start\r\n\r\n trueLedge_end = edge_end[:,1][np.argwhere(num_edges==4)][np.logical_and(edge_end[:,1][np.argwhere(num_edges==4)]<250,edge_end[:,1][np.argwhere(num_edges==4)]>60)]\r\n trueLedge_end = np.hstack((trueLedge_end,edge_end[:,1][np.argwhere(num_edges==3)][edge_end[:,1][np.argwhere(num_edges==3)]<250]))\r\n \r\n else:\r\n L1_edge_start = edge_start[:,0][np.argwhere(num_edges==2)][edge_start[:,0][np.argwhere(num_edges==2)]<250]\r\n L1_edge_start = np.hstack((L1_edge_start,edge_start[:,0][np.argwhere(num_edges==3)][edge_start[:,0][np.argwhere(num_edges==3)]<250]))\r\n L1_edge_start = np.hstack((L1_edge_start,edge_start[:,0][np.argwhere(num_edges==4)][edge_start[:,0][np.argwhere(num_edges==4)]<250]))\r\n\r\n if(len(L1_edge_start)>= edge_length_threshold):\r\n trueLedge_start = L1_edge_start\r\n\r\n trueLedge_end = edge_end[:,0][np.argwhere(num_edges==2)][edge_end[:,0][np.argwhere(num_edges==2)]<250]\r\n trueLedge_end = np.hstack((trueLedge_end,edge_end[:,0][np.argwhere(num_edges==3)][edge_end[:,0][np.argwhere(num_edges==3)]<250]))\r\n trueLedge_end = np.hstack((trueLedge_end,edge_end[:,0][np.argwhere(num_edges==4)][edge_end[:,0][np.argwhere(num_edges==4)]<250]))\r\n else:\r\n print('max(num_edges)=4 invalid true left edge condition encountered . . . 
check code')\r\n\r\n # logic for finding true right edge\r\n R3_edge_start = edge_start[:,1][np.argwhere(num_edges==4)][edge_start[:,1][np.argwhere(num_edges==4)]>350]\r\n if(len(R3_edge_start)>=edge_length_threshold):\r\n trueRedge_start = R3_edge_start\r\n trueRedge_end = edge_end[:,1][np.argwhere(num_edges==4)][edge_end[:,1][np.argwhere(num_edges==4)]>350]\r\n else:\r\n R2_edge_start = edge_start[:,2][np.argwhere(num_edges==4)][edge_start[:,2][np.argwhere(num_edges==4)]>350]\r\n R2_edge_start = np.hstack((R2_edge_start,edge_start[:,1][np.argwhere(num_edges==3)][edge_start[:,1][np.argwhere(num_edges==3)]>350]))\r\n if(len(R2_edge_start)>=edge_length_threshold):\r\n trueRedge_start = R2_edge_start\r\n\r\n trueRedge_end = edge_end[:,2][np.argwhere(num_edges==4)][edge_end[:,2][np.argwhere(num_edges==4)]>350]\r\n trueRedge_end = np.hstack((trueRedge_end,edge_end[:,1][np.argwhere(num_edges==3)][edge_end[:,1][np.argwhere(num_edges==3)]>350]))\r\n\r\n else:\r\n R1_edge_start = edge_start[:,1][np.argwhere(num_edges==2)][edge_start[:,1][np.argwhere(num_edges==2)]>350]\r\n R1_edge_start = np.hstack((R1_edge_start,edge_start[:,2][np.argwhere(num_edges==3)][edge_start[:,2][np.argwhere(num_edges==3)]>350]))\r\n R1_edge_start = np.hstack((R1_edge_start,edge_start[:,3][np.argwhere(num_edges==4)][edge_start[:,3][np.argwhere(num_edges==4)]>350]))\r\n\r\n if(len(R1_edge_start)>= edge_length_threshold):\r\n trueRedge_start = R1_edge_start\r\n\r\n trueRedge_end = edge_end[:,1][np.argwhere(num_edges==2)][edge_end[:,1][np.argwhere(num_edges==2)]>350]\r\n trueRedge_end = np.hstack((trueRedge_end,edge_end[:,2][np.argwhere(num_edges==3)][edge_end[:,2][np.argwhere(num_edges==3)]>350]))\r\n trueRedge_end = np.hstack((trueRedge_end,edge_end[:,3][np.argwhere(num_edges==4)][edge_end[:,3][np.argwhere(num_edges==4)]>350]))\r\n else:\r\n print('max(num_edges)=4 invalid true right edge condition encountered . . . check code')\r\n\r\n if(len(trueLedge_start)>len(trueLedge_end)):\r\n trueLedge_start = np.array([trueLedge_start[i] for i in range(len(trueLedge_end))])\r\n \r\n if(len(trueLedge_start)<len(trueLedge_end)):\r\n trueLedge_end = np.array([trueLedge_end[i] for i in range(len(trueLedge_start))])\r\n\r\n if(len(trueRedge_start)>len(trueRedge_end)):\r\n trueRedge_start = np.array([trueRedge_start[i] for i in range(len(trueRedge_end))])\r\n \r\n if(len(trueRedge_start)<len(trueRedge_end)):\r\n trueRedge_end = np.array([trueRedge_end[i] for i in range(len(trueRedge_start))])\r\n\r\n if(np.logical_and(len(trueLedge_start)>=edge_length_threshold,len(trueRedge_start)>=edge_length_threshold)):\r\n \r\n line1_start = (round(np.mean((trueLedge_start + trueLedge_end)/2)),0) \r\n line1_end = (round(np.mean((trueLedge_start + trueLedge_end)/2)),img.shape[0])\r\n\r\n line2_start = (round(np.mean((trueRedge_start + trueRedge_end)/2)),0)\r\n line2_end = (round(np.mean((trueRedge_start + trueRedge_end)/2)),img.shape[0])\r\n \r\n edge_count = 4\r\n case_cond = 4 \r\n \r\n else:\r\n print('max(num_edges)=4 with no matching condition reached . . . 
check code')\r\n\r\n elif edge_condition > 4:\r\n\r\n # greater than 4 max edges case is typically - stringing or other artifacts causing pseudo edges\r\n # Identify true left edge\r\n\r\n L4_edge_start = edge_start[:,3][np.argwhere(num_edges==5)][edge_start[:,3][np.argwhere(num_edges==5)]<250]\r\n if(len(L4_edge_start)>=edge_length_threshold):\r\n trueLedge_start = L4_edge_start\r\n trueLedge_end = edge_end[:,3][np.argwhere(num_edges==5)][edge_end[:,3][np.argwhere(num_edges==5)]<250]\r\n else:\r\n L3_edge_start = edge_start[:,2][np.argwhere(num_edges==5)][edge_start[:,2][np.argwhere(num_edges==5)]<250]\r\n L3_edge_start = np.hstack((L3_edge_start,edge_start[:,2][np.argwhere(num_edges==4)][edge_start[:,2][np.argwhere(num_edges==4)]<250]))\r\n L3_edge_start = np.hstack((L3_edge_start,edge_start[:,1][np.argwhere(num_edges==3)][np.logical_and(edge_start[:,1][np.argwhere(num_edges==3)]<250,edge_start[:,1][np.argwhere(num_edges==3)]>60)]))\r\n \r\n if(len(L3_edge_start)>=edge_length_threshold):\r\n trueLedge_start = L3_edge_start\r\n\r\n trueLedge_end = edge_end[:,2][np.argwhere(num_edges==5)][edge_end[:,2][np.argwhere(num_edges==5)]<250]\r\n trueLedge_end = np.hstack((trueLedge_end,edge_end[:,2][np.argwhere(num_edges==4)][edge_end[:,2][np.argwhere(num_edges==4)]<250]))\r\n trueLedge_end = np.hstack((trueLedge_end,edge_end[:,1][np.argwhere(num_edges==3)][edge_end[:,1][np.argwhere(num_edges==3)]<250]))\r\n \r\n elif(len(L3_edge_start)!= 0 and len(L3_edge_start)<edge_length_threshold):\r\n trueLedge_start = L3_edge_start\r\n\r\n trueLedge_end = edge_end[:,2][np.argwhere(num_edges==5)][edge_end[:,2][np.argwhere(num_edges==5)]<250]\r\n trueLedge_end = np.hstack((trueLedge_end,edge_end[:,2][np.argwhere(num_edges==4)][edge_end[:,2][np.argwhere(num_edges==4)]<250]))\r\n trueLedge_end = np.hstack((trueLedge_end,edge_end[:,1][np.argwhere(num_edges==3)][edge_end[:,1][np.argwhere(num_edges==3)]<250]))\r\n\r\n min_L_edge_threshold = True\r\n\r\n else:\r\n \r\n # L2_edge_start = edge_start[:,1][np.argwhere(num_edges==3)][edge_start[:,1][np.argwhere(num_edges==3)]<250]\r\n # L2_edge_start = np.hstack((L2_edge_start,edge_start[:,0][np.argwhere(num_edges==3)][edge_start[:,0][np.argwhere(num_edges==3)]<250]))\r\n\r\n # if(len(L2_edge_start)>=edge_length_threshold):\r\n # trueLedge_start = L2_edge_start\r\n\r\n # trueLedge_end = edge_end[:,1][np.argwhere(num_edges==3)][edge_end[:,1][np.argwhere(num_edges==3)]<250]\r\n # trueLedge_end = np.hstack((trueLedge_end,edge_end[:,0][np.argwhere(num_edges==3)][edge_end[:,0][np.argwhere(num_edges==3)]<250]))\r\n # else:\r\n print('max(num_edges)>4 invalid true left edge condition encountered . . .
check code')\r\n\r\n\r\n # Identify true right edge\r\n sse_Redge_start = edge_start[:,3][np.argwhere(num_edges==5)][edge_start[:,3][np.argwhere(num_edges==5)]>350]\r\n sse_Redge_start = np.hstack((sse_Redge_start,edge_start[:,2][np.argwhere(num_edges==4)][edge_start[:,2][np.argwhere(num_edges==4)]>350]))\r\n\r\n if(len(sse_Redge_start)>=edge_length_threshold):\r\n trueRedge_start = sse_Redge_start\r\n\r\n trueRedge_end = edge_end[:,3][np.argwhere(num_edges==5)][edge_end[:,3][np.argwhere(num_edges==5)]>350]\r\n trueRedge_end = np.hstack((trueRedge_end,edge_end[:,2][np.argwhere(num_edges==4)][edge_end[:,2][np.argwhere(num_edges==4)]>350]))\r\n \r\n elif(len(sse_Redge_start)!= 0 and len(sse_Redge_start)<edge_length_threshold):\r\n trueRedge_start = sse_Redge_start\r\n\r\n trueRedge_end = edge_end[:,3][np.argwhere(num_edges==5)][edge_end[:,3][np.argwhere(num_edges==5)]>350]\r\n trueRedge_end = np.hstack((trueRedge_end,edge_end[:,2][np.argwhere(num_edges==4)][edge_end[:,2][np.argwhere(num_edges==4)]>350]))\r\n\r\n min_R_edge_threshold = True\r\n\r\n else:\r\n \r\n trueRedge_start = edge_start[:,3][np.argwhere(num_edges==4)][edge_start[:,3][np.argwhere(num_edges==4)]>350]\r\n trueRedge_start = np.hstack((trueRedge_start,edge_start[:,4][np.argwhere(num_edges==5)][edge_start[:,4][np.argwhere(num_edges==5)]>350]))\r\n \r\n trueRedge_end = edge_end[:,3][np.argwhere(num_edges==4)][edge_end[:,3][np.argwhere(num_edges==4)]>350]\r\n trueRedge_end = np.hstack((trueRedge_end,edge_end[:,4][np.argwhere(num_edges==5)][edge_end[:,4][np.argwhere(num_edges==5)]>350]))\r\n\r\n if(len(trueLedge_start)>len(trueLedge_end)):\r\n trueLedge_start = np.array([trueLedge_start[i] for i in range(len(trueLedge_end))])\r\n\r\n if(len(trueLedge_start)<len(trueLedge_end)):\r\n trueLedge_end = np.array([trueLedge_end[i] for i in range(len(trueLedge_start))])\r\n\r\n if(len(trueRedge_start)>len(trueRedge_end)):\r\n trueRedge_start = np.array([trueRedge_start[i] for i in range(len(trueRedge_end))])\r\n\r\n if(len(trueRedge_start)<len(trueRedge_end)):\r\n trueRedge_end = np.array([trueRedge_end[i] for i in range(len(trueRedge_start))])\r\n\r\n\r\n if(len(trueLedge_start)<edge_length_threshold):\r\n min_L_edge_threshold = True\r\n\r\n if(len(trueRedge_start)<edge_length_threshold):\r\n min_R_edge_threshold = True\r\n\r\n # Length check\r\n if(min_L_edge_threshold or min_R_edge_threshold):\r\n line1_start = (round(np.mean((trueLedge_start + trueLedge_end)/2)),0) \r\n line1_end = (round(np.mean((trueLedge_start + trueLedge_end)/2)),img.shape[0])\r\n\r\n line2_start = (round(np.mean((trueRedge_start + trueRedge_end)/2)),0)\r\n line2_end = (round(np.mean((trueRedge_start + trueRedge_end)/2)),img.shape[0])\r\n \r\n edge_count = 5\r\n case_cond = 5\r\n\r\n elif(np.logical_and(len(trueLedge_start)>=edge_length_threshold,len(trueRedge_start)>=edge_length_threshold)):\r\n\r\n line1_start = (round(np.mean((trueLedge_start + trueLedge_end)/2)),0) \r\n line1_end = (round(np.mean((trueLedge_start + trueLedge_end)/2)),img.shape[0])\r\n\r\n line2_start = (round(np.mean((trueRedge_start + trueRedge_end)/2)),0)\r\n line2_end = (round(np.mean((trueRedge_start + trueRedge_end)/2)),img.shape[0])\r\n \r\n edge_count = 5\r\n case_cond = 6\r\n\r\n\r\n elif(len(edge_start[:,0][np.argwhere(num_edges==2)])>= edge_length_threshold):\r\n \r\n line1_start = (round(np.mean((edge_start[:,0][np.argwhere(num_edges==2)] + edge_end[:,0][np.argwhere(num_edges==2)])/2)),0) \r\n line1_end = (round(np.mean((edge_start[:,0][np.argwhere(num_edges==2)] + 
edge_end[:,0][np.argwhere(num_edges==2)])/2)),img.shape[0])\r\n\r\n line2_start = (round(np.mean((edge_start[:,1][np.argwhere(num_edges==2)] + edge_end[:,1][np.argwhere(num_edges==2)])/2)),0)\r\n line2_end = (round(np.mean((edge_start[:,1][np.argwhere(num_edges==2)] + edge_end[:,1][np.argwhere(num_edges==2)])/2)),img.shape[0])\r\n \r\n edge_count = np.nan\r\n case_cond = 7\r\n \r\n else:\r\n print('max(num_edges)>4 with no matching condition reached . . . check code')\r\n\r\n else:\r\n print('Invalid edge condition reached . . . check code')\r\n\r\n # convert to BGR image and draw line\r\n img_color = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)\r\n \r\n # computed road width in pixels \r\n dist_px = line2_start[0] - line1_start[0]\r\n dist_mm = round(dist_px*mm_per_pixel,4)\r\n \r\n cv2.line(img_color,line1_start,line1_end,(0,255,0),2)\r\n cv2.line(img_color,line2_start,line2_end,(0,255,0),2)\r\n \r\n # Add Road width value to image\r\n \r\n # text\r\n text = str(dist_mm) + ' mm'\r\n \r\n if img_debug:\r\n print('w = ' + text)\r\n\r\n # font\r\n font = cv2.FONT_HERSHEY_SIMPLEX\r\n \r\n # org\r\n org = (line1_start[0]+50, round(img.shape[0]/2))\r\n \r\n # fontScale\r\n fontScale = 1\r\n \r\n # Blue color in BGR\r\n color = (255, 0, 0)\r\n \r\n # Line thickness of 2 px\r\n thickness = 2\r\n \r\n # Using cv2.putText() method\r\n img_color = cv2.putText(img_color, text, org, font, \r\n fontScale, color, thickness, cv2.LINE_AA) \r\n \r\n return [case_cond,img_color,edge_start,edge_end,num_edges,edge_count,dist_px,dist_mm]\r\n\r\n# Main Code\r\nnum_layers = 20\r\nmax_speed = 50\r\nlayers = list(range(5,num_layers+1))\r\n#speeds = list(range(10,max_speed+10,10))\r\n#speeds = [10,20,50,30,40]\r\n#frame_skip_start = [32,20,11,15,13] # Start processing from these frames for different vR - Skip frames before this\r\n\r\n# For Debugging errors\r\nlayers = [20] # Restart in case of error\r\nspeeds = [40] # For testing only\r\n\r\n#idx = [int((s/10)-1) for s in speeds]\r\n#frame_start = [frame_skip_start[i] for i in idx]\r\n\r\nbad_frames = [109] # Check this - enter problematic frame here - start processing from this frame\r\nvidCount = 0\r\nimg_debug = True # Enable this flag to save pictures\r\n\r\nw_result_columns=['Layer','vR','Frame','ActualTimestamp','CaseCondition','Bottom_Edges','w_Vision']\r\nframe_summary_columns = ['Layer','vR','Start_TS','End_TS','Total_Frames','Per_Frames_Skipped','Skipped_Frames']\r\nlst = []\r\nlst_skip_frames = []\r\nlst_frame_summary = []\r\n\r\n# to calculate the transformation matrix\r\npt1 = [192.30,343.00] # (x,y) - Top left\r\npt2 = [1079.0,379.80] # (x,y) - Top right\r\npt3 = [153.50,571.90] # (x,y) - bottom left\r\npt4 = [1107.10,611.70] # (x,y) - bottom Right\r\n\r\n# Actual Dimensions of region selected by 4 points \r\nscale_width = round(11.7348*200) # mm Actual ruler width measured using calipers\r\nscale_height = round(6.35*200) # mm Height based on selected 4 points for perspective transform\r\n\r\n# Extract top - bottom smaller regions\r\ntStart = [655,0]\r\ntEnd = [1300,345]\r\nbStart = [655,925]\r\nbEnd = [1300,1270]\r\n\r\nk1_num_passes = 1 # Default should be 2\r\nrun = 'Run-2'\r\n\r\nfor l in range(len(layers)):\r\n for v in range(len(speeds)):\r\n \r\n lst = []\r\n lst_skip_frames = []\r\n \r\n vidName = 'vid_l'+str(layers[l])+'_vR_'+str(speeds[v])+'.avi'\r\n print('Processing video: ' + vidName)\r\n \r\n idx = df_vidTimes.index[(df_vidTimes.Layer==layers[l]) & (df_vidTimes.Speed==speeds[v])].to_list()[0]\r\n start_TS =
df_vidTimes.Start_Timestamp[idx]\r\n end_TS = df_vidTimes.End_Timestamp[idx]\r\n \r\n print('video: {0} starts at {1} and ends at {2}'.format(vidName,start_TS,end_TS))\r\n \r\n srcVideo = SRC_FOLDER + vidName\r\n #print('video location: ' + srcVideo)\r\n \r\n cap = cv2.VideoCapture(srcVideo)\r\n numFrames = cap.get(cv2.CAP_PROP_FRAME_COUNT)\r\n print('video {0} has {1} frames'.format(vidName,numFrames))\r\n\r\n # check if video opened successfully\r\n if (cap.isOpened() == False):\r\n print(\"Error reading video file. Exiting ...\")\r\n exit(0)\r\n \r\n frameCount = 0\r\n\r\n while(cap.isOpened()):\r\n \r\n frame_exists, frame = cap.read()\r\n \r\n if frame_exists:\r\n \r\n frameCount = frameCount + 1\r\n \r\n if(frameCount in bad_frames):\r\n #if(frameCount >= frame_skip_start[v] and frameCount <= numFrames - 5): \r\n \r\n try:\r\n \r\n #print('Begin processing frame {0}'.format(frameCount)) \r\n\r\n # call function - correct perspective transform\r\n [img_bgr,imgPersp,imgGrayPersp] = perspCorrection(frame,pt1,pt2,pt3,pt4,scale_width,scale_height)\r\n img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)\r\n\r\n # Filter grayscale image\r\n # Bilateral filter\r\n bfblur = bilateralFilter(imgGrayPersp)\r\n\r\n # ROI Extraction - Mark rectangle\r\n # convert to RGB image and draw line\r\n img_ROI = cv2.cvtColor(imgGrayPersp, cv2.COLOR_GRAY2RGB)\r\n img_ROI = cv2.rectangle(img_ROI, (bStart[0],bStart[1]), (bEnd[0],bEnd[1]), (255,0,0), 8)\r\n #img_ROI = cv2.rectangle(img_ROI, (tStart[0],tStart[1]), (tEnd[0],tEnd[1]), (0,0,255), 8) \r\n\r\n [img_top,img_bottom] = extractTopBottom(bfblur,tStart,tEnd,bStart,bEnd)\r\n\r\n # Thresholding - Adaptive Gaussian \r\n #thresh_top = gAdaptiveThresholding(img_top)\r\n thresh_bottom = gAdaptiveThresholding(img_bottom)\r\n\r\n #dstPathTop = 'Perspective Corrected\\\\Top\\\\'\r\n #cv2.imwrite(dstPathTop+'top'+str(i+1)+'.jpg',img_top) \r\n\r\n # create kernel \r\n kernel1 = np.ones((8,2),np.uint8)\r\n kernel2 = np.ones((5,2),np.uint8)\r\n\r\n # perform morph operations \r\n #binImgTop=morphOps(thresh_top,kernel1,kernel2)\r\n binImgBtm=morphOps(thresh_bottom,kernel1,kernel2,k1_num_passes)\r\n\r\n # save images - for analysis\r\n if(img_debug):\r\n pCorrImg_savePath = SRC_FOLDER + \"Results/\" + run + \"/\" + str(layers[l]) + \"/\" + str(speeds[v]) + \"/Extracted Images/Gray/\"\r\n #bfImg_savePath = SRC_FOLDER + \"Extracted Images/\" + str(layers[l]) + \"/\" + str(speeds[v]) + \"/Bilateral Filter/\"\r\n #bROIImg_savePath = SRC_FOLDER + \"Extracted Images/\" + str(layers[l]) + \"/\" + str(speeds[v]) + \"/ROI/\"\r\n threshImg_savePath = SRC_FOLDER + \"Results/\" + run + \"/\" + str(layers[l]) + \"/\" + str(speeds[v]) + \"/Extracted Images/Gray/\"\r\n binImg_savePath = SRC_FOLDER + \"Results/\" + run + \"/\" + str(layers[l]) + \"/\" + str(speeds[v]) + \"/Binary/\"\r\n\r\n cv2.imwrite(pCorrImg_savePath + \"pCorr\" + str(frameCount) + \".jpg\", imgGrayPersp)\r\n #cv2.imwrite(bfImg_savePath + \"bFil\" + str(frameCount) + \".jpg\", bfblur)\r\n #cv2.imwrite(bROIImg_savePath + \"btm_ROI\" + str(frameCount) + \".jpg\", img_bottom)\r\n cv2.imwrite(threshImg_savePath + \"thresh\" + str(frameCount) + \".jpg\", thresh_bottom) \r\n cv2.imwrite(binImg_savePath + \"binary\" + str(frameCount) + \".jpg\", binImgBtm)\r\n\r\n # Extrusion width measurement \r\n #[top_img_color,top_edge_start,top_edge_end,top_num_edges,top_edge_count,top_edge_dist_pixels,top_edge_dist] = computeW(binImgTop)\r\n 
[bottom_case_cond,bottom_img_color,bottom_edge_start,bottom_edge_end,bottom_num_edges,bottom_edge_count,bottom_edge_dist_pixels,bottom_edge_dist] = computeW_Rev(binImgBtm,img_debug)\r\n \r\n # horizontally concatenates images of same height \r\n img_h = cv2.hconcat([cv2.cvtColor(binImgBtm, cv2.COLOR_GRAY2BGR), bottom_img_color]) \r\n \r\n # save image - for analysis\r\n if(img_debug):\r\n wImg_savePath = SRC_FOLDER + \"Results/\" + run + \"/\" + str(layers[l]) + \"/\" + str(speeds[v]) + \"/Vision Measurements/\"\r\n cv2.imwrite(wImg_savePath + \"wImg\" + str(frameCount) + \".jpg\", bottom_img_color)\r\n\r\n hImg_savePath = SRC_FOLDER + \"Results/\" + run + \"/\" + str(layers[l]) + \"/\" + str(speeds[v]) + \"/Summary/\"\r\n cv2.imwrite(hImg_savePath + \"wS_Img\" + str(frameCount) + \".jpg\", img_h)\r\n\r\n # Calculate actual timestamp based on excel timestamps and frame number\r\n act_TS = start_TS+frameCount*(end_TS-start_TS)/numFrames\r\n\r\n # Store results in dataframe \r\n lst.append([layers[l],speeds[v],frameCount,act_TS,bottom_case_cond,bottom_edge_count,bottom_edge_dist])\r\n \r\n #print('Finished processing frame {0}'.format(frameCount))\r\n \r\n except ValueError as e:\r\n #if (len(e.args) > 0 and e.args[0] == 'cannot convert float NaN to integer'):\r\n print('Unable to successfully process frame {0}, skipping . . .'.format(frameCount))\r\n print(e)\r\n # Calculate actual timestamp based on excel timestamps and frame number\r\n act_TS = start_TS+frameCount*(end_TS-start_TS)/numFrames\r\n # Store results in dataframe \r\n lst.append([layers[l],speeds[v],frameCount,act_TS,np.nan,np.nan,np.nan])\r\n lst_skip_frames.append([frameCount])\r\n\r\n except UnboundLocalError as u:\r\n print('Unable to successfully process frame {0}, skipping . . .'.format(frameCount))\r\n print(u)\r\n # Calculate actual timestamp based on excel timestamps and frame number\r\n act_TS = start_TS+frameCount*(end_TS-start_TS)/numFrames\r\n # Store results in dataframe \r\n lst.append([layers[l],speeds[v],frameCount,act_TS,np.nan,np.nan,np.nan])\r\n lst_skip_frames.append([frameCount])\r\n\r\n else:\r\n #print(\"Can't retrieve frame - stream may have ended.
Exiting..\")\r\n break \r\n \r\n # Clean up\r\n cap.release()\r\n cv2.destroyAllWindows()\r\n \r\n print('Finished processing video: {0}'.format(vidName))\r\n print('')\r\n print('')\r\n vidCount = vidCount + 1\r\n\r\n if not img_debug:\r\n \r\n results = pd.DataFrame(lst,columns=w_result_columns)\r\n # Save results to excel\r\n with pd.ExcelWriter(\r\n SRC_FOLDER + 'Results/' + run + '/' + 'l' + str(layers[l])+'_vR'+str(speeds[v])+ '_results.xlsx',\r\n date_format=\"YYYY-MM-DD\",\r\n datetime_format=\"YYYY-MM-DD HH:MM:SS.000\"\r\n ) as writer:\r\n results.to_excel(writer,index=False)\r\n \r\n #print('Saved results of video {0} at {1}'.format(vidName,SRC_FOLDER + 'l' + str(layers[l])+'_vR'+str(speeds[v])+ '_results.xlsx'))\r\n \r\n lst_frame_summary.append([layers[l],speeds[v],start_TS,end_TS,numFrames,len(lst_skip_frames)/numFrames,lst_skip_frames])\r\n\r\nif not img_debug:\r\n frame_summary_results = pd.DataFrame(lst_frame_summary,columns=frame_summary_columns)\r\n\r\n # Some more cleanup and data addition\r\n frame_summary_results[\"Video_Duration\"] = frame_summary_results[\"End_TS\"] - frame_summary_results[\"Start_TS\"] \r\n frame_summary_results[\"Video_Duration\"] = [x.total_seconds() for x in frame_summary_results[\"Video_Duration\"]]\r\n frame_summary_results[\"FPS\"] = frame_summary_results[\"Total_Frames\"]/frame_summary_results[\"Video_Duration\"]\r\n frame_summary_results[\"Total_Frames_Skipped\"] = [len(x) for x in frame_summary_results[\"Skipped_Frames\"]]\r\n # Re-order columns\r\n frame_summary_results = frame_summary_results[[\"Layer\",\"vR\",\"Start_TS\",\"End_TS\",\"Video_Duration\",\"Total_Frames\",\"FPS\",\"Total_Frames_Skipped\",\"Per_Frames_Skipped\",\"Skipped_Frames\"]]\r\n\r\n with pd.ExcelWriter(\r\n SRC_FOLDER + 'Results/' + run + '/' + 'video_processing_summary.xlsx',\r\n date_format=\"YYYY-MM-DD\",\r\n datetime_format=\"YYYY-MM-DD HH:MM:SS.000\"\r\n ) as writer:\r\n frame_summary_results.to_excel(writer,index=False)\r\n print('Processing of all videos completed successfully! Summary results saved at {0}'.format(SRC_FOLDER + 'video_processing_summary.xlsx'))\r\n" ]
[ [ "numpy.hstack", "pandas.read_excel", "numpy.min", "pandas.DataFrame", "numpy.argwhere", "numpy.ones", "numpy.max", "numpy.copy", "numpy.mean", "numpy.float32", "pandas.ExcelWriter", "numpy.zeros" ] ]
heroclass728/train_object_detection_model
[ "f2346c72b0d89eb740881d01b0ff6bae0759ba10" ]
[ "object_detection/trainer.py" ]
[ "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Detection model trainer.\n\nThis file provides a generic training method that can be used to train a\nDetectionModel.\n\"\"\"\n\nimport functools\n\nimport tensorflow as tf\n\nfrom object_detection.builders import optimizer_builder\nfrom object_detection.builders import preprocessor_builder\nfrom object_detection.core import batcher\nfrom object_detection.core import preprocessor\nfrom object_detection.core import standard_fields as fields\nfrom object_detection.utils import ops as util_ops\nfrom object_detection.utils import variables_helper\nfrom slim.deployment import model_deploy\n\nslim = tf.contrib.slim\n\n\ndef _create_input_queue(batch_size_per_clone, create_tensor_dict_fn,\n batch_queue_capacity, num_batch_queue_threads,\n prefetch_queue_capacity, data_augmentation_options):\n \"\"\"Sets up reader, prefetcher and returns input queue.\n\n Args:\n batch_size_per_clone: batch size to use per clone.\n create_tensor_dict_fn: function to create tensor dictionary.\n batch_queue_capacity: maximum number of elements to store within a queue.\n num_batch_queue_threads: number of threads to use for batching.\n prefetch_queue_capacity: maximum capacity of the queue used to prefetch\n assembled batches.\n data_augmentation_options: a list of tuples, where each tuple contains a\n data augmentation function and a dictionary containing arguments and their\n values (see preprocessor.py).\n\n Returns:\n input queue: a batcher.BatchQueue object holding enqueued tensor_dicts\n (which hold images, boxes and targets). 
To get a batch of tensor_dicts,\n call input_queue.Dequeue().\n \"\"\"\n tensor_dict = create_tensor_dict_fn()\n\n tensor_dict[fields.InputDataFields.image] = tf.expand_dims(\n tensor_dict[fields.InputDataFields.image], 0)\n\n images = tensor_dict[fields.InputDataFields.image]\n float_images = tf.to_float(images)\n tensor_dict[fields.InputDataFields.image] = float_images\n\n if data_augmentation_options:\n tensor_dict = preprocessor.preprocess(tensor_dict,\n data_augmentation_options)\n\n input_queue = batcher.BatchQueue(\n tensor_dict,\n batch_size=batch_size_per_clone,\n batch_queue_capacity=batch_queue_capacity,\n num_batch_queue_threads=num_batch_queue_threads,\n prefetch_queue_capacity=prefetch_queue_capacity)\n return input_queue\n\n\ndef _get_inputs(input_queue, num_classes):\n \"\"\"Dequeue batch and construct inputs to object detection model.\n\n Args:\n input_queue: BatchQueue object holding enqueued tensor_dicts.\n num_classes: Number of classes.\n\n Returns:\n images: a list of 3-D float tensor of images.\n locations_list: a list of tensors of shape [num_boxes, 4]\n containing the corners of the groundtruth boxes.\n classes_list: a list of padded one-hot tensors containing target classes.\n masks_list: a list of 3-D float tensors of shape [num_boxes, image_height,\n image_width] containing instance masks for objects if present in the\n input_queue. Else returns None.\n \"\"\"\n read_data_list = input_queue.dequeue()\n label_id_offset = 1\n\n def extract_images_and_targets(read_data):\n image = read_data[fields.InputDataFields.image]\n location_gt = read_data[fields.InputDataFields.groundtruth_boxes]\n classes_gt = tf.cast(read_data[fields.InputDataFields.groundtruth_classes],\n tf.int32)\n classes_gt -= label_id_offset\n classes_gt = util_ops.padded_one_hot_encoding(indices=classes_gt,\n depth=num_classes, left_pad=0)\n masks_gt = read_data.get(fields.InputDataFields.groundtruth_instance_masks)\n return image, location_gt, classes_gt, masks_gt\n\n return zip(*map(extract_images_and_targets, read_data_list))\n\n\ndef _create_losses(input_queue, create_model_fn):\n \"\"\"Creates loss function for a DetectionModel.\n\n Args:\n input_queue: BatchQueue object holding enqueued tensor_dicts.\n create_model_fn: A function to create the DetectionModel.\n \"\"\"\n detection_model = create_model_fn()\n (images, groundtruth_boxes_list, groundtruth_classes_list,\n groundtruth_masks_list\n ) = _get_inputs(input_queue, detection_model.num_classes)\n images = [detection_model.preprocess(image) for image in images]\n images = tf.concat(images, 0)\n if any(mask is None for mask in groundtruth_masks_list):\n groundtruth_masks_list = None\n\n detection_model.provide_groundtruth(groundtruth_boxes_list,\n groundtruth_classes_list,\n groundtruth_masks_list)\n prediction_dict = detection_model.predict(images)\n\n losses_dict = detection_model.loss(prediction_dict)\n for loss_tensor in losses_dict.values():\n tf.losses.add_loss(loss_tensor)\n\n\ndef train(create_tensor_dict_fn, create_model_fn, train_config, master, task,\n num_clones, worker_replicas, clone_on_cpu, ps_tasks, worker_job_name,\n is_chief, train_dir):\n \"\"\"Training function for detection models.\n\n Args:\n create_tensor_dict_fn: a function to create a tensor input dictionary.\n create_model_fn: a function that creates a DetectionModel and generates\n losses.\n train_config: a train_pb2.TrainConfig protobuf.\n master: BNS name of the TensorFlow master to use.\n task: The task id of this training instance.\n num_clones: The number of 
clones to run per machine.\n worker_replicas: The number of work replicas to train with.\n clone_on_cpu: True if clones should be forced to run on CPU.\n ps_tasks: Number of parameter server tasks.\n worker_job_name: Name of the worker job.\n is_chief: Whether this replica is the chief replica.\n train_dir: Directory to write checkpoints and training summaries to.\n \"\"\"\n\n detection_model = create_model_fn()\n data_augmentation_options = [\n preprocessor_builder.build(step)\n for step in train_config.data_augmentation_options]\n\n with tf.Graph().as_default():\n # Build a configuration specifying multi-GPU and multi-replicas.\n deploy_config = model_deploy.DeploymentConfig(\n num_clones=num_clones,\n clone_on_cpu=clone_on_cpu,\n replica_id=task,\n num_replicas=worker_replicas,\n num_ps_tasks=ps_tasks,\n worker_job_name=worker_job_name)\n\n # Place the global step on the device storing the variables.\n with tf.device(deploy_config.variables_device()):\n global_step = slim.create_global_step()\n\n with tf.device(deploy_config.inputs_device()):\n input_queue = _create_input_queue(train_config.batch_size // num_clones,\n create_tensor_dict_fn,\n train_config.batch_queue_capacity,\n train_config.num_batch_queue_threads,\n train_config.prefetch_queue_capacity,\n data_augmentation_options)\n\n # Gather initial summaries.\n summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))\n global_summaries = set([])\n\n model_fn = functools.partial(_create_losses,\n create_model_fn=create_model_fn)\n clones = model_deploy.create_clones(deploy_config, model_fn, [input_queue])\n first_clone_scope = clones[0].scope\n\n # Gather update_ops from the first clone. These contain, for example,\n # the updates for the batch_norm variables created by model_fn.\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, first_clone_scope)\n\n with tf.device(deploy_config.optimizer_device()):\n training_optimizer = optimizer_builder.build(train_config.optimizer,\n global_summaries)\n\n sync_optimizer = None\n if train_config.sync_replicas:\n training_optimizer = tf.SyncReplicasOptimizer(\n training_optimizer,\n replicas_to_aggregate=train_config.replicas_to_aggregate,\n total_num_replicas=train_config.worker_replicas)\n sync_optimizer = training_optimizer\n\n # Create ops required to initialize the model from a given checkpoint.\n init_fn = None\n if train_config.fine_tune_checkpoint:\n var_map = detection_model.restore_map(\n from_detection_checkpoint=train_config.from_detection_checkpoint)\n available_var_map = (variables_helper.\n get_variables_available_in_checkpoint(\n var_map, train_config.fine_tune_checkpoint))\n init_saver = tf.train.Saver(available_var_map)\n\n def initializer_fn(sess):\n init_saver.restore(sess, train_config.fine_tune_checkpoint)\n\n init_fn = initializer_fn\n\n with tf.device(deploy_config.optimizer_device()):\n total_loss, grads_and_vars = model_deploy.optimize_clones(\n clones, training_optimizer, regularization_losses=None)\n total_loss = tf.check_numerics(total_loss, 'LossTensor is inf or nan.')\n\n # Optionally multiply bias gradients by train_config.bias_grad_multiplier.\n if train_config.bias_grad_multiplier:\n biases_regex_list = ['.*/biases']\n grads_and_vars = variables_helper.multiply_gradients_matching_regex(\n grads_and_vars,\n biases_regex_list,\n multiplier=train_config.bias_grad_multiplier)\n\n # Optionally freeze some layers by setting their gradients to be zero.\n if train_config.freeze_variables:\n grads_and_vars = variables_helper.freeze_gradients_matching_regex(\n 
grads_and_vars, train_config.freeze_variables)\n\n # Optionally clip gradients\n if train_config.gradient_clipping_by_norm > 0:\n with tf.name_scope('clip_grads'):\n grads_and_vars = slim.learning.clip_gradient_norms(\n grads_and_vars, train_config.gradient_clipping_by_norm)\n\n # Create gradient updates.\n grad_updates = training_optimizer.apply_gradients(grads_and_vars,\n global_step=global_step)\n update_ops.append(grad_updates)\n\n update_op = tf.group(*update_ops)\n with tf.control_dependencies([update_op]):\n train_tensor = tf.identity(total_loss, name='train_op')\n\n # Add summaries.\n for model_var in slim.get_model_variables():\n global_summaries.add(tf.summary.histogram(model_var.op.name, model_var))\n for loss_tensor in tf.losses.get_losses():\n global_summaries.add(tf.summary.scalar(loss_tensor.op.name, loss_tensor))\n global_summaries.add(\n tf.summary.scalar('TotalLoss', tf.losses.get_total_loss()))\n\n # Add the summaries from the first clone. These contain the summaries\n # created by model_fn and either optimize_clones() or _gather_clone_loss().\n summaries |= set(tf.get_collection(tf.GraphKeys.SUMMARIES,\n first_clone_scope))\n summaries |= global_summaries\n\n # Merge all summaries together.\n summary_op = tf.summary.merge(list(summaries), name='summary_op')\n\n # Soft placement allows placing on CPU ops without GPU implementation.\n session_config = tf.ConfigProto(allow_soft_placement=True,\n log_device_placement=False)\n\n # Save checkpoints regularly.\n keep_checkpoint_every_n_hours = train_config.keep_checkpoint_every_n_hours\n saver = tf.train.Saver(\n keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours)\n\n slim.learning.train(\n train_tensor,\n logdir=train_dir,\n master=master,\n is_chief=is_chief,\n session_config=session_config,\n startup_delay_steps=train_config.startup_delay_steps,\n init_fn=init_fn,\n summary_op=summary_op,\n number_of_steps=(\n train_config.num_steps if train_config.num_steps else None),\n save_summaries_secs=120,\n sync_optimizer=sync_optimizer,\n saver=saver)\n" ]
[ [ "tensorflow.concat", "tensorflow.control_dependencies", "tensorflow.cast", "tensorflow.group", "tensorflow.summary.scalar", "tensorflow.Graph", "tensorflow.losses.get_total_loss", "tensorflow.get_collection", "tensorflow.losses.get_losses", "tensorflow.check_numerics", "tensorflow.ConfigProto", "tensorflow.to_float", "tensorflow.name_scope", "tensorflow.train.Saver", "tensorflow.identity", "tensorflow.losses.add_loss", "tensorflow.summary.histogram", "tensorflow.expand_dims", "tensorflow.SyncReplicasOptimizer" ] ]
matthuszagh/fmcw
[ "b60f18102a948562bfc954de2658ba5fb48c3c79" ]
[ "simulations/openems/linear_taper.py" ]
[ "#!/usr/bin/env python\n\"\"\"\nTest the effect of different linear taper angles/lengths on S11 and S21.\n\"\"\"\n\nimport numpy as np\nfrom pyems.pcb import common_pcbs\nfrom pyems.simulation import Simulation\nfrom pyems.utilities import print_table\nfrom pyems.structure import PCB, Microstrip, Taper\nfrom pyems.coordinate import Box2, Coordinate2, Axis\nfrom pyems.mesh import Mesh\nfrom pyems.calc import sweep\n\nunit = 1e-3\nfreq_res = 1e7\nfreq = np.arange(0, 18e9 + freq_res, freq_res)\npcb_prop = common_pcbs[\"oshpark4\"]\npcb_len = 10\npcb_width = 5\ntrace_width = 0.38\nz0_ref = 50\n\nmicrostrip_discontinuity_width = 0.5\nmicrostrip_discontinuity_length = 1\n\n\ndef sim_func(taper_angle: float):\n \"\"\"\n :param taper_angle: Linear taper angle in degrees.\n \"\"\"\n angle_rad = taper_angle * np.pi / 180\n dy = np.abs(trace_width - microstrip_discontinuity_width) / 2\n dx = dy / np.tan(angle_rad)\n taper_middle = microstrip_discontinuity_length / 2 + dx / 2\n taper_end = microstrip_discontinuity_length / 2 + dx\n\n sim = Simulation(freq=freq, unit=unit, sim_dir=None)\n pcb = PCB(\n sim=sim,\n pcb_prop=pcb_prop,\n length=pcb_len,\n width=pcb_width,\n layers=range(3),\n omit_copper=[0],\n )\n\n Microstrip(\n pcb=pcb,\n position=Coordinate2(0, 0),\n length=microstrip_discontinuity_length,\n width=microstrip_discontinuity_width,\n propagation_axis=Axis(\"x\"),\n trace_layer=0,\n gnd_layer=1,\n )\n\n Taper(\n pcb=pcb,\n position=Coordinate2(-taper_middle, 0),\n pcb_layer=0,\n width1=trace_width,\n width2=microstrip_discontinuity_width,\n length=dx,\n )\n Taper(\n pcb=pcb,\n position=Coordinate2(taper_middle, 0),\n pcb_layer=0,\n width1=microstrip_discontinuity_width,\n width2=trace_width,\n length=dx,\n )\n\n box = Box2(\n Coordinate2(-pcb_len / 2, -trace_width / 2),\n Coordinate2(-taper_end, trace_width / 2),\n )\n Microstrip(\n pcb=pcb,\n position=box.center(),\n length=box.length(),\n width=trace_width,\n propagation_axis=Axis(\"x\"),\n trace_layer=0,\n gnd_layer=1,\n port_number=1,\n excite=True,\n feed_shift=0.35,\n ref_impedance=50,\n )\n\n box = Box2(\n Coordinate2(taper_end, -trace_width / 2),\n Coordinate2(pcb_len / 2, trace_width / 2),\n )\n Microstrip(\n pcb=pcb,\n position=box.center(),\n length=box.length(),\n width=trace_width,\n propagation_axis=Axis(\"x\", direction=-1),\n trace_layer=0,\n gnd_layer=1,\n port_number=2,\n ref_impedance=50,\n )\n\n Mesh(\n sim=sim,\n metal_res=1 / 120,\n nonmetal_res=1 / 40,\n min_lines=5,\n expand_bounds=((0, 0), (0, 0), (10, 40)),\n )\n\n # sim.run(csx=False)\n sim.run()\n return sim.s_param(1, 1)\n\n\nangles = np.arange(10, 90, 10)\nangles = [10]\nres = sweep(sim_func, angles, processes=5)\n\nstr_angles = [str(angle) for angle in angles]\nprint_table(\n data=np.concatenate(([freq / 1e9], res)),\n col_names=[\"freq\"] + str_angles,\n prec=[2] + [4 for _ in angles],\n)\n" ]
[ [ "numpy.tan", "numpy.arange", "numpy.concatenate", "numpy.abs" ] ]
google/graph-gen
[ "6481e09696ee5b29c12d893a61c27b80f8d874be" ]
[ "research/graph_gen/real_NVP.py" ]
[ "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\n\"\"\"RealNVP like graph generation procedure.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport rev_GNN\nimport tensorflow as tf\nimport numpy as np\nimport copy\n\nflags = tf.app.flags\n\nFLAGS = flags.FLAGS\n\ndef mlp(inputs,\n layer_sizes,\n activation_fn=tf.nn.relu,\n output_act=None):\n prev_size = inputs.get_shape().as_list()[-1]\n shape_inp = tf.shape(inputs)\n if len(inputs.get_shape().as_list()) > 2:\n out = tf.reshape(inputs, [-1, shape_inp[2]])\n else:\n out = tf.reshape(inputs, [-1, shape_inp[1]])\n for i, layer_size in enumerate(layer_sizes):\n z = tf.layers.fully_connected(out, layer_size, activation_fn)\n\n if i < len(layer_sizes) - 1 and activation_fn is not None:\n out = activation_fn(z)\n elif i == len(layer_sizes) - 1 and output_act is not None:\n out = output_act(z)\n else:\n out = z\n if len(inputs.get_shape().as_list()) > 2:\n return tf.reshape(out, [shape_inp[0], shape_inp[1], -1])\n else:\n return tf.reshape(out, [shape_inp[0], -1])\n\ndef message_fn(params, model_hparams):\n \"\"\"Messages are just MLPs of the nodes itself, as edges have no labels,\n so, message is just a transform of the node vector.\"\"\"\n node_states = params['node_states']\n mask = params['mask']\n node_dim = model_hparams.node_dim\n msg_dim = model_hparams.msg_dim\n\n with tf.variable_scope('message_fn', reuse=tf.AUTO_REUSE):\n msg_function = mlp(node_states,\n layer_sizes=[model_hparams.msg_hidden1, model_hparams.msg_hidden2],\n activation_fn=tf.nn.tanh,\n output_act=tf.nn.tanh)\n # This could be a soft matrix, in which case we are taking a sum over all\n # possible edges/neighbours.\n adj_mat = params['adj_mat']\n temp_mask = tf.expand_dims(mask, 2)\n mask = tf.multiply(temp_mask, tf.transpose(temp_mask, (0, 2, 1)))\n adj_mat = adj_mat * mask\n\n if FLAGS.use_edge_features:\n edge_features = params['edge_feat']\n # Just to make sure that the edge features are interpreted in the way that\n # edge features give out a distribution over possible labels.\n if FLAGS.use_sigmoid_for_edge_feat:\n edge_features = tf.nn.sigmoid(edge_features)\n else:\n edge_features = tf.nn.softmax(edge_features)\n edge_features = edge_features * tf.expand_dims(mask, 3)\n edge_embd = params['edge_embd']\n # edge_features: B x N x N x (d+1)\n # edge_embd: (d+1) x m x m\n batch_size = tf.shape(edge_features)[0]\n num_nodes = tf.shape(edge_features)[1]\n print (edge_embd)\n edge_embd = tf.reshape(edge_embd,\n [model_hparams.edge_dim + 1,\n model_hparams.msg_dim*model_hparams.msg_dim])\n edge_features_rs = tf.reshape(edge_features,\n [batch_size*num_nodes*num_nodes,\n model_hparams.edge_dim + 1])\n edge_matrices = tf.matmul(edge_features_rs, edge_embd)\n edge_matrices = tf.reshape(edge_matrices,\n [batch_size, num_nodes, num_nodes,\n model_hparams.msg_dim, model_hparams.msg_dim])\n 
# edge_matrices: B x N x N x d x d\n msg_tiled = tf.tile(tf.expand_dims(msg_function, 2),\n [1, 1, tf.shape(msg_function)[1], 1])\n msg_edge_embd = tf.matmul(edge_matrices, tf.expand_dims(msg_tiled, 4))\n # B x N x N x d x 1\n msg_edge_embd = tf.multiply(tf.squeeze(msg_edge_embd, axis=4),\n tf.expand_dims(adj_mat, 3))\n # B x N x N x d\n msg_function = tf.reduce_sum(msg_edge_embd, axis=1)\n return msg_function\n\n # adj_mat de facto handles the non-existence of particular nodes\n msg_function = tf.transpose(msg_function, perm=[0, 2, 1])\n msg_function = tf.matmul(msg_function, adj_mat)\n return tf.transpose(msg_function, perm=[0, 2, 1])\n\ndef aggn_fn(msg_matrix):\n \"\"\"Messages are M(u -> v), so we need to sum over dimension 1.\"\"\"\n return tf.reduce_sum(msg_matrix, 1)\n\ndef update_fn(params, model_hparams):\n node_states = params['node_states']\n msg_vec = params['agg_msg']\n mask = params['mask']\n batch_size = tf.shape(node_states)[0]\n num_nodes = tf.shape(node_states)[1]\n node_shape = tf.shape(node_states)\n msg_shape = tf.shape(msg_vec)\n\n is_training = params['is_training']\n\n msg_dim = model_hparams.msg_dim\n node_dim = model_hparams.node_dim\n print ('INFO: Msg Dim = ', msg_dim, ' Node Dim = ', node_dim)\n print ('Msg Vec = ', msg_vec)\n\n with tf.variable_scope('update_fn', reuse=tf.AUTO_REUSE):\n w_z = tf.get_variable(\"GRU_w_z\", shape=[msg_dim, node_dim])\n u_z = tf.get_variable(\"GRU_u_z\", shape=[node_dim, node_dim])\n w_r = tf.get_variable(\"GRU_w_r\", shape=[msg_dim, node_dim])\n u_r = tf.get_variable(\"GRU_u_r\", shape=[node_dim, node_dim])\n w = tf.get_variable(\"GRU_w\", shape=[msg_dim, node_dim])\n u = tf.get_variable(\"GRU_u\", shape=[node_dim, node_dim])\n\n node_reshape = tf.reshape(node_states, [-1, node_shape[2]], name='node_rs')\n msg_reshape = tf.reshape(msg_vec, [-1, msg_shape[2]], name='msg_rs')\n\n z_t = tf.sigmoid(\n tf.matmul(msg_reshape, w_z) + tf.matmul(node_reshape, u_z), name=\"z_t\")\n r_t = tf.sigmoid(\n tf.matmul(msg_reshape, w_r) + tf.matmul(node_reshape, u_r), name=\"r_t\")\n h_tilde = tf.tanh(\n tf.matmul(msg_reshape, w) + tf.matmul(tf.multiply(r_t, node_reshape), u),\n name=\"h_tilde\")\n h_t = tf.multiply(1 - z_t, node_reshape) + tf.multiply(z_t, h_tilde)\n h_t_rs = tf.reshape(\n h_t, [batch_size, num_nodes, node_dim], name=\"h_t_rs\")\n\n mask_col = tf.reshape(mask, [batch_size, num_nodes, 1])\n h_t_masked = tf.multiply(\n h_t_rs, mask_col, name=\"mul_h_t_masked\"\n )\n\n h_t_rs = tf.reshape(\n h_t_masked, [batch_size, num_nodes, node_dim], name=\"h_t_rs_again\")\n return h_t_rs\n\ndef message_passing_step(node_rep,\n msg_fn, agg_fn, state_fn,\n params, model_hparams):\n adj_mat = params['adj_mat']\n mask = params['mask']\n if not FLAGS.use_edge_features:\n params['edge_features'] = None\n params['edge_embd'] = None\n message_out = msg_fn(params={\n 'node_states' : node_rep,\n 'is_training' : params['is_training'],\n 'adj_mat' : params['adj_mat'],\n 'edge_feat' : params['edge_features'],\n 'edge_embd' : params['edge_embd'],\n 'mask' : params['mask']\n },\n model_hparams=model_hparams)\n agg_out = message_out\n # Honour the state_fn argument (it defaults to update_fn above).\n next_state_output = state_fn(params={\n 'node_states': node_rep,\n 'is_training' : params['is_training'],\n 'agg_msg' : agg_out,\n 'mask': mask\n },\n model_hparams=model_hparams)\n return next_state_output\n\n\nclass GraphNet(object):\n \"\"\"Instantiation of Rev-GNN architecture for graph generation.\"\"\"\n def __init__(self, hparams, params):\n self.hparams = hparams\n self.msg_fn = params['msg_fn'] if 'msg_fn' in params else 
message_fn\n self.agg_fn = params['agg_fn'] if 'agg_fn' in params else aggn_fn\n self.state_fn = params['update_fn'] if 'update_fn' in params else update_fn\n self.num_steps = self.hparams.num_steps\n self.is_training = params['is_training']\n self.n_edge_types = hparams.edge_dim\n self.n_node_types = hparams.node_dim\n\n if FLAGS.use_edge_features:\n with tf.variable_scope('edge_embd', reuse=tf.AUTO_REUSE):\n self.edge_embd = tf.get_variable(\n 'edge_embd',\n shape=(self.n_edge_types+1,\n hparams.msg_dim, hparams.msg_dim))\n\n def forward(self, node_states, adj_mat, mask, edge_feat=None):\n is_training = self.is_training\n def _f(node_rep):\n with tf.variable_scope('rev_mp/layer_0/b') as scope:\n params = dict()\n params['adj_mat'] = adj_mat\n params['is_training'] = is_training\n params['mask'] = mask\n if FLAGS.use_edge_features:\n params['edge_features'] = edge_feat\n params['edge_embd'] = self.edge_embd\n return message_passing_step(node_rep,\n self.msg_fn,\n self.agg_fn,\n self.state_fn,\n params,\n self.hparams)\n rev_mp = rev_GNN.rev_mp_block_backward(\n node_states[:, :, :(self.hparams.node_dim)],\n node_states[:, :, (self.hparams.node_dim):],\n _f,\n _f,\n self.num_steps, is_training)\n node_states_out = tf.concat([rev_mp[0], rev_mp[1]], axis=2)\n return node_states_out\n\n def jacobian_forward(self, omega, adj_mat, mask=None, edge_feat=None):\n batch_size = tf.shape(omega)[0]\n return tf.zeros([batch_size], tf.float32)\n\n def jacobian_backward(self, omega, adj_mat, mask=None, edge_feat=None):\n batch_size = tf.shape(omega)[0]\n return tf.zeros([batch_size], tf.float32)\n\n def inverse(self, node_states, adj_mat, mask, edge_feat=None):\n is_training = self.is_training\n print ('Node states: ', node_states)\n def _f(node_rep):\n with tf.variable_scope('rev_mp/layer_0/b') as scope:\n params = dict()\n params['adj_mat'] = adj_mat\n params['is_training'] = is_training\n params['mask'] = mask\n if FLAGS.use_edge_features:\n params['edge_features'] = edge_feat\n params['edge_embd'] = self.edge_embd\n return message_passing_step(node_rep,\n self.msg_fn,\n self.agg_fn,\n self.state_fn,\n params,\n self.hparams)\n rev_mp = rev_GNN.rev_mp_block(\n node_states[:, :, :(self.hparams.node_dim)],\n node_states[:, :, (self.hparams.node_dim):],\n _f,\n _f,\n self.num_steps, is_training)\n node_states_out = tf.concat([rev_mp[0], rev_mp[1]], axis=2)\n return node_states_out\n\nclass BatchNormBijector(tf.contrib.distributions.bijectors.BatchNormalization):\n \"\"\"Extended batch norm bijector because of some changes that need to be\n made in the existing batch norm bijector implementation.\"\"\"\n def __init__(self,\n batchnorm_layer=None,\n training=True,\n validate_args=False,\n name=\"batch_normalization\"):\n print (name)\n with tf.variable_scope(name, reuse=tf.AUTO_REUSE):\n g_constraint = lambda x: tf.nn.relu(x) + 1e-6\n batchnorm_layer = tf.layers.BatchNormalization(\n trainable=True,\n gamma_constraint=g_constraint,\n momentum=0.99,\n renorm_momentum=0.99,\n epsilon=1e-4)\n super(BatchNormBijector, self).__init__(batchnorm_layer,\n training,\n validate_args,\n name)\n\n def _forward(self, x):\n with tf.variable_scope(self.name, reuse=tf.AUTO_REUSE):\n return self._de_normalize(x)\n\n def _normalize(self, y):\n with tf.variable_scope(self.name, reuse=tf.AUTO_REUSE):\n return self.batchnorm.apply(y,\n training=(self._training and (not\n FLAGS.batch_norm_type)))\n\n def _forward_log_det_jacobian(self, x):\n with tf.variable_scope(self.name, reuse=tf.AUTO_REUSE):\n return 
-self._inverse_log_det_jacobian(x, use_saved_statistics=True)\n\n def _get_broadcast_fn(self, x):\n \"\"\"The shape-fully-defined check is commented out, so that an unknown\n static shape is not an issue.\"\"\"\n # if not x.shape.is_fully_defined():\n # raise ValueError(\"Input must have shape known at graph construction.\")\n input_shape = tf.shape(x)\n input_shape_length = input_shape.get_shape().as_list()[0]\n\n ndims = input_shape_length\n reduction_axes = [i for i in range(ndims) if i not in self.batchnorm.axis]\n # Broadcasting only necessary for single-axis batch norm where the axis is\n # not the last dimension\n broadcast_shape = [1] * ndims\n broadcast_shape[self.batchnorm.axis[0]] = (\n input_shape[self.batchnorm.axis[0]])\n def _broadcast(v):\n if (v is not None and\n len(v.get_shape()) != ndims and\n reduction_axes != list(range(ndims - 1))):\n return tf.reshape(v, broadcast_shape)\n return v\n return _broadcast\n\n def _inverse_log_det_jacobian(self, y, use_saved_statistics=False):\n \"\"\"Shape-checking constraints are removed so that shapes are checked\n lazily at run time.\"\"\"\n with tf.variable_scope(self.name, reuse=tf.AUTO_REUSE):\n input_shape = tf.shape(y)\n input_shape_length = input_shape.get_shape()[0]\n print ('Input shape length: ', input_shape_length)\n\n if not self.batchnorm.built:\n # Create variables.\n self.batchnorm.build(input_shape)\n\n event_dims = self.batchnorm.axis\n reduction_axes = [i for i in range(input_shape_length)\\\n if i not in event_dims]\n\n if (use_saved_statistics or not self._training) or FLAGS.batch_norm_type:\n log_variance = tf.log(\n self.batchnorm.moving_variance + self.batchnorm.epsilon)\n else:\n # At training-time, ildj is computed from the mean and log-variance\n # across the current minibatch.\n _, v = tf.nn.moments(y, axes=reduction_axes, keep_dims=True)\n log_variance = tf.log(v + self.batchnorm.epsilon)\n # `gamma` and `log Var(y)` reductions over event_dims.\n # Log(total change in area from gamma term).\n log_total_gamma = tf.reduce_sum(tf.log(self.batchnorm.gamma))\n\n # Log(total change in area from log-variance term).\n log_total_variance = tf.reduce_sum(log_variance)\n # The ildj is scalar, as it does not depend on the values of x and is\n # constant across minibatch elements.\n return log_total_gamma - 0.5 * log_total_variance\n\nclass BatchNormBijector2(object):\n \"\"\"Bijector for applying Batch Normalisation.\n\n This class defines functions for running forward and backward passes over the\n batch norm bijector. 
Update ops were an issue with the regular tf batch norm\n bijector, hence this custom BatchNormBijector2 was created.\n\n Attributes:\n vars_created: Whether variables exist, or whether they should be created.\n scope: A tf.variable_scope with appropriate reuse settings to be used for\n defining ops and variables inside it.\n epsilon: A small floating point constant to prevent NaN from arising in\n computations when dividing by variance.\n name: Name of the instantiated object\n training: A bool, whether to instantiate in train mode or in test mode.\n decay: A float representing the decay factor in the moving mean and\n moving variance update in BatchNorm\n \"\"\"\n\n def __init__(self,\n batchnorm_layer=None,\n training=True,\n validate_args=False,\n name='batch_normalization',\n mask=None):\n \"\"\"Instantiate the batch norm bijector.\"\"\"\n self._vars_created = False\n self._scope = tf.variable_scope(name, reuse=tf.AUTO_REUSE)\n self._epsilon = 1e-4\n self.name = name\n self._training = training\n self._decay = 0.42\n\n def _create_vars(self, x):\n \"\"\"Create variables for this batch norm instance given a sample input.\"\"\"\n n = tf.shape(x).get_shape().as_list()[0]\n n = x.get_shape().as_list()[n - 1]\n with self._scope:\n self.beta = tf.get_variable('beta', [1, n], trainable=True)\n self.gamma = tf.get_variable('gamma', [1, n], trainable=True)\n self.train_m = tf.get_variable(\n 'moving_mean', [1, n],\n initializer=tf.zeros_initializer,\n trainable=False)\n self.train_v = tf.get_variable(\n 'moving_var', [1, n],\n initializer=tf.ones_initializer,\n trainable=False)\n self._vars_created = True\n\n def _forward(self, x, **kwargs):\n \"\"\"Run denormalisation (invert batch norm) for sampling computation.\"\"\"\n if not self._vars_created:\n self._create_vars(x)\n x = tf.Print(\n x, [self.train_m, self.train_v],\n summarize=100,\n message='moving_mean_debug')\n return (x - self.beta) * tf.exp(-self.gamma) *\\\n tf.sqrt(self.train_v + self._epsilon) + self.train_m\n\n def _forward_log_det_jacobian(self, x, **kwargs):\n \"\"\"Return the log-determinant of the jacobian, which is the negative of\n the inverse jacobian's log determinant.\"\"\"\n return -self._inverse_log_det_jacobian(x, **kwargs)\n\n def _inverse(self, x, use_saved_statistics=False, update_vars=False,\n **kwargs):\n \"\"\"Apply BN in the forward direction during training.\"\"\"\n if not self._vars_created:\n self._create_vars(x)\n _mask = kwargs['mask']\n input_shape = tf.shape(x)\n input_shape_length = input_shape.get_shape()[0]\n print('Input shape length: ', input_shape_length)\n\n reduction_axes = [ax_num for ax_num in range(input_shape_length - 1)]\n # At train time, use the current minibatch moments, at test time use the\n # moments from the moving average.\n if (use_saved_statistics or not self._training) or True:\n return (x - self.train_m) * 1. / tf.sqrt(self.train_v + self._epsilon) *\\\n tf.exp(self.gamma) + self.beta\n else:\n m, v = _masked_moments(\n x, axes=reduction_axes, mask=_mask, keep_dims=False)\n print('v in inverse: ', v)\n # Right now an exponential moving average is used. 
We could also\n # try something different.\n if update_vars:\n with self._scope:\n update_train_m = tf.assign_sub(\n self.train_m,\n self._decay * (self.train_m - m),\n use_locking=True,\n name='update_m')\n update_train_v = tf.assign_sub(\n self.train_v,\n self._decay * (self.train_v - v),\n use_locking=True,\n name='update_v')\n #tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_train_v)\n #tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_train_m)\n with tf.control_dependencies([update_train_m, update_train_v]):\n return (x - m)*1.0/tf.sqrt(v + self._epsilon)*tf.exp(self.gamma) +\\\n self.beta\n else:\n return (x - m)*1.0/tf.sqrt(v + self._epsilon)*tf.exp(self.gamma) +\\\n self.beta\n\n def _inverse_log_det_jacobian(self, x, **kwargs):\n \"\"\"Compute the log determinant of the jacobian in the inverse pass.\"\"\"\n input_shape = tf.shape(x)\n input_shape_length = input_shape.get_shape()[0]\n print('Input shape length: ', input_shape_length)\n _mask = kwargs['mask']\n\n reduction_axes = [ax_num for ax_num in range(input_shape_length - 1)]\n\n if not self._vars_created:\n self._create_vars(x)\n if self._training and False:\n _, v = _masked_moments(\n x, axes=reduction_axes, mask=_mask, keep_dims=False)\n v = tf.Print(v, [v], summarize=10, message='current-' + self.name)\n print('v: ', v)\n else:\n v = self.train_v\n v = tf.Print(\n v, [self.train_v], summarize=10, message='moving_' + self.name)\n log_det_jacobian = tf.reduce_sum(self.gamma) -\\\n 0.5*tf.reduce_sum(tf.log(v + self._epsilon))\n return log_det_jacobian\n\n\nclass GraphCouplingBijector(object):\n \"\"\"Coupling operation for graph based operations; where reversible GNNs are\n applied on top of models.\"\"\"\n def __init__(self, gnn, adj_fn,\n event_ndims=0,\n validate_args=False,\n name=\"graph_coupling_bijector\",\n hparams=None,\n params = None,\n is_training=True):\n assert hparams is not None\n assert params is not None\n self.graph_net = gnn if gnn is not None else gnn\n\n self.graph_net = GraphNet(hparams, params)\n self.adj_fn = adj_fn\n self.num_steps = hparams.num_steps\n self.name = name\n self.is_training = is_training\n\n def _forward(self, x, **kwargs):\n z, omega = x\n adj_mat = self.adj_fn(z)\n edge_feat = None\n if FLAGS.use_edge_features:\n assert 'edge_feat' in kwargs\n edge_feat = kwargs['edge_feat']\n # Takes as input the node features and adjacency matrix and does the\n # reversible message passing computation here.\n with tf.variable_scope(\"{name}\".format(name=self.name), reuse=tf.AUTO_REUSE):\n omega_new = self.graph_net.forward(omega, adj_mat,\n kwargs['mask'], edge_feat)\n return (z, omega_new)\n\n def _forward_log_det_jacobian(self, x, **kwargs):\n z, omega = x\n mask = kwargs['mask']\n adj_mat = self.adj_fn(z)\n edge_feat = None\n if FLAGS.use_edge_features:\n edge_feat = kwargs['edge_feat']\n batch_size = tf.shape(z)[0]\n sum_log_det_jacobian = tf.zeros([batch_size])\n sum_log_det_jacobian += self.graph_net.jacobian_forward(omega, adj_mat,\n mask=mask,\n edge_feat=edge_feat)\n return sum_log_det_jacobian\n\n def _inverse(self, y, **kwargs):\n z, omega = y\n edge_feat = None\n if FLAGS.use_edge_features:\n edge_feat = kwargs['edge_feat']\n adj_mat = self.adj_fn(z)\n with tf.variable_scope(\"{name}\".format(name=self.name), reuse=tf.AUTO_REUSE):\n omega_old = self.graph_net.inverse(omega, adj_mat,\n kwargs['mask'], edge_feat)\n return (z, omega_old)\n\n def _inverse_log_det_jacobian(self, y, **kwargs):\n z, omega = y\n mask = kwargs['mask']\n edge_feat = None\n if FLAGS.use_edge_features:\n 
edge_feat = kwargs['edge_feat']\n batch_size = tf.shape(z)[0]\n sum_log_det_jacobian = tf.zeros([batch_size])\n sum_log_det_jacobian += self.graph_net.jacobian_backward(omega, z,\n mask, edge_feat)\n return sum_log_det_jacobian\n\nclass CouplingBijector(object):\n def __init__(self, translation_fn, scale_fn,\n event_ndims=0,\n validate_args=False,\n name=\"coupling_bijector\",\n is_training=True):\n self.translation_fn = translation_fn\n self.scale_fn = scale_fn\n self.name = name\n self.is_training = is_training\n self.n_edge_types = 5 ## Hardcoded for now\n if FLAGS.only_nodes:\n self.masking_translation = 0.0\n else:\n self.masking_translation = 1.0\n\n def _forward(self, x, **kwargs):\n z, omega = x\n mask = kwargs['mask']\n mask = tf.multiply(tf.expand_dims(mask, 2),\n tf.transpose(tf.expand_dims(mask, 2), (0, 2, 1)))\n z_dims = tf.shape(z)[2]\n omega_new = omega\n\n with tf.variable_scope(\"{name}/scale\".format(name=self.name),\n reuse=tf.AUTO_REUSE):\n scale_omega = self.scale_fn(omega, z_dims)\n\n with tf.variable_scope(\"{name}/translation\".format(name=self.name),\n reuse=tf.AUTO_REUSE):\n translation_omega = self.translation_fn(omega, z_dims)\n exp_scale = tf.check_numerics(tf.exp(scale_omega*mask),\n \" tf.exp(scale) is not numerically stable\")\n if FLAGS.use_edge_features:\n z_update = translation_omega[0]\n edge_update = translation_omega[1]\n else:\n z_update = translation_omega\n z_new = (z*exp_scale +\\\n self.masking_translation*z_update*mask)\n\n if FLAGS.use_edge_features:\n edge_feat = kwargs['edge_feat']\n temp_mask = tf.expand_dims(mask, 3)\n # like geometric mean of the edge features\n if FLAGS.use_scaling and FLAGS.share_scaling:\n edge_feat = FLAGS.lambda_combiner * edge_feat *\\\n tf.expand_dims(exp_scale, 3) +\\\n (1.0 - FLAGS.lambda_combiner)*edge_update * temp_mask\n else:\n edge_feat = FLAGS.lambda_combiner*edge_feat +\\\n (1.0 - FLAGS.lambda_combiner)*edge_update* temp_mask\n return (z_new, omega_new), edge_feat\n return (z_new, omega_new)\n\n def _forward_log_det_jacobian(self, x, **kwargs):\n z, omega = x\n mask = kwargs['mask']\n mask = tf.expand_dims(mask, 2)\n mask = tf.multiply(mask, tf.transpose(mask, (0, 2, 1)))\n\n z_dims = tf.shape(z)[2]\n with tf.variable_scope(\"{name}/scale\".format(name=self.name),\n reuse=tf.AUTO_REUSE):\n scale_omega = self.scale_fn(omega, z_dims)\n log_det_jacobian = tf.reduce_sum(scale_omega*mask, axis=[1,2])\n if FLAGS.use_edge_features:\n edge_feat = kwargs['edge_feat']\n if FLAGS.use_scaling and FLAGS.share_scaling:\n log_det_jacobian += FLAGS.lambda_combiner*(self.n_edge_types+1)*\\\n tf.reduce_sum(scale_omega*mask, axis=tuple(range(1, len(z.shape))))\n else:\n log_det_jacobian += FLAGS.lambda_combiner*tf.reduce_sum(\n tf.ones_like(edge_feat)*tf.expand_dims(mask, 3), axis=[1, 2, 3])\n return log_det_jacobian\n\n def _inverse(self, y, **kwargs):\n z_new, omega_new = y\n mask = kwargs['mask']\n mask = tf.expand_dims(mask, 2)\n mask = tf.multiply(mask, tf.transpose(mask, (0, 2, 1)))\n z_dims = tf.shape(z_new)[2]\n omega = omega_new\n\n with tf.variable_scope(\"{name}/scale\".format(name=self.name),\n reuse=tf.AUTO_REUSE):\n scale_omega = self.scale_fn(omega, z_dims)\n\n with tf.variable_scope(\"{name}/translation\".format(name=self.name),\n reuse=tf.AUTO_REUSE):\n translation_omega = self.translation_fn(omega, z_dims)\n exp_scale = tf.check_numerics(tf.exp(-scale_omega*mask),\n \" tf.exp(-scale) is not numerically stable\")\n if FLAGS.use_edge_features:\n z_update = translation_omega[0]\n edge_update = 
translation_omega[1]\n else:\n z_update = translation_omega\n\n z = (z_new - self.masking_translation * z_update * mask)* exp_scale\n\n if FLAGS.use_edge_features:\n edge_feat = kwargs['edge_feat']\n temp_mask = tf.expand_dims(mask, 3)\n edge_feat = (edge_feat -\\\n (1.0 - FLAGS.lambda_combiner)*edge_update*temp_mask)\n if FLAGS.use_scaling and FLAGS.share_scaling:\n edge_feat = edge_feat*1.0/FLAGS.lambda_combiner*\\\n tf.expand_dims(exp_scale, 3)\n else:\n edge_feat = edge_feat*1.0/FLAGS.lambda_combiner\n return (z, omega), edge_feat\n\n return (z, omega)\n\n def _inverse_log_det_jacobian(self, y, **kwargs):\n z_new, omega_new = y\n mask = kwargs['mask']\n mask = tf.expand_dims(mask, 2)\n mask = tf.multiply(mask, tf.transpose(mask, (0, 2, 1)))\n z_dims = tf.shape(z_new)[2]\n with tf.variable_scope(\"{name}/scale\".format(name=self.name),\n reuse=tf.AUTO_REUSE):\n scale_omega = self.scale_fn(omega_new, z_dims)\n log_det_jacobian = -tf.reduce_sum(scale_omega*mask,\n axis=tuple(range(1, len(z_new.shape))))\n if FLAGS.use_edge_features:\n edge_feat = kwargs['edge_feat']\n if FLAGS.use_scaling and FLAGS.share_scaling:\n log_det_jacobian -= FLAGS.lambda_combiner*(self.n_edge_types+1)*\\\n tf.reduce_sum(scale_omega*mask, axis=tuple(range(1, len(z_new.shape))))\n else:\n log_det_jacobian -= FLAGS.lambda_combiner*tf.reduce_sum(\n tf.ones_like(edge_feat), axis=[1, 2, 3])\n return log_det_jacobian\n\n\nclass RealNVP(object):\n\n def __init__(self,\n num_coupling_layers=2,\n event_ndims=0,\n name='real-nvp',\n variable_sharing=True):\n \"\"\"Instantiate a realNVP bijector for graph generation.\"\"\"\n self.num_coupling_layers = num_coupling_layers\n self.variable_sharing = variable_sharing\n self.name = name\n\n def build(self, params, hparams, adj_fn, translate_fn, scale_fn,\n is_training=None):\n num_coupling_layers = self.num_coupling_layers\n self.layers_z = [CouplingBijector(name=\"coupling_{i}_z\".format(i=i),\n translation_fn=translate_fn,\n scale_fn=scale_fn,\n is_training=is_training)\n for i in range(0, num_coupling_layers)]\n\n self.layers_omega = [GraphCouplingBijector(name=\"graph_{i}_h\".format(i=i),\n gnn=None,\n adj_fn=adj_fn,\n hparams=hparams,\n params = params,\n is_training=is_training)\n for i in range(0, num_coupling_layers)]\n\n # To use batch norm, we directly use the batch norm bijectors. There are\n # separate bijectors for z, omega and the edge features, because batch\n # norm is applied differently to each of them.\n self.layers_batch_norm_z = [\n BatchNormBijector(\n batchnorm_layer=None,\n training=is_training,\n validate_args=False,\n name='batch-norm-z-{i}'.format(i=i))\n for i in range(0, num_coupling_layers)]\n\n self.layers_batch_norm_h = [\n BatchNormBijector(\n batchnorm_layer=None,\n training=is_training,\n validate_args=False,\n name='batch-norm-h-{i}'.format(i=i))\n for i in range(0, num_coupling_layers)]\n\n self.layers_batch_norm_e = [\n BatchNormBijector(\n batchnorm_layer=None,\n training=is_training,\n validate_args=False,\n name='batch-norm-e-{i}'.format(i=i))\n for i in range(0, num_coupling_layers)]\n\n self.layers_h = self.layers_omega\n print ('RealNVP build finished....')\n\n def _forward(self, x, **kwargs):\n out = x\n # As we apply BN in the opposite direction, the first thing to do is to\n # invert Z using BN on the inputs, omega_0 is already normalised. Then move\n # ahead to get Z_0.5 which is expected to be normalised as omega_0 was. 
Now,\n # we apply the graph propagation onto omega_0.5 andZ_0.5 both of which are\n # normalised, so shouldn't shoot up by the end.\n for idx, (layer_z, layer_h) in enumerate(zip(self.layers_z,\n self.layers_omega)):\n z, omega = out\n if not FLAGS.only_nodes:\n z = self.layers_batch_norm_z[idx]._forward(tf.expand_dims(z, 3))\n z = tf.squeeze(z, 3)\n if FLAGS.use_edge_features:\n edge_feat = self.layers_batch_norm_e[idx]._forward(kwargs['edge_feat'])\n out = (z, omega)\n\n if not FLAGS.use_edge_features:\n out = layer_z._forward(out, **kwargs)\n else:\n kwargs['edge_feat'] = edge_feat\n out, edge_feat = layer_z._forward(out, **kwargs)\n kwargs['edge_feat'] = edge_feat\n\n z, omega = out\n if FLAGS.use_BN:\n omega = self.layers_batch_norm_h[idx]._forward(omega)\n out = (z, omega)\n\n out = layer_h._forward(out, **kwargs)\n\n if FLAGS.sample:\n return out, kwargs['edge_feat']\n return out\n\n def _forward_log_det_jacobian(self, x, **kwargs):\n z, omega = x\n sum_log_det_jacobian = 0\n out = x\n\n for idx, (layer_z, layer_h) in enumerate(zip(self.layers_z,\n self.layers_omega)):\n # BN on Z\n if not FLAGS.only_nodes:\n z, omega = out\n sum_log_det_jacobian += self.layers_batch_norm_z[idx].forward_log_det_jacobian(\n tf.expand_dims(z, 3),\n event_ndims=1)\n z = self.layers_batch_norm_z[idx]._forward(tf.expand_dims(z, 3))\n z = tf.squeeze(z, 3)\n if FLAGS.use_edge_features:\n sum_log_det_jacobian += self.layers_batch_norm_e[idx].forward_log_det_jacobian(\n kwargs['edge_feat'],\n event_ndims=1)\n edge_feat = self.layers_batch_norm_e[idx]._forward(kwargs['edge_feat'])\n out = (z, omega)\n\n # Z fprop\n sum_log_det_jacobian += layer_z._forward_log_det_jacobian(out, **kwargs)\n if FLAGS.use_edge_features:\n kwargs['edge_feat'] = edge_feat\n out, edge_feat = layer_z._forward(out, **kwargs)\n kwargs['edge_feat'] = edge_feat\n else:\n out = layer_z._forward(out, **kwargs)\n # BN on omega\n print ('KWARGS here: ', kwargs)\n z, omega = out\n if FLAGS.use_BN:\n sum_log_det_jacobian += self.layers_batch_norm_h[idx].forward_log_det_jacobian(\n omega,\n event_ndims=1)\n omega = self.layers_batch_norm_h[idx]._forward(omega)\n out = z, omega\n\n # omega fprop\n sum_log_det_jacobian += layer_h._forward_log_det_jacobian(out, **kwargs)\n out = layer_h._forward(out, **kwargs)\n\n return sum_log_det_jacobian\n\n def _inverse(self, y, **kwargs):\n z, omega = y\n self.layers_batch_norm_h.reverse()\n self.layers_batch_norm_z.reverse()\n self.layers_batch_norm_e.reverse()\n\n for idx, (layer_z, layer_h) in enumerate(zip(reversed(self.layers_z),\n reversed(self.layers_h))):\n z, omega = layer_h._inverse((z, omega), **kwargs)\n if FLAGS.use_BN:\n omega = self.layers_batch_norm_h[idx]._inverse(omega)\n if FLAGS.use_edge_features:\n (z, omega), edge_feat = layer_z._inverse((z, omega), **kwargs)\n kwargs['edge_feat'] = edge_feat\n else:\n z, omega = layer_z._inverse((z, omega), **kwargs)\n if not FLAGS.only_nodes:\n if FLAGS.use_edge_features:\n edge_feat = self.layers_batch_norm_e[idx]._inverse(kwargs['edge_feat'])\n kwargs['edge_feat'] = edge_feat\n z = self.layers_batch_norm_z[idx]._inverse(tf.expand_dims(z, 3))\n z = tf.squeeze(z, 3)\n\n self.layers_batch_norm_h.reverse()\n self.layers_batch_norm_z.reverse()\n self.layers_batch_norm_e.reverse()\n if FLAGS.use_edge_features:\n return z, omega, edge_feat\n return z, omega\n\n def _inverse_log_det_jacobian(self, y, **kwargs):\n \"Called during training -- Check twice for correctness.\"\n z, omega = y\n out = y\n sum_log_det_jacobian = 0\n 
self.layers_batch_norm_h.reverse()\n self.layers_batch_norm_z.reverse()\n self.layers_batch_norm_e.reverse()\n\n for idx, (layer_z, layer_h) in enumerate(zip(reversed(self.layers_z),\n reversed(self.layers_h))):\n z, omega = out\n # omega_layer bprop\n sum_log_det_jacobian += layer_h._inverse_log_det_jacobian(out, **kwargs)\n out = layer_h._inverse(out, **kwargs)\n\n # omega-bn apply\n if FLAGS.use_BN:\n sum_log_det_jacobian += self.layers_batch_norm_h[idx].inverse_log_det_jacobian(\n omega,\n event_ndims=1)\n omega = self.layers_batch_norm_h[idx]._inverse(omega)\n out = (z, omega)\n # Z backprop\n sum_log_det_jacobian += layer_z._inverse_log_det_jacobian(out, **kwargs)\n if FLAGS.use_edge_features:\n out, edge_feat = layer_z._inverse(out, **kwargs)\n kwargs['edge_feat'] = edge_feat\n else:\n out = layer_z._inverse(out, **kwargs)\n\n # z-bn apply\n z, omega = out\n if not FLAGS.only_nodes:\n sum_log_det_jacobian += self.layers_batch_norm_z[idx].inverse_log_det_jacobian(\n tf.expand_dims(z, 3),\n event_ndims=1)\n z = self.layers_batch_norm_z[idx]._inverse(tf.expand_dims(z, 3))\n z = tf.squeeze(z, 3)\n if FLAGS.use_edge_features:\n sum_log_det_jacobian += self.layers_batch_norm_e[idx].inverse_log_det_jacobian(\n kwargs['edge_feat'],\n event_ndims=1)\n edge_feat = self.layers_batch_norm_e[idx]._inverse(kwargs['edge_feat'])\n kwargs['edge_feat'] = edge_feat\n out = (z, omega)\n\n self.layers_batch_norm_h.reverse()\n self.layers_batch_norm_z.reverse()\n self.layers_batch_norm_e.reverse()\n return sum_log_det_jacobian\n\n\ndef real_nvp_block(fn_params, num_layers):\n \"\"\"Instantiate a Real NVP block for the graph generation process.\"\"\"\n num_coupling_layers = fn_params['num_coupling_layers']\n real_nvp = RealNVP(num_coupling_layers, name='real-nvp')\n return real_nvp\n\ndef real_nvp_model_fn(real_nvp_model, z,\n omega, input_dist_fn, is_training=True, **kwargs):\n \"\"\"Get the Real NVP density for corresponding arguments.\n Args:\n z: a batch of sampled adjacency matrices\n omega: a batch of sampled node features\n \"\"\"\n print (kwargs)\n old_kwargs = copy.copy(kwargs)\n\n if FLAGS.use_edge_features:\n input_z, input_omega, edge_feat = real_nvp_model._inverse((z, omega),\n **kwargs)\n kwargs['edge_feat'] = edge_feat\n else:\n input_z, input_omega = real_nvp_model._inverse((z, omega), **kwargs)\n log_prior_prob = input_dist_fn((input_z, input_omega), is_training, **kwargs)\n log_det_jacobian = real_nvp_model._inverse_log_det_jacobian((z,\n omega),\n **old_kwargs)\n log_posterior_prob = log_prior_prob + log_det_jacobian\n log_posterior_prob = tf.Print(log_posterior_prob, [log_prior_prob,\n log_det_jacobian],\n summarize=100,\n message='Log Priors')\n\n return log_posterior_prob\n\ndef real_nvp_sample_fn(real_nvp_model, z_in, omega_in, input_dist_fn, **kwargs):\n \"\"\"Start from the inputs and get the output sample.\"\"\"\n print (kwargs)\n log_prior_prob = input_dist_fn((z_in, omega_in), **kwargs)\n out, edge_feat = real_nvp_model._forward((z_in, omega_in), **kwargs)\n log_det_jacobian = real_nvp_model._forward_log_det_jacobian((z_in, omega_in),\n **kwargs)\n log_posterior_prob = log_prior_prob - log_det_jacobian\n log_posterior_prob = tf.Print(log_posterior_prob, [log_posterior_prob,\n log_prior_prob,\n log_det_jacobian],\n summarize=100,\n message='Log Priors')\n return log_posterior_prob, out, edge_feat\n\n\n" ]
[ [ "tensorflow.get_variable", "tensorflow.concat", "tensorflow.control_dependencies", "tensorflow.zeros", "tensorflow.layers.dense", "tensorflow.reduce_sum", "tensorflow.nn.moments", "tensorflow.squeeze", "tensorflow.matmul", "tensorflow.Print", "tensorflow.nn.sigmoid", "tensorflow.shape", "tensorflow.exp", "tensorflow.nn.relu", "tensorflow.multiply", "tensorflow.transpose", "tensorflow.nn.softmax", "tensorflow.reshape", "tensorflow.ones_like", "tensorflow.expand_dims", "tensorflow.layers.BatchNormalization", "tensorflow.log", "tensorflow.assign_sub", "tensorflow.variable_scope", "tensorflow.sqrt" ] ]
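The entry above chains three invertible pieces -- graph coupling, affine coupling, and batch-norm bijectors -- and sums each piece's log-det-Jacobian for the change-of-variables density. A minimal NumPy sketch of the affine-coupling contract (forward, exact inverse, log-det); the names are hypothetical and plain linear maps stand in for the learned scale/translation networks:

    import numpy as np

    def coupling_forward(z, omega, w_s, w_t):
        # scale and translation depend only on the untouched half (omega)
        s = np.tanh(omega @ w_s)      # bounded log-scale, like exp_scale above
        t = omega @ w_t               # translation
        z_new = z * np.exp(s) + t
        log_det = s.sum(axis=-1)      # log|det J| per batch element
        return z_new, log_det

    def coupling_inverse(z_new, omega, w_s, w_t):
        # omega is untouched, so s and t are exactly recomputable
        s = np.tanh(omega @ w_s)
        t = omega @ w_t
        return (z_new - t) * np.exp(-s)

    rng = np.random.default_rng(0)
    z, omega = rng.normal(size=(4, 3)), rng.normal(size=(4, 3))
    w_s, w_t = rng.normal(size=(3, 3)), rng.normal(size=(3, 3))
    z_new, log_det = coupling_forward(z, omega, w_s, w_t)
    assert np.allclose(coupling_inverse(z_new, omega, w_s, w_t), z)

GraphNet's jacobian_forward/jacobian_backward return tf.zeros, i.e. the reversible message-passing block is treated as volume-preserving and contributes nothing to this sum.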
zysszy/Recoder
[ "f57736db376b6b24e755fcfbd1ce232c52aa31af" ]
[ "ConvolutionForward.py" ]
[ "import torch.nn as nn\nfrom gelu import GELU\nclass ConvolutionLayer(nn.Module):\n def __init__(self, dmodel, layernum, kernelsize=3, dropout=0.1):\n super(ConvolutionLayer, self).__init__()\n self.conv1 = nn.Conv1d(dmodel, layernum, kernelsize, padding=(kernelsize-1)//2)\n self.conv2 = nn.Conv1d(dmodel, layernum, kernelsize, padding=(kernelsize-1)//2)\n self.activation = GELU()\n self.dropout = nn.Dropout(dropout)\n def forward(self, x, mask):\n convx = self.conv1(x.permute(0, 2, 1))\n convx = self.conv2(convx)\n out = self.dropout(self.activation(convx.permute(0, 2, 1)))\n return out#self.dropout(self.activation(self.conv1(self.conv2(x))))\n" ]
[ [ "torch.nn.Dropout", "torch.nn.Conv1d" ] ]
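ConvolutionForward.py permutes (batch, seq, dmodel) to (batch, channels, seq) because nn.Conv1d convolves over the last axis; note that conv2 declares in_channels=dmodel but receives conv1's layernum output channels, so the stack only composes when dmodel == layernum. A self-contained usage sketch under that assumption, with torch's built-in nn.GELU standing in for the repo's gelu.GELU:

    import torch
    import torch.nn as nn

    dmodel = layernum = 64
    conv1 = nn.Conv1d(dmodel, layernum, 3, padding=1)
    conv2 = nn.Conv1d(dmodel, layernum, 3, padding=1)
    act, drop = nn.GELU(), nn.Dropout(0.1)

    x = torch.randn(2, 10, dmodel)            # (batch, seq, dmodel)
    h = conv2(conv1(x.permute(0, 2, 1)))      # convolve along the sequence axis
    out = drop(act(h.permute(0, 2, 1)))       # back to (batch, seq, dmodel)
    print(out.shape)                          # torch.Size([2, 10, 64])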
aydevosotros/pandas
[ "9444dce96954c546333d5aecc92a06c3bfd19aa5" ]
[ "pandas/tests/series/indexing/test_loc.py" ]
[ "# coding=utf-8\n# pylint: disable-msg=E1101,W0612\n\nimport pytest\n\nimport numpy as np\nimport pandas as pd\n\nfrom pandas import (Series, Timestamp)\n\nfrom pandas.compat import lrange\nfrom pandas.util.testing import (assert_series_equal)\n\n\ndef test_loc_getitem(test_data):\n inds = test_data.series.index[[3, 4, 7]]\n assert_series_equal(\n test_data.series.loc[inds],\n test_data.series.reindex(inds))\n assert_series_equal(test_data.series.iloc[5::2], test_data.series[5::2])\n\n # slice with indices\n d1, d2 = test_data.ts.index[[5, 15]]\n result = test_data.ts.loc[d1:d2]\n expected = test_data.ts.truncate(d1, d2)\n assert_series_equal(result, expected)\n\n # boolean\n mask = test_data.series > test_data.series.median()\n assert_series_equal(test_data.series.loc[mask], test_data.series[mask])\n\n # ask for index value\n assert test_data.ts.loc[d1] == test_data.ts[d1]\n assert test_data.ts.loc[d2] == test_data.ts[d2]\n\n\ndef test_loc_getitem_not_monotonic(test_data):\n d1, d2 = test_data.ts.index[[5, 15]]\n\n ts2 = test_data.ts[::2][[1, 2, 0]]\n\n pytest.raises(KeyError, ts2.loc.__getitem__, slice(d1, d2))\n pytest.raises(KeyError, ts2.loc.__setitem__, slice(d1, d2), 0)\n\n\ndef test_loc_getitem_setitem_integer_slice_keyerrors():\n s = Series(np.random.randn(10), index=lrange(0, 20, 2))\n\n # this is OK\n cp = s.copy()\n cp.iloc[4:10] = 0\n assert (cp.iloc[4:10] == 0).all()\n\n # so is this\n cp = s.copy()\n cp.iloc[3:11] = 0\n assert (cp.iloc[3:11] == 0).values.all()\n\n result = s.iloc[2:6]\n result2 = s.loc[3:11]\n expected = s.reindex([4, 6, 8, 10])\n\n assert_series_equal(result, expected)\n assert_series_equal(result2, expected)\n\n # non-monotonic, raise KeyError\n s2 = s.iloc[lrange(5) + lrange(5, 10)[::-1]]\n pytest.raises(KeyError, s2.loc.__getitem__, slice(3, 11))\n pytest.raises(KeyError, s2.loc.__setitem__, slice(3, 11), 0)\n\n\ndef test_loc_getitem_iterator(test_data):\n idx = iter(test_data.series.index[:10])\n result = test_data.series.loc[idx]\n assert_series_equal(result, test_data.series[:10])\n\n\ndef test_loc_setitem_boolean(test_data):\n mask = test_data.series > test_data.series.median()\n\n result = test_data.series.copy()\n result.loc[mask] = 0\n expected = test_data.series\n expected[mask] = 0\n assert_series_equal(result, expected)\n\n\ndef test_loc_setitem_corner(test_data):\n inds = list(test_data.series.index[[5, 8, 12]])\n test_data.series.loc[inds] = 5\n pytest.raises(Exception, test_data.series.loc.__setitem__,\n inds + ['foo'], 5)\n\n\ndef test_basic_setitem_with_labels(test_data):\n indices = test_data.ts.index[[5, 10, 15]]\n\n cp = test_data.ts.copy()\n exp = test_data.ts.copy()\n cp[indices] = 0\n exp.loc[indices] = 0\n assert_series_equal(cp, exp)\n\n cp = test_data.ts.copy()\n exp = test_data.ts.copy()\n cp[indices[0]:indices[2]] = 0\n exp.loc[indices[0]:indices[2]] = 0\n assert_series_equal(cp, exp)\n\n # integer indexes, be careful\n s = Series(np.random.randn(10), index=lrange(0, 20, 2))\n inds = [0, 4, 6]\n arr_inds = np.array([0, 4, 6])\n\n # set on the copies, mirroring the blocks above (setting on s itself\n # would leave cp and exp untouched and make the assertion vacuous)\n cp = s.copy()\n exp = s.copy()\n cp[inds] = 0\n exp.loc[inds] = 0\n assert_series_equal(cp, exp)\n\n cp = s.copy()\n exp = s.copy()\n cp[arr_inds] = 0\n exp.loc[arr_inds] = 0\n assert_series_equal(cp, exp)\n\n inds_notfound = [0, 4, 5, 6]\n arr_inds_notfound = np.array([0, 4, 5, 6])\n pytest.raises(Exception, s.__setitem__, inds_notfound, 0)\n pytest.raises(Exception, s.__setitem__, arr_inds_notfound, 0)\n\n # GH12089\n # with tz for values\n s = Series(pd.date_range(\"2011-01-01\", periods=3, 
tz=\"US/Eastern\"),\n index=['a', 'b', 'c'])\n s2 = s.copy()\n expected = Timestamp('2011-01-03', tz='US/Eastern')\n s2.loc['a'] = expected\n result = s2.loc['a']\n assert result == expected\n\n s2 = s.copy()\n s2.iloc[0] = expected\n result = s2.iloc[0]\n assert result == expected\n\n s2 = s.copy()\n s2['a'] = expected\n result = s2['a']\n assert result == expected\n" ]
[ [ "pandas.Timestamp", "pandas.util.testing.assert_series_equal", "numpy.random.randn", "pandas.date_range", "numpy.array", "pandas.compat.lrange" ] ]
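These tests hinge on the difference between label-based (.loc) and position-based (.iloc) indexing on a non-default integer index; a quick standalone illustration:

    import numpy as np
    import pandas as pd

    s = pd.Series(np.arange(5) * 10, index=[0, 2, 4, 6, 8])
    print(s.loc[2:6].tolist())   # [10, 20, 30] -- labels, end-inclusive
    print(s.iloc[2:4].tolist())  # [20, 30]     -- positions, end-exclusive

This is also why the KeyError cases above appear only once the index is made non-monotonic: slicing by labels absent from the index requires a sorted index to locate the slice bounds.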
SymmetricChaos/FiniteFields
[ "65258e06b7f04ce15223c1bc0c2384ef5e9cec1a" ]
[ "Visualization/FourierSquareWaveVisualization.py" ]
[ "from Fourier import Fourier, evaluate_series\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfig = plt.figure()\nx = np.linspace(-5,5,150)\npi = np.pi\nS = Fourier([0],[0],[0])\nfor i in [1,3,5]:\n A = Fourier([4/(pi*i)],[i])\n plt.plot(x,evaluate_series(A,x),color='gray',linestyle=\":\")\n S += A\n\nyS = evaluate_series(S,x)\nplt.plot(x,yS,color='black')\n\n\n\nfig = plt.figure()\nx = np.linspace(-5,5,150)\npi = np.pi\nS = Fourier([0],[0],[0])\nfor i in [1,2,3]:\n A = Fourier([4/(pi*i)],[i])\n plt.plot(x,evaluate_series(A,x),color='gray',linestyle=\":\")\n S += A\n\nyS = evaluate_series(S,x)\nplt.plot(x,yS,color='black')" ]
[ [ "matplotlib.pyplot.plot", "numpy.linspace", "matplotlib.pyplot.figure" ] ]
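The script sums partial Fourier series of the square wave, f(x) = (4/pi) * sum over odd n of sin(n*x)/n. Without the repo's Fourier class, the same partial sums can be sketched directly (illustrative only):

    import numpy as np
    import matplotlib.pyplot as plt

    x = np.linspace(-5, 5, 150)
    for n in (1, 3, 5):   # individual odd harmonics, dotted gray as above
        plt.plot(x, 4 / (np.pi * n) * np.sin(n * x), color='gray', linestyle=':')
    partial = sum(4 / (np.pi * n) * np.sin(n * x) for n in (1, 3, 5))
    plt.plot(x, partial, color='black')
    plt.show()

The second figure in the entry sums harmonics 1, 2, 3 instead; including even terms no longer converges toward a square wave, which appears to be the intended comparison.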
sergio1221/flask-backend
[ "11a9e0db5b5e664fcc820919d97039738176ac62" ]
[ "apps/fithm-service/libs/database/trade.py" ]
[ "from flask import current_app\r\nfrom libs.database import db_session\r\nfrom .portfolios import get_portfolios\r\nfrom apps.models import (\r\n Trade, Business, Portfolio, Pending, \r\n AccountPosition, Price, Account, Model,\r\n TradeRequest\r\n)\r\nimport pandas as pd\r\nfrom iexfinance.stocks import Stock\r\nfrom datetime import datetime\r\n\r\n# def remove_all_pending_positions(trade: Trade):\r\n\r\n# db_session.query(Price).filter(Price.trade_id == trade.id).delete(False)\r\n\r\n# pending_ids = [p.id for p in trade.pendings]\r\n# db_session.query(AccountPosition).filter(AccountPosition.pending_id.in_(pending_ids)).delete(False)\r\n# db_session.commit()\r\n\r\n\r\n# def update_account_positions(trade: Trade, positions: list[AccountPosition]):\r\n\r\n# if not len(positions):\r\n# return remove_all_pending_positions(trade)\r\n\r\n# new_positions, pending_positions = prepare_update(trade, positions)\r\n# if new_positions != None:\r\n# create_pending_account_positions(trade, new_positions, pending_positions)\r\n# remove_pending_account_positions(new_positions, pending_positions)\r\n\r\n\r\ndef get_trade_prices(trade: Trade, use: str = 'read'):\r\n\r\n prices = trade.prices\r\n if len(prices) == 0:\r\n return None\r\n df_prices = pd.DataFrame(prices)\r\n df_prices.columns = ['price_object']\r\n df_price_details = pd.DataFrame([vars(p) for p in prices])\r\n if use == \"read\":\r\n obj_detail = pd.concat([df_prices, df_price_details], axis=1).drop_duplicates(\r\n subset=['symbol']\r\n )\r\n else:\r\n obj_detail = pd.concat([df_prices, df_price_details], axis=1)\r\n\r\n # with pd.option_context('display.max_rows', None, 'display.max_columns', None):\r\n # print(obj_detail)\r\n return obj_detail\r\n\r\n\r\ndef update_trade_prices(trade: Trade, prices = None):\r\n\r\n if prices is None:\r\n prices = get_iex(trade)\r\n price_unique = [dict(t) for t in {tuple(d.items()) for d in prices}]\r\n ext_symbols = list(set([a['symbol'] for a in prices]))\r\n if len(price_unique) != len(ext_symbols):\r\n return 'You have attempted to use different prices for the same security.'\r\n\r\n current_prices = get_trade_prices(trade, use=\"write\")\r\n\r\n df_price_unique = pd.DataFrame(price_unique).set_index(['symbol'])\r\n current_prices = current_prices[\r\n current_prices['symbol'].isin([p['symbol'] for p in price_unique])\r\n ].drop('price', axis=1).set_index(['symbol'])\r\n current_prices['new_price'] = df_price_unique['price']\r\n\r\n def assign_it(row):\r\n row['price_object'].price = row['new_price']\r\n\r\n current_prices.apply(assign_it, axis=1)\r\n db_session.bulk_save_objects(current_prices['price_object'].tolist())\r\n db_session.commit()\r\n\r\n\r\ndef get_requests(trade: Trade, args: dict):\r\n\r\n return []\r\n \r\n # # get all symbols from all models in trade\r\n # pendings: list[Pending] = trade.pendings\r\n # pending_ids = [p.id for p in pendings]\r\n # positions = db_session.query(AccountPosition).filter(AccountPosition.pending_id.in_(pending_ids)).all()\r\n # portfolio_ids = [p.portfolio_id for p in pendings]\r\n # portfolio_objects = get_portfolios(portfolio_ids)\r\n\r\n # if any([True if p.model_id is None else False for p in portfolio_objects]):\r\n # return ValueError('One of your portfolios has not been assigned a model.')\r\n\r\n # model_ids = [p.model_id for p in portfolio_objects]\r\n # model_objects = db_session.query(Model).filter(Model.id.in_(model_ids)).all()\r\n # # add portfolio_id to each model position\r\n # for p in portfolio_objects:\r\n # for m in model_objects:\r\n # 
for a in m.allocation:\r\n # if p.model_id == m.id:\r\n # a.portfolio_id = p.id\r\n # model_allocations = [m.allocation for m in model_objects]\r\n # all_model_symbols = []\r\n # for allocation in model_allocations:\r\n # model_symbols = [m for m in allocation]\r\n # for m in model_symbols:\r\n # all_model_symbols.append(m)\r\n\r\n # df_model_positions = pd.DataFrame([vars(m) for m in all_model_symbols])\r\n # df_model_positions.drop(['_sa_instance_state'], inplace=True, axis=1)\r\n # df_model_positions.set_index(['portfolio_id'])\r\n\r\n # df_account_positions = pd.DataFrame([vars(p) for p in positions])\r\n # df_account_positions.drop(['_sa_instance_state'], inplace=True, axis=1)\r\n # df_account_positions.set_index(['portfolio_id'])\r\n\r\n # df_all_positions = pd.concat([df_model_positions, df_account_positions])\r\n # df_all_positions.set_index(['symbol'], inplace=True)\r\n\r\n # prices = trade.prices\r\n # df_prices = pd.DataFrame([vars(p) for p in prices]).drop_duplicates(subset=['symbol'])\r\n # df_prices.drop(['_sa_instance_state'], inplace=True, axis=1)\r\n # df_prices.set_index(['symbol'], inplace=True)\r\n\r\n # df_all_positions['price'] = df_prices['price']\r\n # df_all_positions['restrictions'] = float('NaN')\r\n # df_all_positions['trade_id'] = trade.id\r\n # df_all_positions.rename(columns={'weight': 'model_weight'}, inplace=True)\r\n # df_all_positions.reset_index(inplace=True)\r\n # df_all_positions.account_number.fillna('model', inplace=True)\r\n\r\n # all_requests = []\r\n # for i, port in df_all_positions.groupby('portfolio_id'):\r\n # trade_request_obj = {'portfolio_id': i}\r\n # trade_request_obj['portfolio'] = df_all_positions.loc[:,\r\n # ['account_number', 'symbol', 'shares', 'model_weight', 'price', 'restrictions']\r\n # ].to_dict('list')\r\n # all_requests.append(trade_request_obj)\r\n # if 'send' in args:\r\n # df_all_positions['archive'] = df_all_positions.apply(\r\n # lambda row: TradeRequest(\r\n # created=datetime.utcnow(),\r\n # trade_id=trade.id,\r\n # portfolio_id=row['portfolio_id'],\r\n # account_id=row['account_id'],\r\n # account_number=row['account_number'],\r\n # broker_name=row['broker_name'],\r\n # symbol=row['symbol'],\r\n # shares=row['shares'],\r\n # model_weight=row['model_weight'],\r\n # price=row['price'],\r\n # restrictions=float('NaN')), axis=1)\r\n # db_session.bulk_save_objects(df_all_positions.archive.tolist())\r\n # db_session.commit()\r\n # all_trades = []\r\n # # for t in all_requests:\r\n # # trade_manager = TradeManager('json', t['portfolio'])\r\n # # all_trades.append(trade_manager.trade_instructions.to_dict(orient='records'))\r\n # return all_trades\r\n # else:\r\n # return all_requests\r\n\r\n\r\ndef get_iex(trade: Trade):\r\n\r\n current = get_trade_prices(trade)['symbol']\r\n account_cash = current.loc[current == 'account_cash'].any()\r\n current = current.loc[current != 'account_cash']\r\n\r\n if current.empty:\r\n if account_cash:\r\n return [{'price': 1, 'symbol': 'account_cash'}]\r\n return []\r\n\r\n batch = Stock(current.tolist())\r\n prices = batch.get_price()\r\n if type(prices) == float:\r\n result = [{'symbol': current.tolist()[0], 'price': prices}]\r\n else:\r\n result = [{'price': v, 'symbol': i} for i, v in prices.items()]\r\n\r\n if account_cash:\r\n result.append({'price': 1, 'symbol': 'account_cash'})\r\n\r\n return result\r\n\r\n\r\n# def prepare_update(trade: Trade, positions: list[AccountPosition]):\r\n\r\n# pendings: list[Pending] = trade.pendings\r\n# if not len(pendings):\r\n# 
current_app.logger.error('You cannot add positions until portfolios have been added to the trade.')\r\n# return (None, None)\r\n\r\n# pending_portfolios: list[Portfolio] = [p.portfolio for p in pendings]\r\n# pending_accounts: list[list[Account]] = [a.accounts for a in pending_portfolios]\r\n\r\n# pending_details = pd.DataFrame(\r\n# [vars(p) for p in pendings]\r\n# ).set_index(['portfolio_id'])\r\n# pending_account_details = pd.DataFrame(\r\n# [vars(pos) for ap in pending_accounts for pos in ap]\r\n# ).set_index(['portfolio_id'])\r\n\r\n# pending_account_details['pending_id'] = pending_details['id']\r\n# pending_account_details.reset_index(inplace=True)\r\n\r\n# # check for positions being loaded that are not associated with any account in the trade\r\n# pending_account_details.set_index(['broker_name', 'account_number'], inplace=True)\r\n# new_position_details = pd.DataFrame(positions).groupby(\r\n# ['broker_name', 'account_number', 'symbol']\r\n# ).agg(lambda x: x.astype(float).sum()).reset_index()\r\n# new_account_details = new_position_details.drop_duplicates(\r\n# subset=['broker_name', 'account_number']\r\n# ).set_index(['broker_name', 'account_number'])\r\n\r\n# if any(pd.concat([pending_account_details, new_account_details], axis=1)['id'].isna()):\r\n# current_app.logger.error('You have a position that is not associated with any account currently loaded in the trade.')\r\n# return (None, None)\r\n\r\n# pending_account_position_details = [vars(ap) for pending in pendings for ap in pending.account_positions]\r\n# new_position_details.set_index(['broker_name', 'account_number'], inplace=True)\r\n# new_position_details = new_position_details.join(\r\n# pending_account_details, on=['broker_name', 'account_number']\r\n# ).reset_index()\r\n\r\n# return new_position_details, pending_account_position_details\r\n\r\n\r\n# def create_pending_account_positions(\r\n# trade: Trade, new_positions: pd.DataFrame, pending_positions: pd.DataFrame\r\n# ):\r\n\r\n# if len(pending_positions) == 0:\r\n# additions = new_positions\r\n# else:\r\n# new_positions.set_index(['broker_name', 'account_number', 'symbol'], inplace=True)\r\n# pending_account_position_details = pd.DataFrame(pending_positions).set_index(\r\n# ['broker_name', 'account_number', 'symbol']\r\n# )\r\n# # with pd.option_context('display.max_rows', None, 'display.max_columns', None):\r\n# # print(additions, '\\n', pending_account_position_details)\r\n# new_positions['account_position_id'] = pending_account_position_details['id']\r\n# # print(additions.loc[:,['id', 'account_id', 'account_position_id']])\r\n# additions = new_positions[new_positions.loc[:, 'account_position_id'].isna()].reset_index()\r\n# if not additions.empty:\r\n# additions['position'] = additions.apply(\r\n# lambda row: AccountPosition(\r\n# pending_id=row['pending_id'],\r\n# portfolio_id=row['portfolio_id'],\r\n# account_id=row['id'],\r\n# broker_name=row['broker_name'],\r\n# account_number=row['account_number'],\r\n# symbol=row['symbol'],\r\n# shares=row['shares']\r\n# ),\r\n# axis=1\r\n# )\r\n\r\n# db_session.bulk_save_objects(additions['position'].tolist())\r\n# db_session.commit()\r\n\r\n# additions['price'] = additions.apply(\r\n# lambda row: Price(\r\n# account_position_id=additions['position'].id,\r\n# trade_id=trade.id,\r\n# symbol=row['symbol']\r\n# ),\r\n# axis=1\r\n# )\r\n# db_session.bulk_save_objects(additions['price'].tolist())\r\n# db_session.commit()\r\n\r\n\r\n# def remove_pending_account_positions(new_positions, pending_positions):\r\n\r\n# if 
len(pending_positions) == 0:\r\n# return\r\n\r\n# # new_positions.set_index(['broker_name', 'account_number', 'symbol'], inplace=True)\r\n# pending_positions = pd.DataFrame(pending_positions).set_index(\r\n# ['broker_name', 'account_number', 'symbol']\r\n# )\r\n# pending_positions['account_position_id'] = new_positions['id']\r\n# deletions = pending_positions[pending_positions.loc[:, 'account_position_id'].isna()].reset_index()\r\n# # with pd.option_context('display.max_rows', None, 'display.max_columns', None):\r\n# # print(deletions)\r\n# if not deletions.empty:\r\n# db_session.query(Price).filter(\r\n# Price.account_position_id.in_(deletions['id'].tolist())\r\n# ).delete(synchronize_session=False)\r\n# db_session.query(AccountPosition).filter(\r\n# AccountPosition.id.in_(deletions['id'].tolist())\r\n# ).delete(synchronize_session=False)\r\n# db_session.commit()\r\n" ]
[ [ "pandas.concat", "pandas.DataFrame" ] ]
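update_trade_prices above deduplicates a list of price dicts by hashing their items, then applies new prices through index alignment. Both idioms in isolation (the ticker symbols here are made up):

    import pandas as pd

    prices = [{'symbol': 'AAPL', 'price': 190.0},
              {'symbol': 'AAPL', 'price': 190.0},
              {'symbol': 'MSFT', 'price': 410.0}]
    price_unique = [dict(t) for t in {tuple(d.items()) for d in prices}]
    # exact-duplicate dicts collapse because tuples of items are hashable

    current = pd.DataFrame({'symbol': ['AAPL', 'MSFT'],
                            'price': [185.0, 400.0]}).set_index('symbol')
    incoming = pd.DataFrame(price_unique).set_index('symbol')
    current['new_price'] = incoming['price']   # aligned on the symbol index
    print(current)

The guard in the original follows from the first idiom: if two dicts share a symbol but disagree on price, the deduplicated list is longer than the list of unique symbols, which is how conflicting prices are detected.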
helloworld1973/LivingNature_AIMakeUp
[ "06eefd202efc70d67e75994843ae0fdd6cd1e9ef" ]
[ "Ui_MakupGUI.py" ]
[ "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'C:\\pyprojects\\AIMakeup\\MakupGUI.ui'\n#\n# Created by: PyQt5 UI code generator 5.9\n#\n# WARNING! All changes made in this file will be lost!\n\nimport sys,os\nimport numpy as np\nimport cv2\nfrom PyQt5 import QtCore, QtWidgets\nfrom PyQt5.QtGui import QImage,QIcon,QPixmap\nfrom PyQt5.QtWidgets import QFileDialog,QMessageBox\nfrom AIMakeup import Makeup,Face,Organ,NoFace\n\nclass Ui_MainWindow(object):\n def __init__(self, MainWindow):\n self.window=MainWindow\n self._setupUi()\n #group the edit/operation/result widgets\n self.bg_edit=[self.bt_brightening,self.bt_whitening,self.bt_sharpen,self.bt_smooth]\n self.bg_op=[self.bt_confirm,self.bt_cancel,self.bt_reset]\n self.bg_result=[self.bt_view_compare,self.bt_save,self.bt_save_compare]\n self.sls=[self.sl_brightening,self.sl_sharpen,self.sl_whitening,self.sl_smooth]\n #label used to display the image\n self.label=QtWidgets.QLabel(self.window)\n self.sa.setWidget(self.label)\n #set widget states in batch\n self._set_statu(self.bg_edit,False)\n self._set_statu(self.bg_op,False)\n self._set_statu(self.bg_result,False)\n self._set_statu(self.sls,False)\n #load the dlib model file\n if os.path.exists(\"./data/shape_predictor_68_face_landmarks.dat\"):\n self.path_predictor=os.path.abspath(\"./data/shape_predictor_68_face_landmarks.dat\")\n else:\n QMessageBox.warning(self.centralWidget,'Warning','The default dlib model file path does not exist, please specify its location.\\\n \\nOr download it from http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2')\n self.path_predictor,_=QFileDialog.getOpenFileName(self.centralWidget,'Select dlib model file','./','Data Files(*.dat)')\n #instantiate the makeup engine\n self.mu=Makeup(self.path_predictor)\n \n self.path_img=''\n self._set_connect()\n\n def _set_connect(self):\n '''\n Wire up the UI logic\n '''\n self.bt_open.clicked.connect(self._open_img)\n for op in ['sharpen','whitening','smooth','brightening','cancel','confirm','reset','save','save_compare','view_compare']:\n self.__getattribute__('bt_'+op).clicked.connect(self.__getattribute__('_'+op))\n \n def _open_img(self):\n '''\n Open an image\n '''\n self.path_img,_=QFileDialog.getOpenFileName(self.centralWidget,'Open image file','./','Image Files(*.png *.jpg *.bmp)')\n if self.path_img and os.path.exists(self.path_img):\n print(self.path_img)\n self.im_bgr,self.temp_bgr,self.faces=self.mu.read_and_mark(self.path_img)\n self.im_ori,self.previous_bgr=self.im_bgr.copy(),self.im_bgr.copy()\n self._set_statu(self.bg_edit,True)\n self._set_statu(self.bg_op,True)\n self._set_statu(self.bg_result,True)\n self._set_statu(self.sls,True)\n self._set_img()\n else:\n QMessageBox.warning(self.centralWidget,'Invalid path','Invalid path, please choose again!')\n \n def _cv2qimg(self,cvImg):\n '''\n Convert an OpenCV image to a QImage\n '''\n height, width, channel = cvImg.shape\n bytesPerLine = 3 * width\n return QImage(cv2.cvtColor(cvImg,cv2.COLOR_BGR2RGB).data, width, height, bytesPerLine, QImage.Format_RGB888)\n \n def _set_img(self):\n '''\n Display the pixmap\n '''\n self.label.setPixmap(QPixmap.fromImage(self._cv2qimg(self.temp_bgr)))\n\n def _set_statu(self,group,value):\n '''\n Enable or disable a group of widgets\n '''\n [item.setEnabled(value) for item in group]\n \n def _confirm(self):\n '''\n Confirm the current edit\n '''\n self.im_bgr[:]=self.temp_bgr[:]\n \n def _cancel(self):\n '''\n Revert to the previous step\n '''\n self.temp_bgr[:]=self.previous_bgr[:]\n self._set_img()\n \n def _reset(self):\n '''\n Reset to the original image\n '''\n self.temp_bgr[:]=self.im_ori[:]\n self._set_img()\n \n def _mapfaces(self,fun,value):\n '''\n Apply an operation to every detected face\n '''\n self.previous_bgr[:]=self.temp_bgr[:]\n for face in self.faces[self.path_img]:\n fun(face,value)\n self._set_img()\n\n def _mapfacesliparea(self,fun):\n '''\n Apply an operation to the lip area of every detected face\n 
'''\n self.previous_bgr[:] = self.temp_bgr[:]\n for face in self.faces[self.path_img]:\n fun(face)\n self._set_img()\n \n def _sharpen(self):\n value=min(1,max(self.sl_sharpen.value()/200,0))\n print(value)\n def fun(face,value):\n face.organs['left eye'].sharpen(value,confirm=False)\n face.organs['right eye'].sharpen(value,confirm=False)\n self._mapfaces(fun,value)\n \n def _whitening(self):\n def fun(face):\n face.organs['mouth'].lipstipN1()\n self._mapfacesliparea(fun)\n\n '''\n value=min(1,max(self.sl_whitening.value()/200,0))\n print(value)\n def fun(face,v):\n face.organs['left eye'].whitening(value,confirm=False)\n face.organs['right eye'].whitening(value,confirm=False)\n face.organs['left brow'].whitening(value,confirm=False)\n face.organs['right brow'].whitening(value,confirm=False)\n face.organs['nose'].whitening(value,confirm=False)\n face.organs['forehead'].whitening(value,confirm=False)\n face.organs['mouth'].whitening(value,confirm=False)\n face.whitening(value,confirm=False)\n self._mapfaces(fun,value)\n '''\n\n def _brightening(self):\n value = min(1, max(self.sl_brightening.value() / 200.00, 0))\n print(value)\n\n def fun(face, value):\n face.organs['mouth'].brightening(value, confirm=False)\n\n self._mapfaces(fun, value)\n \n def _smooth(self):\n value=min(1,max(self.sl_smooth.value()/100,0))\n def fun(face,value):\n face.smooth(value,confirm=False)\n face.organs['left eye'].smooth(value*2/3,confirm=False)\n face.organs['right eye'].smooth(value*2/3,confirm=False)\n face.organs['left brow'].smooth(value*2/3,confirm=False)\n face.organs['right brow'].smooth(value*2/3,confirm=False)\n face.organs['nose'].smooth(value*2/3,confirm=False)\n face.organs['forehead'].smooth(value*3/2,confirm=False)\n face.organs['mouth'].smooth(value,confirm=False)\n self._mapfaces(fun,value)\n \n def _save(self):\n output_path,_=QFileDialog.getSaveFileName(self.centralWidget,'Choose save location','./','Image Files(*.png *.jpg *.bmp)')\n if output_path:\n self.save(output_path,self.im_bgr)\n else:\n QMessageBox.warning(self.centralWidget,'Invalid path','Invalid path, please choose again!')\n \n def _save_compare(self):\n output_path,_=QFileDialog.getSaveFileName(self.centralWidget,'Choose save location','./','Image Files(*.png *.jpg *.bmp)')\n if output_path:\n self.save(output_path,np.concatenate([self.im_ori,self.im_bgr],1))\n else:\n QMessageBox.warning(self.centralWidget,'Invalid path','Invalid path, please choose again!')\n \n def _view_compare(self):\n cv2.imshow('Compare',np.concatenate([self.im_ori,self.im_bgr],1))\n cv2.waitKey()\n \n def _setupUi(self):\n self.window.setObjectName(\"MainWindow\")\n self.window.resize(837, 838)\n self.centralWidget = QtWidgets.QWidget(self.window)\n self.centralWidget.setObjectName(\"centralWidget\")\n self.verticalLayout = QtWidgets.QVBoxLayout(self.centralWidget)\n self.verticalLayout.setObjectName(\"verticalLayout\")\n self.sa = QtWidgets.QScrollArea(self.centralWidget)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.sa.sizePolicy().hasHeightForWidth())\n self.sa.setSizePolicy(sizePolicy)\n self.sa.setWidgetResizable(True)\n self.sa.setObjectName(\"sa\")\n self.scrollAreaWidgetContents = QtWidgets.QWidget()\n self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 813, 532))\n self.scrollAreaWidgetContents.setObjectName(\"scrollAreaWidgetContents\")\n self.sa.setWidget(self.scrollAreaWidgetContents)\n self.verticalLayout.addWidget(self.sa)\n self.gridLayout = 
QtWidgets.QGridLayout()\n self.gridLayout.setObjectName(\"gridLayout\")\n self.bt_whitening = QtWidgets.QPushButton(self.centralWidget)\n self.bt_whitening.setObjectName(\"bt_whitening\")\n self.gridLayout.addWidget(self.bt_whitening, 0, 0, 1, 1)\n self.sl_whitening = QtWidgets.QSlider(self.centralWidget)\n self.sl_whitening.setOrientation(QtCore.Qt.Horizontal)\n self.sl_whitening.setObjectName(\"sl_whitening\")\n self.gridLayout.addWidget(self.sl_whitening, 0, 1, 1, 1)\n spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\n self.gridLayout.addItem(spacerItem, 0, 2, 1, 1)\n self.bt_smooth = QtWidgets.QPushButton(self.centralWidget)\n self.bt_smooth.setObjectName(\"bt_smooth\")\n self.gridLayout.addWidget(self.bt_smooth, 1, 0, 1, 1)\n self.sl_smooth = QtWidgets.QSlider(self.centralWidget)\n self.sl_smooth.setOrientation(QtCore.Qt.Horizontal)\n self.sl_smooth.setObjectName(\"sl_smooth\")\n self.gridLayout.addWidget(self.sl_smooth, 1, 1, 1, 1)\n spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\n self.gridLayout.addItem(spacerItem1, 1, 2, 1, 1)\n self.bt_sharpen = QtWidgets.QPushButton(self.centralWidget)\n self.bt_sharpen.setObjectName(\"bt_sharpen\")\n self.gridLayout.addWidget(self.bt_sharpen, 2, 0, 1, 1)\n self.sl_sharpen = QtWidgets.QSlider(self.centralWidget)\n self.sl_sharpen.setOrientation(QtCore.Qt.Horizontal)\n self.sl_sharpen.setObjectName(\"sl_sharpen\")\n self.gridLayout.addWidget(self.sl_sharpen, 2, 1, 1, 1)\n spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\n self.gridLayout.addItem(spacerItem2, 2, 2, 1, 1)\n self.bt_brightening = QtWidgets.QPushButton(self.centralWidget)\n self.bt_brightening.setObjectName(\"bt_brightening\")\n self.gridLayout.addWidget(self.bt_brightening, 3, 0, 1, 1)\n self.sl_brightening = QtWidgets.QSlider(self.centralWidget)\n self.sl_brightening.setOrientation(QtCore.Qt.Horizontal)\n self.sl_brightening.setObjectName(\"sl_brightening\")\n self.gridLayout.addWidget(self.sl_brightening, 3, 1, 1, 1)\n spacerItem3 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\n self.gridLayout.addItem(spacerItem3, 3, 2, 1, 1)\n self.bt_open = QtWidgets.QPushButton(self.centralWidget)\n self.bt_open.setObjectName(\"bt_open\")\n self.gridLayout.addWidget(self.bt_open, 4, 0, 1, 1)\n self.bt_confirm = QtWidgets.QPushButton(self.centralWidget)\n self.bt_confirm.setObjectName(\"bt_confirm\")\n self.gridLayout.addWidget(self.bt_confirm, 5, 0, 1, 1)\n self.bt_cancel = QtWidgets.QPushButton(self.centralWidget)\n self.bt_cancel.setObjectName(\"bt_cancel\")\n self.gridLayout.addWidget(self.bt_cancel, 5, 1, 1, 1)\n self.bt_reset = QtWidgets.QPushButton(self.centralWidget)\n self.bt_reset.setObjectName(\"bt_reset\")\n self.gridLayout.addWidget(self.bt_reset, 5, 2, 1, 1)\n self.bt_view_compare = QtWidgets.QPushButton(self.centralWidget)\n self.bt_view_compare.setObjectName(\"bt_view_compare\")\n self.gridLayout.addWidget(self.bt_view_compare, 6, 0, 1, 1)\n self.bt_save = QtWidgets.QPushButton(self.centralWidget)\n self.bt_save.setObjectName(\"bt_save\")\n self.gridLayout.addWidget(self.bt_save, 7, 1, 1, 1)\n self.bt_save_compare = QtWidgets.QPushButton(self.centralWidget)\n self.bt_save_compare.setObjectName(\"bt_save_compare\")\n self.gridLayout.addWidget(self.bt_save_compare, 7, 2, 1, 1)\n self.verticalLayout.addLayout(self.gridLayout)\n 
self.window.setCentralWidget(self.centralWidget)\n\n self.retranslateUi()\n QtCore.QMetaObject.connectSlotsByName(self.window)\n\n def save(self,output_path,output_im):\n '''\n 保存图片\n '''\n cv2.imencode('.jpg',output_im)[1].tofile(output_path)\n\n def retranslateUi(self):\n _translate = QtCore.QCoreApplication.translate\n self.window.setWindowTitle(_translate(\"MainWindow\", \"AI美颜\"))\n self.bt_whitening.setText(_translate(\"MainWindow\", \"美白\"))\n self.bt_smooth.setText(_translate(\"MainWindow\", \"磨皮\"))\n self.bt_sharpen.setText(_translate(\"MainWindow\", \"亮眼\"))\n self.bt_brightening.setText(_translate(\"MainWindow\", \"红唇\"))\n self.bt_open.setText(_translate(\"MainWindow\", \"打开文件\"))\n self.bt_confirm.setText(_translate(\"MainWindow\", \"确认更改\"))\n self.bt_cancel.setText(_translate(\"MainWindow\", \"撤销更改\"))\n self.bt_reset.setText(_translate(\"MainWindow\", \"还原\"))\n self.bt_view_compare.setText(_translate(\"MainWindow\", \"查看对比\"))\n self.bt_save.setText(_translate(\"MainWindow\", \"保存\"))\n self.bt_save_compare.setText(_translate(\"MainWindow\", \"保存对比图\"))\n\n\nif __name__ == \"__main__\":\n app = QtWidgets.QApplication(sys.argv)\n MainWindow = QtWidgets.QMainWindow()\n ui = Ui_MainWindow(MainWindow)\n ui.window.show()\n sys.exit(app.exec_())\n\n" ]
[ [ "numpy.concatenate" ] ]
murphygroup/CellSegmentationEvaluator
[ "6f5d850148d52dc2d08576da42f700c4669d71e4" ]
[ "pipeline/segmentation/methods/convert_to_indexed_image.py" ]
[ "from skimage.io import imread\nimport sys\nimport numpy as np\nfrom os.path import join\nfrom skimage.color import rgb2gray\nimport bz2\nimport pickle\n# from skimage.measure import label\nimport matplotlib.pyplot as plt\nfrom scipy.ndimage import label\n\ndef convert_to_indexed(img):\n\tunique_cell = np.unique(img)\n\tn_cell = len(np.unique(img))\n\tfor i in range(n_cell):\n\t\timg[np.where(img == unique_cell[i])] = -i\n\treturn -img\n\nif __name__ == '__main__':\n\tfile_dir = sys.argv[1]\n\ttry:\n\t\timg = np.load(join(file_dir, 'mask_' + sys.argv[2] + '.npy'))\n\texcept:\n\t\timg = imread(join(file_dir, 'mask_' + sys.argv[2] + '.png'), as_gray=True)\n\tif sys.argv[2] == 'CellX' or 'cellsegm':\n\t\timg = label(img)[0]\n\t\n\telse:\n\t\timg = convert_to_indexed(img)\n\tsave_dir = bz2.BZ2File(join(file_dir, 'mask_' + sys.argv[2] + '.pickle'), 'wb')\n\tpickle.dump(img.astype('uint16'), save_dir)\n" ]
[ [ "scipy.ndimage.label", "numpy.where", "numpy.unique" ] ]
Ceglowa/shap
[ "585573ff8f93c39073f58948a886d78a7154a2f6" ]
[ "shap/plots/_waterfall.py" ]
[ "import numpy as np\nimport warnings\ntry:\n import matplotlib.pyplot as pl\n import matplotlib\nexcept ImportError:\n warnings.warn(\"matplotlib could not be loaded!\")\n pass\nfrom ._labels import labels\nfrom ..utils import safe_isinstance, format_value\nfrom . import colors\n\n\n# TODO: If we make a JS version of this plot then we could let users click on a bar and then see the dependence\n# plot that is associated with that feature get overlayed on the plot...it would quickly allow users to answer\n# why a feature is pushing down or up. Perhaps the best way to do this would be with an ICE plot hanging off\n# of the bar...\ndef waterfall(shap_values, max_display=10, show=True):\n \"\"\" Plots an explantion of a single prediction as a waterfall plot.\n\n The SHAP value of a feature represents the impact of the evidence provided by that feature on the model's\n output. The waterfall plot is designed to visually display how the SHAP values (evidence) of each feature\n move the model output from our prior expectation under the background data distribution, to the final model\n prediction given the evidence of all the features. Features are sorted by the magnitude of their SHAP values\n with the smallest magnitude features grouped together at the bottom of the plot when the number of features\n in the models exceeds the max_display parameter.\n \n Parameters\n ----------\n shap_values : Explanation\n A one-dimensional Explanation object that contains the feature values and SHAP values to plot.\n\n max_display : str\n The maximum number of features to plot.\n\n show : bool\n Whether matplotlib.pyplot.show() is called before returning. Setting this to False allows the plot\n to be customized further after it has been created.\n \"\"\"\n \n # Turn off interactive plot\n if show is False:\n pl.ioff()\n \n\n base_values = shap_values.base_values\n \n features = shap_values.data\n feature_names = shap_values.feature_names\n lower_bounds = getattr(shap_values, \"lower_bounds\", None)\n upper_bounds = getattr(shap_values, \"upper_bounds\", None)\n values = shap_values.values\n\n # make sure we only have a single output to explain\n if (type(base_values) == np.ndarray and len(base_values) > 0) or type(base_values) == list:\n raise Exception(\"waterfall_plot requires a scalar base_values of the model output as the first \" \\\n \"parameter, but you have passed an array as the first parameter! 
\" \\\n \"Try shap.waterfall_plot(explainer.base_values[0], values[0], X[0]) or \" \\\n \"for multi-output models try \" \\\n \"shap.waterfall_plot(explainer.base_values[0], values[0][0], X[0]).\")\n\n # make sure we only have a single explanation to plot\n if len(values.shape) == 2:\n raise Exception(\"The waterfall_plot can currently only plot a single explanation but a matrix of explanations was passed!\")\n \n # unwrap pandas series\n if safe_isinstance(features, \"pandas.core.series.Series\"):\n if feature_names is None:\n feature_names = list(features.index)\n features = features.values\n\n # fallback feature names\n if feature_names is None:\n feature_names = np.array([labels['FEATURE'] % str(i) for i in range(len(values))])\n \n # init variables we use for tracking the plot locations\n num_features = min(max_display, len(values))\n row_height = 0.5\n rng = range(num_features - 1, -1, -1)\n order = np.argsort(-np.abs(values))\n pos_lefts = []\n pos_inds = []\n pos_widths = []\n pos_low = []\n pos_high = []\n neg_lefts = []\n neg_inds = []\n neg_widths = []\n neg_low = []\n neg_high = []\n loc = base_values + values.sum()\n yticklabels = [\"\" for i in range(num_features + 1)]\n \n # size the plot based on how many features we are plotting\n pl.gcf().set_size_inches(8, num_features * row_height + 1.5)\n\n # see how many individual (vs. grouped at the end) features we are plotting\n if num_features == len(values):\n num_individual = num_features\n else:\n num_individual = num_features - 1\n\n # compute the locations of the individual features and plot the dashed connecting lines\n for i in range(num_individual):\n sval = values[order[i]]\n loc -= sval\n if sval >= 0:\n pos_inds.append(rng[i])\n pos_widths.append(sval)\n if lower_bounds is not None:\n pos_low.append(lower_bounds[order[i]])\n pos_high.append(upper_bounds[order[i]])\n pos_lefts.append(loc)\n else:\n neg_inds.append(rng[i])\n neg_widths.append(sval)\n if lower_bounds is not None:\n neg_low.append(lower_bounds[order[i]])\n neg_high.append(upper_bounds[order[i]])\n neg_lefts.append(loc)\n if num_individual != num_features or i + 4 < num_individual:\n pl.plot([loc, loc], [rng[i] -1 - 0.4, rng[i] + 0.4], color=\"#bbbbbb\", linestyle=\"--\", linewidth=0.5, zorder=-1)\n if features is None:\n yticklabels[rng[i]] = feature_names[order[i]]\n else:\n yticklabels[rng[i]] = format_value(features[order[i]], \"%0.03f\") + \" = \" + feature_names[order[i]] \n \n # add a last grouped feature to represent the impact of all the features we didn't show\n if num_features < len(values):\n yticklabels[0] = \"%d other features\" % (len(values) - num_features + 1)\n remaining_impact = base_values - loc\n if remaining_impact < 0:\n pos_inds.append(0)\n pos_widths.append(-remaining_impact)\n pos_lefts.append(loc + remaining_impact)\n c = colors.red_rgb\n else:\n neg_inds.append(0)\n neg_widths.append(-remaining_impact)\n neg_lefts.append(loc + remaining_impact)\n c = colors.blue_rgb\n\n points = pos_lefts + list(np.array(pos_lefts) + np.array(pos_widths)) + neg_lefts + list(np.array(neg_lefts) + np.array(neg_widths))\n dataw = np.max(points) - np.min(points)\n \n # draw invisible bars just for sizing the axes\n label_padding = np.array([0.1*dataw if w < 1 else 0 for w in pos_widths])\n pl.barh(pos_inds, np.array(pos_widths) + label_padding + 0.02*dataw, left=np.array(pos_lefts) - 0.01*dataw, color=colors.red_rgb, alpha=0)\n label_padding = np.array([-0.1*dataw if -w < 1 else 0 for w in neg_widths])\n pl.barh(neg_inds, np.array(neg_widths) + 
label_padding - 0.02*dataw, left=np.array(neg_lefts) + 0.01*dataw, color=colors.blue_rgb, alpha=0)\n \n # define variable we need for plotting the arrows\n head_length = 0.08\n bar_width = 0.8\n xlen = pl.xlim()[1] - pl.xlim()[0]\n fig = pl.gcf()\n ax = pl.gca()\n xticks = ax.get_xticks()\n bbox = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())\n width, height = bbox.width, bbox.height\n bbox_to_xscale = xlen/width\n hl_scaled = bbox_to_xscale * head_length\n renderer = fig.canvas.get_renderer()\n \n # draw the positive arrows\n for i in range(len(pos_inds)):\n dist = pos_widths[i]\n arrow_obj = pl.arrow(\n pos_lefts[i], pos_inds[i], max(dist-hl_scaled, 0.000001), 0,\n head_length=min(dist, hl_scaled),\n color=colors.red_rgb, width=bar_width,\n head_width=bar_width\n )\n \n if pos_low is not None and i < len(pos_low):\n pl.errorbar(\n pos_lefts[i] + pos_widths[i], pos_inds[i], \n xerr=np.array([[pos_widths[i] - pos_low[i]], [pos_high[i] - pos_widths[i]]]),\n ecolor=colors.light_red_rgb\n )\n\n txt_obj = pl.text(\n pos_lefts[i] + 0.5*dist, pos_inds[i], format_value(1/(1+np.exp(-pos_widths[i])), '%+0.04f'),\n horizontalalignment='center', verticalalignment='center', color=\"white\",\n fontsize=12\n )\n text_bbox = txt_obj.get_window_extent(renderer=renderer)\n arrow_bbox = arrow_obj.get_window_extent(renderer=renderer)\n \n # if the text overflows the arrow then draw it after the arrow\n if text_bbox.width > arrow_bbox.width: \n txt_obj.remove()\n \n txt_obj = pl.text(\n pos_lefts[i] + (5/72)*bbox_to_xscale + dist, pos_inds[i], format_value(pos_widths[i], '%+0.02f'),\n horizontalalignment='left', verticalalignment='center', color=colors.red_rgb,\n fontsize=12\n )\n \n # draw the negative arrows\n for i in range(len(neg_inds)):\n dist = neg_widths[i]\n \n arrow_obj = pl.arrow(\n neg_lefts[i], neg_inds[i], -max(-dist-hl_scaled, 0.000001), 0,\n head_length=min(-dist, hl_scaled),\n color=colors.blue_rgb, width=bar_width,\n head_width=bar_width\n )\n\n if neg_low is not None and i < len(neg_low):\n pl.errorbar(\n neg_lefts[i] + neg_widths[i], neg_inds[i], \n xerr=np.array([[neg_widths[i] - neg_low[i]], [neg_high[i] - neg_widths[i]]]),\n ecolor=colors.light_blue_rgb\n )\n \n txt_obj = pl.text(\n neg_lefts[i] + 0.5*dist, neg_inds[i], format_value(1/(1+np.exp(-neg_widths[i])), '%+0.02f'),\n horizontalalignment='center', verticalalignment='center', color=\"white\",\n fontsize=12\n )\n text_bbox = txt_obj.get_window_extent(renderer=renderer)\n arrow_bbox = arrow_obj.get_window_extent(renderer=renderer)\n \n # if the text overflows the arrow then draw it after the arrow\n if text_bbox.width > arrow_bbox.width: \n txt_obj.remove()\n \n txt_obj = pl.text(\n neg_lefts[i] - (5/72)*bbox_to_xscale + dist, neg_inds[i], format_value(neg_widths[i], '%+0.02f'),\n horizontalalignment='right', verticalalignment='center', color=colors.blue_rgb,\n fontsize=12\n )\n\n # draw the y-ticks twice, once in gray and then again with just the feature names in black\n ytick_pos = list(range(num_features)) + list(np.arange(num_features)+1e-8) # The 1e-8 is so matplotlib 3.3 doesn't try and collapse the ticks\n pl.yticks(ytick_pos, yticklabels[:-1] + [l.split('=')[-1] for l in yticklabels[:-1]], fontsize=13)\n \n # put horizontal lines for each feature row\n for i in range(num_features):\n pl.axhline(i, color=\"#cccccc\", lw=0.5, dashes=(1, 5), zorder=-1)\n \n # mark the prior expected value and the model prediction\n pl.axvline(base_values, 0, 1/num_features, color=\"#bbbbbb\", linestyle=\"--\", 
linewidth=0.5, zorder=-1)\n fx = base_values + values.sum()\n pl.axvline(fx, 0, 1, color=\"#bbbbbb\", linestyle=\"--\", linewidth=0.5, zorder=-1)\n \n # clean up the main axis\n pl.gca().xaxis.set_ticks_position('bottom')\n pl.gca().yaxis.set_ticks_position('none')\n pl.gca().spines['right'].set_visible(False)\n pl.gca().spines['top'].set_visible(False)\n pl.gca().spines['left'].set_visible(False)\n ax.tick_params(labelsize=13)\n #pl.xlabel(\"\\nModel output\", fontsize=12)\n\n # draw the E[f(X)] tick mark\n xmin,xmax = ax.get_xlim()\n ax2=ax.twiny()\n ax2.set_xlim(xmin,xmax)\n ax2.set_xticks([base_values, base_values+1e-8]) # The 1e-8 is so matplotlib 3.3 doesn't try and collapse the ticks\n ax2.set_xticklabels([\"\\n$E[f(X)]$\",\"\\n$ = \"+format_value(base_values, \"%0.03f\")+\"$\"], fontsize=12, ha=\"left\")\n ax2.spines['right'].set_visible(False)\n ax2.spines['top'].set_visible(False)\n ax2.spines['left'].set_visible(False)\n\n # draw the f(x) tick mark\n ax3=ax2.twiny()\n ax3.set_xlim(xmin,xmax)\n ax3.set_xticks([base_values + values.sum(), base_values + values.sum() + 1e-8]) # The 1e-8 is so matplotlib 3.3 doesn't try and collapse the ticks\n ax3.set_xticklabels([\"$f(x)$\",\"$ = \"+format_value(fx, \"%0.03f\")+\"$\"], fontsize=12, ha=\"left\")\n tick_labels = ax3.xaxis.get_majorticklabels()\n tick_labels[0].set_transform(tick_labels[0].get_transform() + matplotlib.transforms.ScaledTranslation(-10/72., 0, fig.dpi_scale_trans))\n tick_labels[1].set_transform(tick_labels[1].get_transform() + matplotlib.transforms.ScaledTranslation(12/72., 0, fig.dpi_scale_trans))\n tick_labels[1].set_color(\"#999999\")\n ax3.spines['right'].set_visible(False)\n ax3.spines['top'].set_visible(False)\n ax3.spines['left'].set_visible(False)\n\n # adjust the position of the E[f(X)] = x.xx label\n tick_labels = ax2.xaxis.get_majorticklabels()\n tick_labels[0].set_transform(tick_labels[0].get_transform() + matplotlib.transforms.ScaledTranslation(-20/72., 0, fig.dpi_scale_trans))\n tick_labels[1].set_transform(tick_labels[1].get_transform() + matplotlib.transforms.ScaledTranslation(22/72., -1/72., fig.dpi_scale_trans))\n \n tick_labels[1].set_color(\"#999999\")\n\n # color the y tick labels that have the feature values as gray\n # (these fall behind the black ones with just the feature name)\n tick_labels = ax.yaxis.get_majorticklabels()\n for i in range(num_features):\n tick_labels[i].set_color(\"#999999\")\n \n if show:\n pl.show()\n else:\n return pl.gcf()\n\n\n\ndef waterfall_legacy(expected_value, shap_values=None, features=None, feature_names=None, max_display=10, show=True):\n \"\"\" Plots an explantion of a single prediction as a waterfall plot.\n\n The SHAP value of a feature represents the impact of the evidence provided by that feature on the model's\n output. The waterfall plot is designed to visually display how the SHAP values (evidence) of each feature\n move the model output from our prior expectation under the background data distribution, to the final model\n prediction given the evidence of all the features. Features are sorted by the magnitude of their SHAP values\n with the smallest magnitude features grouped together at the bottom of the plot when the number of features\n in the models exceeds the max_display parameter.\n \n Parameters\n ----------\n expected_value : float\n This is the reference value that the feature contributions start from. 
For SHAP values it should\n be the value of explainer.expected_value.\n\n shap_values : numpy.array\n One dimensional array of SHAP values.\n\n features : numpy.array\n One dimensional array of feature values. This provides the values of all the\n features, and should be the same shape as the shap_values argument.\n\n feature_names : list\n List of feature names (# features).\n\n max_display : str\n The maximum number of features to plot.\n\n show : bool\n Whether matplotlib.pyplot.show() is called before returning. Setting this to False allows the plot\n to be customized further after it has been created.\n \"\"\"\n\n # Turn off interactive plot\n if show is False:\n pl.ioff()\n \n # support passing an explanation object\n upper_bounds = None\n lower_bounds = None\n if str(type(expected_value)).endswith(\"Explanation'>\"):\n shap_exp = expected_value\n expected_value = shap_exp.expected_value\n shap_values = shap_exp.values\n features = shap_exp.data\n feature_names = shap_exp.feature_names\n lower_bounds = getattr(shap_exp, \"lower_bounds\", None)\n upper_bounds = getattr(shap_exp, \"upper_bounds\", None)\n\n # make sure we only have a single output to explain\n if (type(expected_value) == np.ndarray and len(expected_value) > 0) or type(expected_value) == list:\n raise Exception(\"waterfall_plot requires a scalar expected_value of the model output as the first \" \\\n \"parameter, but you have passed an array as the first parameter! \" \\\n \"Try shap.waterfall_plot(explainer.expected_value[0], shap_values[0], X[0]) or \" \\\n \"for multi-output models try \" \\\n \"shap.waterfall_plot(explainer.expected_value[0], shap_values[0][0], X[0]).\")\n\n # make sure we only have a single explanation to plot\n if len(shap_values.shape) == 2:\n raise Exception(\"The waterfall_plot can currently only plot a single explanation but a matrix of explanations was passed!\")\n \n # unwrap pandas series\n if safe_isinstance(features, \"pandas.core.series.Series\"):\n if feature_names is None:\n feature_names = list(features.index)\n features = features.values\n\n # fallback feature names\n if feature_names is None:\n feature_names = np.array([labels['FEATURE'] % str(i) for i in range(len(shap_values))])\n \n # init variables we use for tracking the plot locations\n num_features = min(max_display, len(shap_values))\n row_height = 0.5\n rng = range(num_features - 1, -1, -1)\n order = np.argsort(-np.abs(shap_values))\n pos_lefts = []\n pos_inds = []\n pos_widths = []\n pos_low = []\n pos_high = []\n neg_lefts = []\n neg_inds = []\n neg_widths = []\n neg_low = []\n neg_high = []\n loc = expected_value + shap_values.sum()\n yticklabels = [\"\" for i in range(num_features + 1)]\n \n # size the plot based on how many features we are plotting\n pl.gcf().set_size_inches(8, num_features * row_height + 1.5)\n\n # see how many individual (vs. 
grouped at the end) features we are plotting\n if num_features == len(shap_values):\n num_individual = num_features\n else:\n num_individual = num_features - 1\n\n # compute the locations of the individual features and plot the dashed connecting lines\n for i in range(num_individual):\n sval = shap_values[order[i]]\n loc -= sval\n if sval >= 0:\n pos_inds.append(rng[i])\n pos_widths.append(sval)\n if lower_bounds is not None:\n pos_low.append(lower_bounds[order[i]])\n pos_high.append(upper_bounds[order[i]])\n pos_lefts.append(loc)\n else:\n neg_inds.append(rng[i])\n neg_widths.append(sval)\n if lower_bounds is not None:\n neg_low.append(lower_bounds[order[i]])\n neg_high.append(upper_bounds[order[i]])\n neg_lefts.append(loc)\n if num_individual != num_features or i + 4 < num_individual:\n pl.plot([loc, loc], [rng[i] -1 - 0.4, rng[i] + 0.4], color=\"#bbbbbb\", linestyle=\"--\", linewidth=0.5, zorder=-1)\n if features is None:\n yticklabels[rng[i]] = feature_names[order[i]]\n else:\n yticklabels[rng[i]] = format_value(features[order[i]], \"%0.03f\") + \" = \" + feature_names[order[i]] \n \n # add a last grouped feature to represent the impact of all the features we didn't show\n if num_features < len(shap_values):\n yticklabels[0] = \"%d other features\" % (len(shap_values) - num_features + 1)\n remaining_impact = expected_value - loc\n if remaining_impact < 0:\n pos_inds.append(0)\n pos_widths.append(-remaining_impact)\n pos_lefts.append(loc + remaining_impact)\n c = colors.red_rgb\n else:\n neg_inds.append(0)\n neg_widths.append(-remaining_impact)\n neg_lefts.append(loc + remaining_impact)\n c = colors.blue_rgb\n\n points = pos_lefts + list(np.array(pos_lefts) + np.array(pos_widths)) + neg_lefts + list(np.array(neg_lefts) + np.array(neg_widths))\n dataw = np.max(points) - np.min(points)\n \n # draw invisible bars just for sizing the axes\n label_padding = np.array([0.1*dataw if w < 1 else 0 for w in pos_widths])\n pl.barh(pos_inds, np.array(pos_widths) + label_padding + 0.02*dataw, left=np.array(pos_lefts) - 0.01*dataw, color=colors.red_rgb, alpha=0)\n label_padding = np.array([-0.1*dataw if -w < 1 else 0 for w in neg_widths])\n pl.barh(neg_inds, np.array(neg_widths) + label_padding - 0.02*dataw, left=np.array(neg_lefts) + 0.01*dataw, color=colors.blue_rgb, alpha=0)\n \n # define variable we need for plotting the arrows\n head_length = 0.08\n bar_width = 0.8\n xlen = pl.xlim()[1] - pl.xlim()[0]\n fig = pl.gcf()\n ax = pl.gca()\n xticks = ax.get_xticks()\n bbox = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())\n width, height = bbox.width, bbox.height\n bbox_to_xscale = xlen/width\n hl_scaled = bbox_to_xscale * head_length\n renderer = fig.canvas.get_renderer()\n \n # draw the positive arrows\n for i in range(len(pos_inds)):\n dist = pos_widths[i]\n arrow_obj = pl.arrow(\n pos_lefts[i], pos_inds[i], max(dist-hl_scaled, 0.000001), 0,\n head_length=min(dist, hl_scaled),\n color=colors.red_rgb, width=bar_width,\n head_width=bar_width\n )\n \n if pos_low is not None and i < len(pos_low):\n pl.errorbar(\n pos_lefts[i] + pos_widths[i], pos_inds[i], \n xerr=np.array([[pos_widths[i] - pos_low[i]], [pos_high[i] - pos_widths[i]]]),\n ecolor=colors.light_red_rgb\n )\n\n txt_obj = pl.text(\n pos_lefts[i] + 0.5*dist, pos_inds[i], format_value(pos_widths[i], '%+0.02f'),\n horizontalalignment='center', verticalalignment='center', color=\"white\",\n fontsize=12\n )\n text_bbox = txt_obj.get_window_extent(renderer=renderer)\n arrow_bbox = arrow_obj.get_window_extent(renderer=renderer)\n 
\n # if the text overflows the arrow then draw it after the arrow\n if text_bbox.width > arrow_bbox.width: \n txt_obj.remove()\n \n txt_obj = pl.text(\n pos_lefts[i] + (5/72)*bbox_to_xscale + dist, pos_inds[i], format_value(pos_widths[i], '%+0.02f'),\n horizontalalignment='left', verticalalignment='center', color=colors.red_rgb,\n fontsize=12\n )\n \n # draw the negative arrows\n for i in range(len(neg_inds)):\n dist = neg_widths[i]\n \n arrow_obj = pl.arrow(\n neg_lefts[i], neg_inds[i], -max(-dist-hl_scaled, 0.000001), 0,\n head_length=min(-dist, hl_scaled),\n color=colors.blue_rgb, width=bar_width,\n head_width=bar_width\n )\n\n if neg_low is not None and i < len(neg_low):\n pl.errorbar(\n neg_lefts[i] + neg_widths[i], neg_inds[i], \n xerr=np.array([[neg_widths[i] - neg_low[i]], [neg_high[i] - neg_widths[i]]]),\n ecolor=colors.light_blue_rgb\n )\n \n txt_obj = pl.text(\n neg_lefts[i] + 0.5*dist, neg_inds[i], format_value(neg_widths[i], '%+0.02f'),\n horizontalalignment='center', verticalalignment='center', color=\"white\",\n fontsize=12\n )\n text_bbox = txt_obj.get_window_extent(renderer=renderer)\n arrow_bbox = arrow_obj.get_window_extent(renderer=renderer)\n \n # if the text overflows the arrow then draw it after the arrow\n if text_bbox.width > arrow_bbox.width: \n txt_obj.remove()\n \n txt_obj = pl.text(\n neg_lefts[i] - (5/72)*bbox_to_xscale + dist, neg_inds[i], format_value(neg_widths[i], '%+0.02f'),\n horizontalalignment='right', verticalalignment='center', color=colors.blue_rgb,\n fontsize=12\n )\n\n # draw the y-ticks twice, once in gray and then again with just the feature names in black\n pl.yticks(list(range(num_features))*2, yticklabels[:-1] + [l.split('=')[-1] for l in yticklabels[:-1]], fontsize=13)\n \n # put horizontal lines for each feature row\n for i in range(num_features):\n pl.axhline(i, color=\"#cccccc\", lw=0.5, dashes=(1, 5), zorder=-1)\n \n # mark the prior expected value and the model prediction\n pl.axvline(expected_value, 0, 1/num_features, color=\"#bbbbbb\", linestyle=\"--\", linewidth=0.5, zorder=-1)\n fx = expected_value + shap_values.sum()\n pl.axvline(fx, 0, 1, color=\"#bbbbbb\", linestyle=\"--\", linewidth=0.5, zorder=-1)\n \n # clean up the main axis\n pl.gca().xaxis.set_ticks_position('bottom')\n pl.gca().yaxis.set_ticks_position('none')\n pl.gca().spines['right'].set_visible(False)\n pl.gca().spines['top'].set_visible(False)\n pl.gca().spines['left'].set_visible(False)\n ax.tick_params(labelsize=13)\n #pl.xlabel(\"\\nModel output\", fontsize=12)\n\n # draw the E[f(X)] tick mark\n xmin,xmax = ax.get_xlim()\n ax2=ax.twiny()\n ax2.set_xlim(xmin,xmax)\n ax2.set_xticks([expected_value, expected_value])\n ax2.set_xticklabels([\"\\n$E[f(X)]$\",\"\\n$ = \"+format_value(expected_value, \"%0.03f\")+\"$\"], fontsize=12, ha=\"left\")\n ax2.spines['right'].set_visible(False)\n ax2.spines['top'].set_visible(False)\n ax2.spines['left'].set_visible(False)\n\n # draw the f(x) tick mark\n ax3=ax2.twiny()\n ax3.set_xlim(xmin,xmax)\n ax3.set_xticks([expected_value + shap_values.sum()] * 2)\n ax3.set_xticklabels([\"$f(x)$\",\"$ = \"+format_value(fx, \"%0.03f\")+\"$\"], fontsize=12, ha=\"left\")\n tick_labels = ax3.xaxis.get_majorticklabels()\n tick_labels[0].set_transform(tick_labels[0].get_transform() + matplotlib.transforms.ScaledTranslation(-10/72., 0, fig.dpi_scale_trans))\n tick_labels[1].set_transform(tick_labels[1].get_transform() + matplotlib.transforms.ScaledTranslation(12/72., 0, fig.dpi_scale_trans))\n tick_labels[1].set_color(\"#999999\")\n 
ax3.spines['right'].set_visible(False)\n ax3.spines['top'].set_visible(False)\n ax3.spines['left'].set_visible(False)\n\n # adjust the position of the E[f(X)] = x.xx label\n tick_labels = ax2.xaxis.get_majorticklabels()\n tick_labels[0].set_transform(tick_labels[0].get_transform() + matplotlib.transforms.ScaledTranslation(-20/72., 0, fig.dpi_scale_trans))\n tick_labels[1].set_transform(tick_labels[1].get_transform() + matplotlib.transforms.ScaledTranslation(22/72., -1/72., fig.dpi_scale_trans))\n tick_labels[1].set_color(\"#999999\")\n\n # color the y tick labels that have the feature values as gray\n # (these fall behind the black ones with just the feature name)\n tick_labels = ax.yaxis.get_majorticklabels()\n for i in range(num_features):\n tick_labels[i].set_color(\"#999999\")\n \n if show:\n pl.show()\n else:\n return pl.gcf()\n" ]
[ [ "matplotlib.pyplot.gca", "matplotlib.pyplot.axvline", "matplotlib.pyplot.axhline", "numpy.abs", "numpy.min", "numpy.arange", "matplotlib.transforms.ScaledTranslation", "matplotlib.pyplot.gcf", "matplotlib.pyplot.plot", "matplotlib.pyplot.ioff", "numpy.max", "matplotlib.pyplot.xlim", "numpy.exp", "numpy.array", "matplotlib.pyplot.show" ] ]
bardkw/SMQTK-Descriptors
[ "21b449a64f4e19588c5e10e8ffe63473c7891610" ]
[ "smqtk_descriptors/impls/descriptor_element/solr.py" ]
[ "import time\nfrom typing import Any, Dict, Hashable, Mapping, Optional\n\nimport numpy\n\nfrom smqtk_descriptors import DescriptorElement\n\n\n# Try to import required module\ntry:\n import solr # type: ignore\nexcept ImportError:\n solr = None\n\n\nclass SolrDescriptorElement (DescriptorElement): # lgtm [py/missing-equals]\n \"\"\"\n Descriptor element that uses a Solr instance as the backend storage medium.\n\n Fields where data is stored in the Solr documents are specified at\n construction time. We additionally set the ``id`` field to a string UUID.\n ``id`` is set because it is a common, required field for unique\n identification of documents. The value set to the ``id`` field is\n reproducible from this object's key attributes.\n\n \"\"\"\n\n @classmethod\n def is_usable(cls) -> bool:\n return solr is not None\n\n def __init__(\n self,\n uuid: Hashable,\n solr_conn_addr: str,\n uuid_field: str,\n vector_field: str,\n timestamp_field: str,\n timeout: int = 10,\n persistent_connection: bool = False,\n commit_on_set: bool = True\n ):\n \"\"\"\n Initialize a new Solr-stored descriptor element.\n\n :param uuid: Unique ID reference of the descriptor.\n :param solr_conn_addr: HTTP(S) address for the Solr index to use\n :param uuid_field: Solr index field to store descriptor UUID string\n value in.\n :param vector_field: Solr index field to store the descriptor vector of\n floats in.\n :param timestamp_field: Solr index field to store floating-point UNIX\n timestamps.\n :param timeout: Whether or not the Solr connection should\n be persistent or not.\n :param persistent_connection: Maintain a connection between Solr index\n interactions.\n :param commit_on_set: Immediately commit changes when a vector is set.\n \"\"\"\n super(SolrDescriptorElement, self).__init__(uuid)\n\n self.uuid_field = uuid_field\n self.vector_field = vector_field\n self.timestamp_field = timestamp_field\n\n self.solr_conn_addr = solr_conn_addr\n self.solr_timeout = timeout\n self.solr_persistent_connection = persistent_connection\n self.solr_commit_on_set = commit_on_set\n\n self.solr = self._make_solr_inst()\n\n def __getstate__(self) -> Dict[str, Any]:\n state = super(SolrDescriptorElement, self).__getstate__()\n state.update({\n \"uuid_field\": self.uuid_field,\n \"vector_field\": self.vector_field,\n \"timestamp_field\": self.timestamp_field,\n \"solr_conn_addr\": self.solr_conn_addr,\n \"solr_persistent_connection\": self.solr_persistent_connection,\n \"solr_timeout\": self.solr_timeout,\n \"solr_commit_on_set\": self.solr_commit_on_set,\n })\n return state\n\n def __setstate__(self, state: Mapping[str, Any]) -> None:\n self.uuid_field = state['uuid_field']\n self.vector_field = state['vector_field']\n self.timestamp_field = state['timestamp_field']\n self.solr_conn_addr = state['solr_conn_addr']\n self.solr_timeout = state['solr_timeout']\n self.solr_persistent_connection = state['solr_persistent_connection']\n self.solr_commit_on_set = state['solr_commit_on_set']\n\n self.solr = self._make_solr_inst()\n\n def __repr__(self) -> str:\n return super(SolrDescriptorElement, self).__repr__() + \\\n '[url: %s, timeout: %d, ' \\\n 'persistent: %s]' \\\n % (self.solr.url, self.solr.timeout, self.solr.persistent)\n\n def _make_solr_inst(self) -> \"solr.Solr\":\n return solr.Solr(self.solr_conn_addr,\n persistent=self.solr_persistent_connection,\n timeout=self.solr_timeout,\n # debug=True # This makes things pretty verbose\n )\n\n def _base_doc(self) -> Dict[str, Any]:\n \"\"\"\n :returns: A new dictionary representing 
the basic document structure\n for interacting with our elements in Solr.\n \"\"\"\n suuid = str(self.uuid())\n return {\n 'id': '-'.join([suuid]),\n self.uuid_field: suuid,\n }\n\n def _get_existing_doc(self) -> Optional[Dict[str, Any]]:\n \"\"\"\n :return: An existing document dict. If there isn't one for our uuid\n we return None.\n \"\"\"\n b_doc = self._base_doc()\n r = self.solr.select(f\"id:{b_doc['id']} \\\n AND {self.uuid_field}:{b_doc[self.uuid_field]}\")\n if r.numFound == 1:\n return r.results[0]\n else:\n return None\n\n def get_config(self) -> Dict[str, Any]:\n return {\n \"solr_conn_addr\": self.solr_conn_addr,\n \"uuid_field\": self.uuid_field,\n \"vector_field\": self.vector_field,\n \"timestamp_field\": self.timestamp_field,\n \"timeout\": self.solr_timeout,\n \"persistent_connection\": self.solr_persistent_connection,\n \"commit_on_set\": self.solr_commit_on_set,\n }\n\n def has_vector(self) -> bool:\n return bool(self._get_existing_doc())\n\n def set_vector(self, new_vec: numpy.ndarray) -> \"SolrDescriptorElement\":\n doc = self._base_doc()\n doc[self.vector_field] = new_vec.tolist()\n doc[self.timestamp_field] = time.time()\n self.solr.add(doc, commit=self.solr_commit_on_set)\n return self\n\n def vector(self) -> Optional[numpy.ndarray]:\n doc = self._get_existing_doc()\n if doc is None:\n return None\n # Vectors stored as lists in solr doc\n return numpy.array(doc[self.vector_field])\n" ]
[ [ "numpy.array" ] ]
shreeju/TensorNetwork
[ "00065ffdbbe0d6e4075045888e469e78e06dcf55" ]
[ "tensornetwork/backends/pytorch/pytorch_backend_test.py" ]
[ "import numpy as np\nfrom tensornetwork.backends.pytorch import pytorch_backend\nimport torch\nimport pytest\nfrom unittest.mock import Mock\n\ntorch_dtypes = [torch.float32, torch.float64, torch.int32]\ntorch_eye_dtypes = [torch.float32, torch.float64, torch.int32, torch.int64]\ntorch_randn_dtypes = [torch.float32, torch.float64]\n\n\ndef test_tensordot():\n backend = pytorch_backend.PyTorchBackend()\n a = backend.convert_to_tensor(2 * np.ones((2, 3, 4)))\n b = backend.convert_to_tensor(np.ones((2, 3, 4)))\n actual = backend.tensordot(a, b, ((1, 2), (1, 2)))\n expected = np.array([[24.0, 24.0], [24.0, 24.0]])\n np.testing.assert_allclose(expected, actual)\n\n\ndef test_reshape():\n backend = pytorch_backend.PyTorchBackend()\n a = backend.convert_to_tensor(np.ones((2, 3, 4)))\n actual = backend.shape_tuple(backend.reshape(a, (6, 4, 1)))\n assert actual == (6, 4, 1)\n\n\ndef test_transpose():\n backend = pytorch_backend.PyTorchBackend()\n a = backend.convert_to_tensor(\n np.array([[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]]]))\n actual = backend.transpose(a, [2, 0, 1])\n expected = np.array([[[1.0, 3.0], [5.0, 7.0]], [[2.0, 4.0], [6.0, 8.0]]])\n np.testing.assert_allclose(expected, actual)\n\n\ndef test_shape_concat():\n backend = pytorch_backend.PyTorchBackend()\n a = backend.convert_to_tensor(2 * np.ones((1, 3, 1)))\n b = backend.convert_to_tensor(np.ones((1, 2, 1)))\n expected = backend.shape_concat((a, b), axis=1)\n actual = np.array([[[2.0], [2.0], [2.0], [1.0], [1.0]]])\n np.testing.assert_allclose(expected, actual)\n\n\ndef test_slice():\n backend = pytorch_backend.PyTorchBackend()\n a = backend.convert_to_tensor(\n np.array([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]]))\n actual = backend.slice(a, (1, 1), (2, 2))\n expected = np.array([[5., 6.], [8., 9.]])\n np.testing.assert_allclose(expected, actual)\n\n\ndef test_slice_raises_error():\n backend = pytorch_backend.PyTorchBackend()\n a = backend.convert_to_tensor(\n np.array([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]]))\n with pytest.raises(ValueError):\n backend.slice(a, (1, 1), (2, 2, 2))\n\n\ndef test_shape_tensor():\n backend = pytorch_backend.PyTorchBackend()\n a = backend.convert_to_tensor(np.ones([2, 3, 4]))\n assert isinstance(backend.shape_tensor(a), torch.Tensor)\n actual = backend.shape_tensor(a)\n expected = np.array([2, 3, 4])\n np.testing.assert_allclose(expected, actual)\n\n\ndef test_shape_tuple():\n backend = pytorch_backend.PyTorchBackend()\n a = backend.convert_to_tensor(np.ones([2, 3, 4]))\n actual = backend.shape_tuple(a)\n assert actual == (2, 3, 4)\n\n\ndef test_shape_prod():\n backend = pytorch_backend.PyTorchBackend()\n a = backend.convert_to_tensor(2 * np.ones([1, 2, 3, 4]))\n actual = np.array(backend.shape_prod(a))\n assert actual == 2**24\n\n\ndef test_sqrt():\n backend = pytorch_backend.PyTorchBackend()\n a = backend.convert_to_tensor(np.array([4.0, 9.0]))\n actual = backend.sqrt(a)\n expected = np.array([2, 3])\n np.testing.assert_allclose(expected, actual)\n\n\ndef test_diag():\n backend = pytorch_backend.PyTorchBackend()\n b = backend.convert_to_tensor(np.array([1.0, 2.0, 3.0]))\n actual = backend.diag(b)\n expected = np.array([[1.0, 0.0, 0.0], [0.0, 2.0, 0.0], [0.0, 0.0, 3.0]])\n np.testing.assert_allclose(expected, actual)\n\n\ndef test_convert_to_tensor():\n backend = pytorch_backend.PyTorchBackend()\n array = np.ones((2, 3, 4))\n actual = backend.convert_to_tensor(array)\n expected = torch.ones((2, 3, 4))\n assert isinstance(actual, type(expected))\n np.testing.assert_allclose(expected, 
actual)\n\n\ndef test_trace():\n backend = pytorch_backend.PyTorchBackend()\n a = backend.convert_to_tensor(\n np.array([[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]],\n [[9., 10.], [11., 12.]]]))\n actual = backend.trace(a)\n expected = np.array([5., 13., 21.])\n np.testing.assert_allclose(expected, actual)\n a = backend.convert_to_tensor(np.array([[1., 2.], [3., 4.]]))\n actual = backend.trace(a)\n np.testing.assert_allclose(actual, 5)\n\n\ndef test_outer_product():\n backend = pytorch_backend.PyTorchBackend()\n a = backend.convert_to_tensor(2 * np.ones((2, 1)))\n b = backend.convert_to_tensor(np.ones((1, 2, 2)))\n actual = backend.outer_product(a, b)\n expected = np.ones((2, 1, 1, 2, 2)) * 2\n\n np.testing.assert_allclose(expected, actual)\n\n\ndef test_norm():\n backend = pytorch_backend.PyTorchBackend()\n a = backend.convert_to_tensor(np.ones((2, 2)))\n assert backend.norm(a) == 2\n\n\n@pytest.mark.parametrize(\"dtype\", torch_eye_dtypes)\ndef test_eye(dtype):\n backend = pytorch_backend.PyTorchBackend()\n a = backend.eye(N=4, M=5, dtype=dtype)\n np.testing.assert_allclose(torch.eye(n=4, m=5, dtype=dtype), a)\n\n\n@pytest.mark.parametrize(\"dtype\", torch_dtypes)\ndef test_ones(dtype):\n backend = pytorch_backend.PyTorchBackend()\n a = backend.ones((4, 4), dtype=dtype)\n np.testing.assert_allclose(torch.ones((4, 4), dtype=dtype), a)\n\n\n@pytest.mark.parametrize(\"dtype\", torch_dtypes)\ndef test_zeros(dtype):\n backend = pytorch_backend.PyTorchBackend()\n a = backend.zeros((4, 4), dtype=dtype)\n np.testing.assert_allclose(torch.zeros((4, 4), dtype=dtype), a)\n\n\n@pytest.mark.parametrize(\"dtype\", torch_randn_dtypes)\ndef test_randn(dtype):\n backend = pytorch_backend.PyTorchBackend()\n a = backend.randn((4, 4), dtype=dtype)\n assert a.shape == (4, 4)\n\n\n@pytest.mark.parametrize(\"dtype\", torch_randn_dtypes)\ndef test_random_uniform(dtype):\n backend = pytorch_backend.PyTorchBackend()\n a = backend.random_uniform((4, 4), dtype=dtype)\n assert a.shape == (4, 4)\n\n\n@pytest.mark.parametrize(\"dtype\", torch_eye_dtypes)\ndef test_eye_dtype(dtype):\n backend = pytorch_backend.PyTorchBackend()\n a = backend.eye(N=4, M=4, dtype=dtype)\n assert a.dtype == dtype\n\n\n@pytest.mark.parametrize(\"dtype\", torch_dtypes)\ndef test_ones_dtype(dtype):\n backend = pytorch_backend.PyTorchBackend()\n a = backend.ones((4, 4), dtype=dtype)\n assert a.dtype == dtype\n\n\n@pytest.mark.parametrize(\"dtype\", torch_dtypes)\ndef test_zeros_dtype(dtype):\n backend = pytorch_backend.PyTorchBackend()\n a = backend.zeros((4, 4), dtype=dtype)\n assert a.dtype == dtype\n\n\n@pytest.mark.parametrize(\"dtype\", torch_randn_dtypes)\ndef test_randn_dtype(dtype):\n backend = pytorch_backend.PyTorchBackend()\n a = backend.randn((4, 4), dtype=dtype)\n assert a.dtype == dtype\n\n\n@pytest.mark.parametrize(\"dtype\", torch_randn_dtypes)\ndef test_random_uniform_dtype(dtype):\n backend = pytorch_backend.PyTorchBackend()\n a = backend.random_uniform((4, 4), dtype=dtype)\n assert a.dtype == dtype\n\n\n@pytest.mark.parametrize(\"dtype\", torch_randn_dtypes)\ndef test_randn_seed(dtype):\n backend = pytorch_backend.PyTorchBackend()\n a = backend.randn((4, 4), seed=10, dtype=dtype)\n b = backend.randn((4, 4), seed=10, dtype=dtype)\n np.testing.assert_allclose(a, b)\n\n\n@pytest.mark.parametrize(\"dtype\", torch_randn_dtypes)\ndef test_random_uniform_seed(dtype):\n backend = pytorch_backend.PyTorchBackend()\n a = backend.random_uniform((4, 4), seed=10, dtype=dtype)\n b = backend.random_uniform((4, 4), seed=10, dtype=dtype)\n 
assert torch.allclose(a, b)\n\n\n@pytest.mark.parametrize(\"dtype\", torch_randn_dtypes)\ndef test_random_uniform_boundaries(dtype):\n lb = 1.2\n ub = 4.8\n backend = pytorch_backend.PyTorchBackend()\n a = backend.random_uniform((4, 4), seed=10, dtype=dtype)\n b = backend.random_uniform((4, 4), (lb, ub), seed=10, dtype=dtype)\n assert (torch.ge(a, 0).byte().all() and torch.le(a, 1).byte().all() and\n torch.ge(b, lb).byte().all() and torch.le(b, ub).byte().all())\n\n\ndef test_random_uniform_behavior():\n backend = pytorch_backend.PyTorchBackend()\n a = backend.random_uniform((4, 4), seed=10)\n torch.manual_seed(10)\n b = torch.empty((4, 4), dtype=torch.float64).uniform_()\n assert torch.allclose(a, b)\n\n\ndef test_conj():\n backend = pytorch_backend.PyTorchBackend()\n real = np.random.rand(2, 2, 2)\n a = backend.convert_to_tensor(real)\n actual = backend.conj(a)\n expected = real\n np.testing.assert_allclose(expected, actual)\n\n\ndef test_eigsh_lanczos_0():\n #this test should just not crash\n dtype = torch.float64\n backend = pytorch_backend.PyTorchBackend()\n D = 4\n init = backend.randn((2, 2, 2), dtype=dtype)\n tmp = backend.randn((8, 8), dtype=dtype)\n H = tmp + backend.transpose(backend.conj(tmp), (1, 0))\n H = H.reshape([2, 2, 2, 2, 2, 2])\n\n def mv(x, mat):\n return torch.tensordot(mat, x, ([0, 3, 5], [2, 0, 1])).permute([2, 0, 1])\n\n backend.eigsh_lanczos(mv, [H], init, num_krylov_vecs=D)\n\n\ndef test_eigsh_lanczos_1():\n dtype = torch.float64\n backend = pytorch_backend.PyTorchBackend()\n D = 24\n init = backend.randn((D,), dtype=dtype)\n tmp = backend.randn((D, D), dtype=dtype)\n H = tmp + backend.transpose(backend.conj(tmp), (1, 0))\n\n def mv(x, mat):\n return mat.mv(x)\n\n eta1, U1 = backend.eigsh_lanczos(mv, [H], init, num_krylov_vecs=D)\n eta2, U2 = H.symeig(eigenvectors=True)\n v2 = U2[:, 0]\n v2 = v2 / sum(v2)\n v1 = np.reshape(U1[0], (D))\n v1 = v1 / sum(v1)\n np.testing.assert_allclose(eta1[0], min(eta2))\n np.testing.assert_allclose(v1, v2)\n\n\ndef test_eigsh_small_number_krylov_vectors():\n backend = pytorch_backend.PyTorchBackend()\n init = backend.convert_to_tensor(np.array([1, 1], dtype=np.float64))\n H = backend.convert_to_tensor(np.array([[1, 2], [3, 4]], dtype=np.float64))\n\n def mv(x, mat):\n return mat.mv(x)\n\n eta1, _ = backend.eigsh_lanczos(mv, [H], init, num_krylov_vecs=1)\n np.testing.assert_allclose(eta1[0], 5)\n\n\n@pytest.mark.parametrize(\"numeig\", [1, 2, 3, 4])\ndef test_eigsh_lanczos_reorthogonalize(numeig):\n dtype = torch.float64\n backend = pytorch_backend.PyTorchBackend()\n D = 24\n np.random.seed(10)\n tmp = backend.randn((D, D), dtype=dtype, seed=10)\n H = tmp + backend.transpose(backend.conj(tmp), (1, 0))\n\n def mv(x, mat):\n return mat.mv(x)\n\n eta1, U1 = backend.eigsh_lanczos(\n mv, [H],\n shape=(D,),\n dtype=dtype,\n numeig=numeig,\n num_krylov_vecs=D,\n reorthogonalize=True,\n ndiag=1,\n tol=10**(-12),\n delta=10**(-12))\n eta2, U2 = np.linalg.eigh(H)\n\n np.testing.assert_allclose(eta1[0:numeig], eta2[0:numeig])\n for n in range(numeig):\n v2 = U2[:, n]\n v2 /= np.sum(v2) #fix phases\n v1 = np.reshape(U1[n], (D))\n v1 /= torch.sum(v1)\n\n np.testing.assert_allclose(v1, v2, rtol=10**(-5), atol=10**(-5))\n\n\ndef test_eigsh_lanczos_2():\n dtype = torch.float64\n backend = pytorch_backend.PyTorchBackend()\n D = 16\n tmp = backend.randn((D, D), dtype=dtype)\n H = tmp + backend.transpose(backend.conj(tmp), (1, 0))\n\n def mv(x, mat):\n return mat.mv(x)\n\n eta1, U1 = backend.eigsh_lanczos(\n mv, [H],\n shape=(D,),\n dtype=dtype,\n 
reorthogonalize=True,\n ndiag=1,\n tol=10**(-12),\n delta=10**(-12))\n eta2, U2 = H.symeig(eigenvectors=True)\n v2 = U2[:, 0]\n v2 = v2 / sum(v2)\n v1 = np.reshape(U1[0], (D))\n v1 = v1 / sum(v1)\n np.testing.assert_allclose(eta1[0], min(eta2))\n np.testing.assert_allclose(v1, v2, rtol=10**(-5), atol=10**(-5))\n\n\ndef test_eigsh_lanczos_raises():\n backend = pytorch_backend.PyTorchBackend()\n with pytest.raises(\n ValueError, match='`num_krylov_vecs` >= `numeig` required!'):\n backend.eigsh_lanczos(lambda x: x, numeig=10, num_krylov_vecs=9)\n with pytest.raises(\n ValueError,\n match=\"Got numeig = 2 > 1 and `reorthogonalize = False`. \"\n \"Use `reorthogonalize=True` for `numeig > 1`\"):\n backend.eigsh_lanczos(lambda x: x, numeig=2, reorthogonalize=False)\n with pytest.raises(\n ValueError,\n match=\"if no `initial_state` is passed, then `shape` and\"\n \"`dtype` have to be provided\"):\n backend.eigsh_lanczos(lambda x: x, shape=(10,), dtype=None)\n with pytest.raises(\n ValueError,\n match=\"if no `initial_state` is passed, then `shape` and\"\n \"`dtype` have to be provided\"):\n backend.eigsh_lanczos(lambda x: x, shape=None, dtype=torch.float64)\n with pytest.raises(\n ValueError,\n match=\"if no `initial_state` is passed, then `shape` and\"\n \"`dtype` have to be provided\"):\n backend.eigsh_lanczos(lambda x: x)\n with pytest.raises(\n TypeError, match=\"Expected a `torch.Tensor`. Got <class 'list'>\"):\n backend.eigsh_lanczos(lambda x: x, initial_state=[1, 2, 3])\n\n\n@pytest.mark.parametrize(\"a, b, expected\", [\n pytest.param(1, 1, 2),\n pytest.param(\n np.ones((1, 2, 3)), np.ones((1, 2, 3)), 2. * np.ones((1, 2, 3))),\n])\ndef test_addition(a, b, expected):\n backend = pytorch_backend.PyTorchBackend()\n tensor1 = backend.convert_to_tensor(a)\n tensor2 = backend.convert_to_tensor(b)\n result = backend.addition(tensor1, tensor2)\n\n np.testing.assert_allclose(result, expected)\n assert tensor1.dtype == tensor2.dtype == result.dtype\n\n\n@pytest.mark.parametrize(\"a, b, expected\", [\n pytest.param(1, 1, 0),\n pytest.param(np.ones((1, 2, 3)), np.ones((1, 2, 3)), np.zeros((1, 2, 3))),\n])\ndef test_subtraction(a, b, expected):\n backend = pytorch_backend.PyTorchBackend()\n tensor1 = backend.convert_to_tensor(a)\n tensor2 = backend.convert_to_tensor(b)\n result = backend.subtraction(tensor1, tensor2)\n\n np.testing.assert_allclose(result, expected)\n assert tensor1.dtype == tensor2.dtype == result.dtype\n\n\n@pytest.mark.parametrize(\"a, b, expected\", [\n pytest.param(1, 1, 1),\n pytest.param(np.ones((1, 2, 3)), np.ones((1, 2, 3)), np.ones((1, 2, 3))),\n])\ndef test_multiply(a, b, expected):\n backend = pytorch_backend.PyTorchBackend()\n tensor1 = backend.convert_to_tensor(a)\n tensor2 = backend.convert_to_tensor(b)\n result = backend.multiply(tensor1, tensor2)\n\n np.testing.assert_allclose(result, expected)\n assert tensor1.dtype == tensor2.dtype == result.dtype\n\n\n@pytest.mark.parametrize(\"a, b, expected\", [\n pytest.param(2., 2., 1.),\n pytest.param(\n np.ones(()), 2. 
* np.ones((1, 2, 3)), 0.5 * np.ones((1, 2, 3))),\n])\ndef test_divide(a, b, expected):\n backend = pytorch_backend.PyTorchBackend()\n tensor1 = backend.convert_to_tensor(a)\n tensor2 = backend.convert_to_tensor(b)\n result = backend.divide(tensor1, tensor2)\n\n np.testing.assert_allclose(result, expected)\n assert tensor1.dtype == tensor2.dtype == result.dtype\n\n\ndef test_eigh():\n dtype = torch.float64\n backend = pytorch_backend.PyTorchBackend()\n H = backend.randn((4, 4), dtype)\n H = H + np.conj(np.transpose(H))\n\n eta, U = backend.eigh(H)\n eta_ac, _ = np.linalg.eigh(H)\n M = U.transpose(1, 0).mm(H).mm(U)\n np.testing.assert_allclose(eta, eta_ac)\n np.testing.assert_almost_equal(np.diag(eta), M)\n\n\n@pytest.mark.parametrize(\"dtype\", torch_randn_dtypes)\ndef test_index_update(dtype):\n backend = pytorch_backend.PyTorchBackend()\n tensor = backend.randn((4, 2, 3), dtype=dtype, seed=10)\n out = backend.index_update(tensor, tensor > 0.1, 0.0)\n tensor[tensor > 0.1] = 0.0\n np.testing.assert_allclose(out, tensor)\n\n\ndef test_matrix_inv():\n dtype = torch.float64\n backend = pytorch_backend.PyTorchBackend()\n matrix = backend.randn((4, 4), dtype=dtype, seed=10)\n inverse = backend.inv(matrix)\n m1 = matrix.mm(inverse)\n m2 = inverse.mm(matrix)\n\n np.testing.assert_almost_equal(m1, np.eye(4))\n np.testing.assert_almost_equal(m2, np.eye(4))\n\n\n@pytest.mark.parametrize(\"dtype\", torch_randn_dtypes)\ndef test_matrix_inv_raises(dtype):\n backend = pytorch_backend.PyTorchBackend()\n matrix = backend.randn((4, 4, 4), dtype=dtype, seed=10)\n with pytest.raises(ValueError):\n backend.inv(matrix)\n\n\ndef test_eigs_not_implemented():\n backend = pytorch_backend.PyTorchBackend()\n with pytest.raises(NotImplementedError):\n backend.eigs(np.ones((2, 2)))\n\n\ndef test_broadcast_right_multiplication():\n backend = pytorch_backend.PyTorchBackend()\n tensor1 = backend.randn((2, 4, 3), dtype=torch.float64, seed=10)\n tensor2 = backend.randn((3,), dtype=torch.float64, seed=10)\n out = backend.broadcast_right_multiplication(tensor1, tensor2)\n np.testing.assert_allclose(out, tensor1 * tensor2)\n\n\ndef test_broadcast_right_multiplication_raises():\n dtype = torch.float64\n backend = pytorch_backend.PyTorchBackend()\n tensor1 = backend.randn((2, 4, 3), dtype=dtype, seed=10)\n tensor2 = backend.randn((3, 3), dtype=dtype, seed=10)\n with pytest.raises(ValueError):\n backend.broadcast_right_multiplication(tensor1, tensor2)\n\n\ndef test_broadcast_left_multiplication():\n dtype = torch.float64\n backend = pytorch_backend.PyTorchBackend()\n tensor1 = backend.randn((3,), dtype=dtype, seed=10)\n tensor2 = backend.randn((3, 4, 2), dtype=dtype, seed=10)\n out = backend.broadcast_left_multiplication(tensor1, tensor2)\n np.testing.assert_allclose(out, np.reshape(tensor1, (3, 1, 1)) * tensor2)\n\n\ndef test_broadcast_left_multiplication_raises():\n dtype = torch.float64\n backend = pytorch_backend.PyTorchBackend()\n tensor1 = backend.randn((3, 3), dtype=dtype, seed=10)\n tensor2 = backend.randn((2, 4, 3), dtype=dtype, seed=10)\n with pytest.raises(ValueError):\n backend.broadcast_left_multiplication(tensor1, tensor2)\n\n\ndef test_sparse_shape():\n dtype = torch.float64\n backend = pytorch_backend.PyTorchBackend()\n tensor = backend.randn((2, 3, 4), dtype=dtype, seed=10)\n np.testing.assert_allclose(backend.sparse_shape(tensor), tensor.shape)\n\n\ndef test_sum():\n np.random.seed(10)\n backend = pytorch_backend.PyTorchBackend()\n tensor = np.random.rand(2, 3, 4)\n a = backend.convert_to_tensor(tensor)\n actual 
= backend.sum(a, axis=(1, 2))\n expected = np.sum(tensor, axis=(1, 2))\n np.testing.assert_allclose(expected, actual)\n\n\ndef test_matmul():\n np.random.seed(10)\n backend = pytorch_backend.PyTorchBackend()\n t1 = np.random.rand(10, 2, 3)\n t2 = np.random.rand(10, 3, 4)\n a = backend.convert_to_tensor(t1)\n b = backend.convert_to_tensor(t2)\n actual = backend.matmul(a, b)\n expected = np.matmul(t1, t2)\n np.testing.assert_allclose(expected, actual)\n" ]
[ [ "numpy.diag", "torch.ge", "torch.zeros", "torch.sum", "torch.le", "torch.allclose", "torch.ones", "numpy.reshape", "numpy.eye", "numpy.matmul", "torch.eye", "numpy.zeros", "torch.tensordot", "torch.empty", "numpy.linalg.eigh", "numpy.random.rand", "numpy.testing.assert_allclose", "numpy.transpose", "numpy.array", "numpy.sum", "numpy.random.seed", "torch.manual_seed", "numpy.ones" ] ]
yidazhao/fashioniq2020_retrieval
[ "337cd1d2f603e1e39cc95f886e290b0285011d9f" ]
[ "torch_functions.py" ]
[ "\n# TODO(lujiang): put it into the third-party\n# MIT License\n\n# Copyright (c) 2018 Nam Vo\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n\n\"\"\"Metric learning functions.\n\nCodes are modified from:\nhttps://github.com/lugiavn/generalization-dml/blob/master/nams.py\n\"\"\"\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision\n\nfrom torch.nn.utils.rnn import pack_padded_sequence\nfrom torch.nn.utils.rnn import pad_packed_sequence\n\n\ndef pairwise_distances(x, y=None):\n \"\"\"\n Input: x is a Nxd matrix\n y is an optional Mxd matirx\n Output: dist is a NxM matrix where dist[i,j] is the square norm between\n x[i,:] and y[j,:]\n if y is not given then use 'y=x'.\n i.e. dist[i,j] = ||x[i,:]-y[j,:]||^2\n source:\n https://discuss.pytorch.org/t/efficient-distance-matrix-computation/9065/2\n \"\"\"\n x_norm = (x**2).sum(1).view(-1, 1)\n if y is not None:\n y_t = torch.transpose(y, 0, 1)\n y_norm = (y**2).sum(1).view(1, -1)\n else:\n y_t = torch.transpose(x, 0, 1)\n y_norm = x_norm.view(1, -1)\n\n dist = x_norm + y_norm - 2.0 * torch.mm(x, y_t)\n # Ensure diagonal is zero if x=y\n # if y is None:\n # dist = dist - torch.diag(dist.diag)\n return torch.clamp(dist, 0.0, np.inf)\n\n\nclass MyTripletLossFunc(torch.autograd.Function):\n\n def __init__(self, triplets):\n super(MyTripletLossFunc, self).__init__()\n self.triplets = triplets\n self.triplet_count = len(triplets)\n\n def forward(self, features):\n self.save_for_backward(features)\n\n self.distances = pairwise_distances(features).cpu().numpy()\n\n loss = 0.0\n triplet_count = 0.0\n correct_count = 0.0\n for i, j, k in self.triplets:\n w = 1.0\n triplet_count += w\n loss += w * np.log(1 +\n np.exp(self.distances[i, j] - self.distances[i, k]))\n if self.distances[i, j] < self.distances[i, k]:\n correct_count += 1\n\n loss /= triplet_count\n return torch.FloatTensor((loss,))\n\n def backward(self, grad_output):\n features, = self.saved_tensors\n features_np = features.cpu().numpy()\n grad_features = features.clone() * 0.0\n grad_features_np = grad_features.cpu().numpy()\n\n for i, j, k in self.triplets:\n w = 1.0\n f = 1.0 - 1.0 / (\n 1.0 + np.exp(self.distances[i, j] - self.distances[i, k]))\n grad_features_np[i, :] += w * f * (\n features_np[i, :] - features_np[j, :]) / self.triplet_count\n grad_features_np[j, :] += w * f * (\n features_np[j, :] - features_np[i, :]) / self.triplet_count\n grad_features_np[i, :] += -w * f * (\n features_np[i, :] - features_np[k, :]) / self.triplet_count\n grad_features_np[k, :] += -w * f * (\n features_np[k, :] - features_np[i, :]) / self.triplet_count\n\n for i in range(features_np.shape[0]):\n grad_features[i, :] = torch.from_numpy(grad_features_np[i, :])\n grad_features *= float(grad_output.data[0])\n return grad_features\n\n\nclass TripletLoss(torch.nn.Module):\n \"\"\"Class for the triplet loss.\"\"\"\n def __init__(self, pre_layer=None):\n super(TripletLoss, self).__init__()\n self.pre_layer = 
pre_layer\n\n def forward(self, x, triplets):\n if self.pre_layer is not None:\n x = self.pre_layer(x)\n loss = MyTripletLossFunc(triplets)(x)\n return loss\n\n\nclass NormalizationLayer(torch.nn.Module):\n \"\"\"Class for normalization layer.\"\"\"\n def __init__(self, normalize_scale=1.0, learn_scale=True):\n super(NormalizationLayer, self).__init__()\n self.norm_s = float(normalize_scale)\n if learn_scale:\n self.norm_s = torch.nn.Parameter(torch.FloatTensor((self.norm_s,)))\n\n def forward(self, x):\n features = self.norm_s * x / torch.norm(x, dim=1, keepdim=True).expand_as(x)\n return features\n\n\ndef l2norm(inputs, dim=-1):\n # inputs: (batch, dim_ft)\n norm = torch.norm(inputs, p=2, dim=dim, keepdim=True)\n inputs = inputs / norm\n return inputs\n\ndef sequence_mask(lengths, max_len=None):\n ''' Creates a boolean mask from sequence lengths.\n '''\n # lengths: LongTensor, (batch, )\n batch_size = lengths.size(0)\n max_len = max_len or lengths.max()\n return (torch.arange(0, max_len)\n .type_as(lengths)\n .repeat(batch_size, 1)\n .lt(lengths.unsqueeze(1)))\n\n \ndef rnn_factory(rnn_type, **kwargs):\n # Use pytorch version when available.\n rnn = getattr(nn, rnn_type.upper())(**kwargs)\n return rnn\n\n\ndef calc_rnn_outs_with_sort(rnn, inputs, seq_lens, init_states=None):\n '''\n inputs: FloatTensor, (batch, seq_len, dim_ft)\n seq_lens: LongTensor, (batch,)\n '''\n seq_len = inputs.size(1)\n # sort\n sorted_seq_lens, seq_sort_idx = torch.sort(seq_lens, descending=True)\n _, seq_unsort_idx = torch.sort(seq_sort_idx, descending=False)\n # pack\n inputs = torch.index_select(inputs, 0, seq_sort_idx)\n packed_inputs = pack_padded_sequence(inputs, sorted_seq_lens, batch_first=True)\n if init_states is not None:\n if isinstance(init_states, tuple):\n new_states = []\n for i, state in enumerate(init_states):\n new_states.append(torch.index_select(state, 1, seq_sort_idx))\n init_states = tuple(new_states)\n else:\n init_states = torch.index_select(init_states, 1, seq_sort_idx)\n # rnn\n packed_outs, states = rnn(packed_inputs, init_states)\n # unpack\n outs, _ = pad_packed_sequence(packed_outs, batch_first=True, \n total_length=seq_len, padding_value=0)\n # unsort\n # outs.size = (batch, seq_len, num_directions * hidden_size) \n outs = torch.index_select(outs, 0, seq_unsort_idx) \n if isinstance(states, tuple):\n # states: (num_layers * num_directions, batch, hidden_size)\n new_states = []\n for i, state in enumerate(states):\n new_states.append(torch.index_select(state, 1, seq_unsort_idx))\n states = tuple(new_states)\n else:\n states = torch.index_select(states, 1, seq_unsort_idx)\n\n return outs, states" ]
[ [ "torch.norm", "torch.transpose", "torch.mm", "torch.from_numpy", "torch.nn.utils.rnn.pack_padded_sequence", "torch.nn.utils.rnn.pad_packed_sequence", "torch.FloatTensor", "torch.sort", "torch.arange", "torch.clamp", "numpy.exp", "torch.index_select" ] ]
dhanani94/object-detection
[ "513dd38aefc19e3fde259b55ad83b12a6033ac54" ]
[ "src/detectors/ssd_detection.py" ]
[ "import cv2\nimport numpy as np\nimport pandas as pd\n\nfrom src.detectors.base_detector import BaseDetector\nfrom src.utils import timeit, read_json\n\n\nclass Detector(BaseDetector):\n \"\"\"Class ssd\"\"\"\n\n @timeit\n def __init__(self):\n super().__init__()\n self.swaprb = True\n self.class_names = read_json('./models/ssd_mobilenet/labels.json')\n self.model = cv2.dnn.readNetFromTensorflow(\n 'models/ssd_mobilenet/frozen_inference_graph.pb',\n 'models/ssd_mobilenet/ssd_mobilenet_v2_coco_2018_03_29.pbtxt')\n self.colors = np.random.uniform(0, 255, size=(100, 3))\n\n @timeit\n def prediction(self, image):\n self.model.setInput(\n cv2.dnn.blobFromImage(image, size=(300, 300), swapRB=self.swaprb))\n output = self.model.forward()\n result = output[0, 0, :, :]\n return result\n\n @timeit\n def filter_prediction(self, output, image, conf_th=0.5, conf_class=None):\n if not conf_class:\n conf_class = []\n height, width = image.shape[:-1]\n df = pd.DataFrame(\n output,\n columns=[\n '_', 'class_id', 'confidence', 'x1', 'y1', 'x2', 'y2'])\n df = df.assign(\n x1=lambda x: (x['x1'] * width).astype(int).clip(0),\n y1=lambda x: (x['y1'] * height).astype(int).clip(0),\n x2=lambda x: (x['x2'] * width).astype(int),\n y2=lambda x: (x['y2'] * height).astype(int),\n class_name=lambda x: (x['class_id'].astype(int).astype(str).replace(self.class_names)),\n confidence=lambda x: (x['confidence'])\n )\n df['label'] = (df['class_name'] + ': ' +\n df['confidence'].astype(str).str.slice(stop=4))\n df = df[df['confidence'] > conf_th]\n if len(conf_class) > 0:\n df = df[df['class_id'].isin(conf_class)]\n return df\n\n @staticmethod\n def get_detection_dict(df):\n output = []\n for idx, box in df.iterrows():\n output.append({\n \"points\": [(box['x1'], box['y1']), (box['x2'], box['y2'])],\n \"label\": box['label']\n })\n return output\n" ]
[ [ "numpy.random.uniform", "pandas.DataFrame" ] ]
rdarie/electropy
[ "4357267806833d5d6749db497454aee3c3fd56b5" ]
[ "electropy/volume.py" ]
[ "import numpy as np\nfrom tqdm import tqdm\n\ndef potential(\n charge_objs,\n x_range=[-10, 10],\n y_range=[-10, 10],\n z_range=[-10, 10],\n h=0.01,\n ):\n \"\"\"Calculate potential in a volume\n\n Args:\n charge_objs: list of Charge objects\n x_range, y_range, z_range: [min, max] distances of volume.\n units: meters\n h: spacing between array elements. units: meters\n\n Return: 3D numpy array\n\n \"\"\"\n x = _arange(x_range[0], x_range[1], h)\n y = _arange(y_range[0], y_range[1], h)\n z = _arange(z_range[0], z_range[1], h)\n\n potential_grid = np.zeros([x.size, y.size, z.size], dtype=float)\n\n for charge in tqdm(charge_objs):\n for (i, j, k), _ in tqdm(np.ndenumerate(potential_grid)):\n potential_grid[i][j][k] += charge.potential([x[i], y[j], z[k]])\n\n return potential_grid\n\n\ndef field(\n charge_objs,\n x_range=[-10, 10],\n y_range=[-10, 10],\n z_range=[-10, 10],\n h=0.01,\n type=\"analytical\",\n component=None,\n ):\n \"\"\"Calculate field in a volume\n\n Args:\n charge_objs: list of Charge objects\n x_range, y_range, z_range: [min, max] distances of volume.\n units: meters\n h: spacing between array elements. units: meters\n type: type of field calculation. 'analytical' (default) or from\n gradient of potential.\n component: 'x', 'y', 'z', or None (default)\n\n Return: 3D numpy array\n\n \"\"\"\n x = _arange(x_range[0], x_range[1], h)\n y = _arange(y_range[0], y_range[1], h)\n z = _arange(z_range[0], z_range[1], h)\n\n if component is None:\n field_grid = np.empty([x.size, y.size, z.size], dtype=object)\n else:\n field_grid = np.zeros([x.size, y.size, z.size], dtype=float)\n\n for charge in tqdm(charge_objs):\n for (i, j, k), _ in tqdm(np.ndenumerate(field_grid)):\n if field_grid[i][j][k] is None:\n if component is None:\n field_grid[i][j][k] = charge.field(\n [x[i], y[j], z[k]], type=type\n )\n else:\n if component is None:\n field_grid[i][j][k] += charge.field(\n [x[i], y[j], z[k]], type=type\n )\n elif component == \"x\":\n field_grid[i][j][k] += charge.field(\n [x[i], y[j], z[k]], type=type\n )[0]\n elif component == \"y\":\n field_grid[i][j][k] += charge.field(\n [x[i], y[j], z[k]], type=type\n )[1]\n elif component == \"z\":\n field_grid[i][j][k] += charge.field(\n [x[i], y[j], z[k]], type=type\n )[2]\n\n return field_grid\n\n\ndef _arange(_min, _max, _step):\n \"\"\" Alternative to numpy's arange that handles well floating point steps\n Also happens to give \"rounder\" decimals than np.arange =)\n \"\"\"\n return np.linspace(_min, _max, 1 + int(np.rint((_max - _min) / _step)))\n\nclass potentialSolution:\n # Initializer / Instance Attributes\n def __init__(\n self, \n charge_objs,\n x_range=[-10, 10],\n y_range=[-10, 10],\n z_range=[-10, 10],\n h=0.01, verbose=0):\n #\n self.h = h\n self.verbose = verbose\n self.xi = _arange(x_range[0], x_range[1], h)\n self.yi = _arange(y_range[0], y_range[1], h)\n self.zi = _arange(z_range[0], z_range[1], h)\n if verbose > 0:\n print('Calculating potential')\n self.phi = potential(\n charge_objs, x_range=x_range,\n y_range=y_range, z_range=z_range, h=h)\n self.phi = np.nan_to_num(self.phi)\n #\n if verbose > 0:\n print('Calculating gradient')\n dPhiDx, dPhiDy, dPhiDz = np.gradient(self.phi)\n self.DPhi = {\n 'dx': dPhiDx,\n 'dy': dPhiDy,\n 'dz': dPhiDz}\n #\n self.D2Phi = {}\n for key, value in self.DPhi.items():\n if verbose > 0:\n print('Calculating hessian row {}'.format(key))\n dVDx, dVDy, dVDz = np.gradient(value)\n self.D2Phi[key] = {\n 'dx': dVDx,\n 'dy': dVDy,\n 'dz': dVDz}\n self.dimLookup = {\n 0: 'dx', 1: 'dy', 2: 'dz'}\n 
self.hessianPhi = np.zeros([\n self.xi.size, self.yi.size, self.zi.size,\n 3, 3],\n dtype=float)\n dL = self.dimLookup\n for (i, j, k, a, b), _ in np.ndenumerate(self.hessianPhi):\n self.hessianPhi[i][j][k][a][b] = self.D2Phi[dL[a]][dL[b]][i, j, k]\n\n def getActivatingFunction(self, x, y, z, directionVector):\n return np.inner(\n directionVector,\n np.inner(\n self.hessianPhi[x, y, z, :, :],\n directionVector))" ]
[ [ "numpy.inner", "numpy.gradient", "numpy.rint", "numpy.nan_to_num", "numpy.ndenumerate", "numpy.zeros", "numpy.empty" ] ]
tudorcebere/PyTorchXAI
[ "9ea70c1a1ab66323aa21484a8066512c9cd4fc43" ]
[ "examples/tutorial_01_tensorboard_mnist/mnist/model.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nuse_cuda = torch.cuda.is_available()\ndevice = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = nn.Conv2d(1, 20, 5, 1)\n self.conv2 = nn.Conv2d(20, 50, 5, 1)\n self.fc1 = nn.Linear(4 * 4 * 50, 500)\n self.fc2 = nn.Linear(500, 10)\n\n def forward(self, x):\n x = F.relu(self.conv1(x))\n x = F.max_pool2d(x, 2, 2)\n x = F.relu(self.conv2(x))\n x = F.max_pool2d(x, 2, 2)\n x = x.view(-1, 4 * 4 * 50)\n x = F.relu(self.fc1(x))\n x = self.fc2(x)\n return F.log_softmax(x, dim=1)\n\n\nmodel = Net().to(device)\n" ]
[ [ "torch.nn.functional.log_softmax", "torch.nn.Conv2d", "torch.nn.Linear", "torch.cuda.is_available", "torch.device", "torch.nn.functional.max_pool2d" ] ]
plumdeq/hypothtest
[ "f98ddc5c71873e5a9c63bbb99ba41a5ffcdc66c5" ]
[ "hypotest/stats/generalization_stats.py" ]
[ "# coding: utf8\n\"\"\"\nauthor: Asan Agibetov\n\n Copyright 2015-2017 Asan Agibetov <asan.agibetov@gmail.com>\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\nScript to print latex tables for the generalization stats over different\nconfigurations of the hypothesis\n\nEssentially, we are producing different *delta* confidences, starting from an\ninitial configuration of the hypothesis, by slightly changing the source and the\ntarget of the hypothesis. And we do so for all the unevidenced nodes.\n\n\"\"\"\nimport pandas as pd\n\nfrom hypotest import utils as hypotest_utils\nfrom hypotest.print_stats import utils as stats_utils\nfrom hypotest.assert_evidence import assert_evidence, unassert_evidence\nfrom hypotest import assert_endpoints\nfrom hypotest.inference import generalization\n\n\ndef generalization_stats(H, endpoints=None, evidenced_nodes=None):\n \"\"\"\n (hypothgraph, (source, target)@opt, [evidenced_node]@opt) ->\n generalization obj\n\n Simulate the generalization of the hypothesis and collect all data in a\n generalization object\n\n The generalization object is\n\n {\n 'hyperthropy': {\n 'confidence': [0.2, ...],\n 'dist_delta': [-1, ...]\n },\n 'cell viability': {\n ...\n }\n }\n\n\n \"\"\"\n # assert new endpoints and evidenced nodes in the copy of the graph and\n # return reference to it\n H = stats_utils.preapre_hypothesis_graph(H, endpoints, evidenced_nodes)\n\n source, target = hypotest_utils.find_causal_endpoints(H)\n unevidenced_nodes = hypotest_utils.find_missing_nodes(H)\n\n generalizations = {}\n # we evidence one node at a time and see how well does it generalize\n for unevidenced_node in unevidenced_nodes:\n generalizations[unevidenced_node] = generalize_wrapper(\n H, unevidenced_node, f=generalization.generalization_data_for_plot)\n\n return generalizations\n\n\ndef generalize_wrapper(H, unevidenced_node, f=generalization.generalize,\n *args, **kwargs):\n \"\"\"\n () -> evidences ``unevidenced_node``; f() ; unevidence back the node\n\n ``f`` is the generalization function\n\n \"\"\"\n assert_evidence(H, unevidenced_node)\n result = f(H, *args, **kwargs)\n unassert_evidence(H, unevidenced_node)\n\n return result\n\n\ndef convert_to_dataframe(H, endpoints=None, evidenced_nodes=None):\n \"\"\"\n (hypothgraph, (source, target)@opt, [evidenced_node]@opt) ->\n generalization obj\n\n Data preparation for a dataframe, such that we have\n\n index confidence dist_delta conf_delta node_id\n 0 0.2 -1 0.5 'hyperthropy'\n 1 0.2 -2 0.5 'hyperthropy'\n ...\n 2 0.2 -1 0.5 'cell viability'\n 3 0.2 -3 0.5 'cell viability'\n\n The stats object is\n\n {\n 'hyperthropy': {\n 'confidences': [0.2, ...],\n 'dist_deltas': [-1, ...]\n },\n 'cell viability': {\n ...\n }\n }\n\n \"\"\"\n generalization_obj = generalization_stats(H, endpoints, evidenced_nodes)\n # we need to duplicate node_id for all other values, we identify the length\n # of other arrays only ones\n series_size = None\n\n # final dataframe\n final_df = None\n\n for node_id, stats_obj in generalization_obj.items():\n if series_size 
is None:\n series_size = len(stats_obj['confidences'])\n\n stats_obj['unevidenced_node'] = [H.node[node_id]['label']] * \\\n series_size\n\n # first time creating the dataframe\n if final_df is None:\n final_df = pd.DataFrame(stats_obj)\n else:\n df = pd.DataFrame(stats_obj)\n final_df = pd.concat([final_df, df])\n final_df = final_df.reset_index(drop=True)\n\n return final_df\n\n\ndef generalization_different_evidenced_nodes(H, endpoints, evidenced_list,\n evidenced_list_labels):\n \"\"\"\n (hypothgraph, (source, target),\n [[evidenced1, ....], ...], [label_list1, ...]) -> stats_obj\n\n Constructs one hypothesis configuration for each entry of the\n evidenced_list, and compares the generalization scores for each of\n them\n\n \"\"\"\n source, target = endpoints\n hypoth_confs = [H.copy() for _ in range(len(evidenced_list))]\n\n stats_evidenced = {}\n\n # evidence all the needed nodes\n for hypoth_conf, evidenced_nodes, label in zip(hypoth_confs,\n evidenced_list,\n evidenced_list_labels):\n assert_endpoints.assert_endpoints(hypoth_conf, source, target)\n for evidenced_node in evidenced_nodes:\n assert_evidence(hypoth_conf, evidenced_node)\n\n stats_evidenced[label] = \\\n generalization.generalization_data_for_plot(hypoth_conf)\n\n return stats_evidenced\n\n\n# CAN BE REFACTORED SO THAT IT REUSES THE PREVIOUS FUNCTION FOR DATAFRAME CONVERSION\n#\ndef generalization_evidenced_df(H, endpoints, evidenced_list,\n evidenced_list_labels):\n \"\"\"\n Arranges statistics for different generalizations under different hypothesis\n configurations in a Pandas dataframe\n\n \"\"\"\n evidenced_obj = generalization_different_evidenced_nodes(\n H, endpoints, evidenced_list, evidenced_list_labels)\n\n # we need to duplicate node_id for all other values, we identify the length\n # of other arrays only once\n series_size = None\n\n # final dataframe\n final_df = None\n\n for label, stats_obj in evidenced_obj.items():\n if series_size is None:\n series_size = len(stats_obj['confidences'])\n\n stats_obj['evidenced configuration'] = [label] * \\\n series_size\n\n # first time creating the dataframe\n if final_df is None:\n final_df = pd.DataFrame(stats_obj)\n else:\n df = pd.DataFrame(stats_obj)\n final_df = pd.concat([final_df, df])\n final_df = final_df.reset_index(drop=True)\n\n return final_df\n" ]
[ [ "pandas.concat", "pandas.DataFrame" ] ]
NuTufts/detr
[ "2c09de90cf1dbdb7aea3311c9c04a90a40b038f8" ]
[ "engine.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\"\"\"\nTrain and eval functions used in main.py\n\"\"\"\nimport math\nimport os\nimport sys\nfrom typing import Iterable\n\nimport torch\n\nimport util.misc as utils\nfrom datasets.coco_eval import CocoEvaluator\nfrom datasets.panoptic_eval import PanopticEvaluator\n\n\ndef train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module,\n data_loader: Iterable, optimizer: torch.optim.Optimizer,\n device: torch.device, epoch: int, max_norm: float = 0):\n model.train()\n criterion.train()\n metric_logger = utils.MetricLogger(delimiter=\" \")\n metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))\n metric_logger.add_meter('class_error', utils.SmoothedValue(window_size=1, fmt='{value:.2f}'))\n header = 'Epoch: [{}]'.format(epoch)\n print_freq = 10\n\n for samples, targets in metric_logger.log_every(data_loader, print_freq, header):\n samples = samples.to(device)\n targets = [{k: v.to(device) for k, v in t.items()} for t in targets]\n\n outputs = model(samples)\n loss_dict = criterion(outputs, targets)\n weight_dict = criterion.weight_dict\n losses = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict)\n\n # reduce losses over all GPUs for logging purposes\n loss_dict_reduced = utils.reduce_dict(loss_dict)\n loss_dict_reduced_unscaled = {f'{k}_unscaled': v\n for k, v in loss_dict_reduced.items()}\n loss_dict_reduced_scaled = {k: v * weight_dict[k]\n for k, v in loss_dict_reduced.items() if k in weight_dict}\n losses_reduced_scaled = sum(loss_dict_reduced_scaled.values())\n\n loss_value = losses_reduced_scaled.item()\n\n if not math.isfinite(loss_value):\n print(\"Loss is {}, stopping training\".format(loss_value))\n print(loss_dict_reduced)\n sys.exit(1)\n\n optimizer.zero_grad()\n losses.backward()\n if max_norm > 0:\n torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)\n optimizer.step()\n\n metric_logger.update(loss=loss_value, **loss_dict_reduced_scaled, **loss_dict_reduced_unscaled)\n metric_logger.update(class_error=loss_dict_reduced['class_error'])\n metric_logger.update(lr=optimizer.param_groups[0][\"lr\"])\n # gather the stats from all processes\n metric_logger.synchronize_between_processes()\n print(\"Averaged stats:\", metric_logger)\n return {k: meter.global_avg for k, meter in metric_logger.meters.items()}\n\n\n@torch.no_grad()\ndef evaluate(model, criterion, postprocessors, data_loader, base_ds, device, output_dir):\n model.eval()\n criterion.eval()\n\n metric_logger = utils.MetricLogger(delimiter=\" \")\n metric_logger.add_meter('class_error', utils.SmoothedValue(window_size=1, fmt='{value:.2f}'))\n header = 'Test:'\n\n iou_types = tuple(k for k in ('segm', 'bbox') if k in postprocessors.keys())\n if base_ds is not None:\n coco_evaluator = CocoEvaluator(base_ds, iou_types)\n # coco_evaluator.coco_eval[iou_types[0]].params.iouThrs = [0, 0.1, 0.5, 0.75]\n else:\n coco_evaluator = None\n\n panoptic_evaluator = None\n if 'panoptic' in postprocessors.keys():\n panoptic_evaluator = PanopticEvaluator(\n data_loader.dataset.ann_file,\n data_loader.dataset.ann_folder,\n output_dir=os.path.join(output_dir, \"panoptic_eval\"),\n )\n\n for samples, targets in metric_logger.log_every(data_loader, 10, header):\n samples = samples.to(device)\n targets = [{k: v.to(device) for k, v in t.items()} for t in targets]\n\n outputs = model(samples)\n loss_dict = criterion(outputs, targets)\n weight_dict = criterion.weight_dict\n\n # reduce losses over all 
GPUs for logging purposes\n loss_dict_reduced = utils.reduce_dict(loss_dict)\n loss_dict_reduced_scaled = {k: v * weight_dict[k]\n for k, v in loss_dict_reduced.items() if k in weight_dict}\n loss_dict_reduced_unscaled = {f'{k}_unscaled': v\n for k, v in loss_dict_reduced.items()}\n metric_logger.update(loss=sum(loss_dict_reduced_scaled.values()),\n **loss_dict_reduced_scaled,\n **loss_dict_reduced_unscaled)\n metric_logger.update(class_error=loss_dict_reduced['class_error'])\n\n orig_target_sizes = torch.stack([t[\"orig_size\"] for t in targets], dim=0)\n results = postprocessors['bbox'](outputs, orig_target_sizes)\n if 'segm' in postprocessors.keys():\n target_sizes = torch.stack([t[\"size\"] for t in targets], dim=0)\n results = postprocessors['segm'](results, outputs, orig_target_sizes, target_sizes)\n res = {target['image_id'].item(): output for target, output in zip(targets, results)}\n if coco_evaluator is not None:\n coco_evaluator.update(res)\n\n if panoptic_evaluator is not None:\n res_pano = postprocessors[\"panoptic\"](outputs, target_sizes, orig_target_sizes)\n for i, target in enumerate(targets):\n image_id = target[\"image_id\"].item()\n file_name = f\"{image_id:012d}.png\"\n res_pano[i][\"image_id\"] = image_id\n res_pano[i][\"file_name\"] = file_name\n\n panoptic_evaluator.update(res_pano)\n\n # gather the stats from all processes\n metric_logger.synchronize_between_processes()\n print(\"Averaged stats:\", metric_logger)\n if coco_evaluator is not None:\n coco_evaluator.synchronize_between_processes()\n if panoptic_evaluator is not None:\n panoptic_evaluator.synchronize_between_processes()\n\n # accumulate predictions from all images\n if coco_evaluator is not None:\n coco_evaluator.accumulate()\n coco_evaluator.summarize()\n panoptic_res = None\n if panoptic_evaluator is not None:\n panoptic_res = panoptic_evaluator.summarize()\n stats = {k: meter.global_avg for k, meter in metric_logger.meters.items()}\n if coco_evaluator is not None:\n if 'bbox' in postprocessors.keys():\n stats['coco_eval_bbox'] = coco_evaluator.coco_eval['bbox'].stats.tolist()\n if 'segm' in postprocessors.keys():\n stats['coco_eval_masks'] = coco_evaluator.coco_eval['segm'].stats.tolist()\n if panoptic_res is not None:\n stats['PQ_all'] = panoptic_res[\"All\"]\n stats['PQ_th'] = panoptic_res[\"Things\"]\n stats['PQ_st'] = panoptic_res[\"Stuff\"]\n return stats, coco_evaluator\n" ]
[ [ "torch.stack", "torch.no_grad" ] ]
pauljxtan/pystuff
[ "5abceaa95f8e39e6ef9ba9e937a745f4dc3996c4" ]
[ "pycompvis/compvis/tests.py" ]
[ "import numpy as np\nimport random\nimport unittest\n\nimport compvis.imgform.transforms as tr\n\nclass TestTransforms(unittest.TestCase):\n\n def test_2d_transforms_random_values(self):\n # Get some random vectors and parameters\n x = np.random.random(2)\n t = np.random.random(2)\n theta = np.pi / np.random.random()\n s = np.random.random()\n a = np.random.random()\n b = np.random.random()\n A = np.random.random((2, 3))\n x_hom = np.random.random(3)\n H_hom = np.random.random((3, 3))\n s_sq = np.random.random(2)\n a_psf = np.random.random(8)\n\n # Error tolerance\n epsilon = 1e-6\n \n # Translation\n expected = np.array((x[0] + t[0], x[1] + t[1]))\n result = tr.translate(x, t)\n self.assertTrue((np.abs(expected - result) < epsilon).all())\n\n # 2D Euclidean transform\n expected = np.array((np.cos(theta)*x[0] - np.sin(theta)*x[1] + t[0]*1,\n np.sin(theta)*x[0] + np.cos(theta)*x[1] + t[1]*1))\n result = tr.euclidean_2d(x, theta, t)\n self.assertTrue((np.abs(expected - result) < epsilon).all())\n\n # 2D similarity transform\n expected = np.array((s*np.cos(theta)*x[0] - s*np.sin(theta)*x[1]\n + t[0]*1,\n s*np.sin(theta)*x[0] + s*np.cos(theta)*x[1]\n + t[1]*1))\n result = tr.similarity_2d(x, s, theta, t)\n self.assertTrue((np.abs(expected - result) < epsilon).all())\n\n # 2D similarity transform (alternate)\n expected = np.array((a*x[0] - b*x[1] + t[0]*1,\n b*x[0] + a*x[1] + t[1]*1))\n result = tr.similarity_2d_alt(x, a, b, t)\n self.assertTrue((np.abs(expected - result) < epsilon).all())\n\n # Affine transform\n expected = np.array((A[0,0]*x[0] + A[0,1]*x[1] + A[0,2]*1,\n A[1,0]*x[0] + A[1,1]*x[1] + A[1,2]*1))\n result = tr.affine_2d(x, A)\n self.assertTrue((np.abs(expected - result) < epsilon).all())\n\n # Stretch / squash\n expected = np.array((s_sq[0]*x[0] + t[0], s_sq[1]*x[1] + t[1]))\n result = tr.stretch_2d(x, s_sq, t)\n self.assertTrue((np.abs(expected - result) < epsilon).all())\n\n # Planar surface flow\n expected = np.array((a_psf[0] + a_psf[1]*x[0] + a_psf[2]*x[1]\n + a_psf[6]*x[0]**2 + a_psf[7]*x[0]*x[1],\n a_psf[3] + a_psf[4]*x[0] + a_psf[5]*x[1]\n + a_psf[7]*x[0]**2 + a_psf[6]*x[0]*x[1]))\n result = tr.planar_surface_flow(x, a_psf)\n self.assertTrue((np.abs(expected - result) < epsilon).all())\n\n # Bilinear interpolant\n expected = np.array((a_psf[0] + a_psf[1]*x[0] + a_psf[2]*x[1]\n + a_psf[6]*x[0]*x[1],\n a_psf[3] + a_psf[4]*x[0] + a_psf[5]*x[1]\n + a_psf[7]*x[0]*x[1]))\n result = tr.bilinear_interpolant(x, a_psf)\n self.assertTrue((np.abs(expected - result) < epsilon).all())\n\nif __name__ == '__main__':\n unittest.main()\n\n" ]
[ [ "numpy.random.random", "numpy.abs", "numpy.cos", "numpy.sin", "numpy.array" ] ]
ioannis-krmp/Vitis-Tutorials
[ "c1b42f316ebe195b5f467dd3f949c003ba919647", "c1b42f316ebe195b5f467dd3f949c003ba919647" ]
[ "Machine_Learning/Design_Tutorials/04-Keras_GoogleNet_ResNet/files/code/cifar10_eval_graph.py", "Machine_Learning/Feature_Tutorials/tf2_quant_fine_tune/files/make_target.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n'''\n## © Copyright (C) 2016-2020 Xilinx, Inc\n##\n## Licensed under the Apache License, Version 2.0 (the \"License\"). You may\n## not use this file except in compliance with the License. A copy of the\n## License is located at\n##\n## http://www.apache.org/licenses/LICENSE-2.0\n##\n## Unless required by applicable law or agreed to in writing, software\n## distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n## WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n## License for the specific language governing permissions and limitations\n## under the License.\n'''\n# Author: Daniele Bagni, Xilinx Inc\n# date 6 May 2021\n\n##################################################################\n# Evaluation of frozen/quantized graph\n#################################################################\n\nimport os\nimport sys\nimport glob\nimport argparse\nimport shutil\nimport tensorflow as tf\nimport numpy as np\nimport cv2\nimport gc # memory garbage collector #DB\n\nimport tensorflow.contrib.decent_q\n\nfrom tensorflow.python.platform import gfile\nfrom tensorflow.keras.preprocessing.image import img_to_array\n\nfrom config import cifar10_config as cfg #DB\n\n\n#DB\nDATAS_DIR = cfg.DATASET_DIR\nTEST_DIR = os.path.join(DATAS_DIR, \"test\")\nprint(\"\\n eval_graph.py runs from \", DATAS_DIR)\n\n\ndef graph_eval(input_graph_def, input_node, output_node):\n\n #Reading image paths\n test_img_paths = [img_path for img_path in glob.glob(TEST_DIR+\"/*/*.png\")]\n NUMEL = len(test_img_paths)\n assert (NUMEL > 0 )\n\n y_test= np.zeros((NUMEL,1), dtype=\"uint8\")\n x_test= np.zeros((NUMEL,cfg.IMAGE_HEIGHT,cfg.IMAGE_WIDTH,3),dtype=\"uint8\")\n\n i = 0\n for img_path in test_img_paths:\n img = cv2.imread(img_path, cv2.IMREAD_COLOR)\n ##swap channels: from BGR to RGB\n #B, G, R = cv2.split(img)\n #img = cv2.merge([R, G, B])\n\n img_array = img_to_array(img, data_format=None)\n\n filename = os.path.basename(img_path)\n class_name = filename.split(\"_\")[0]\n label = cfg.labelNames_dict[class_name]\n\n #print(\"filename: \", img_path)\n #print(\"classname: \", class_name)\n\n x_test[i] = img_array\n y_test[i] = int(label)\n i = i + 1\n\n x_test = cfg.Normalize(x_test)\n #print(x_test[0])\n\n #collect garbage to save memory #DB\n #del img\n #del test_img_paths\n #del img_path\n #gc.collect()\n\n x_test = np.reshape(x_test, [-1, cfg.IMAGE_HEIGHT,cfg.IMAGE_WIDTH, 3])\n y_test = tf.keras.utils.to_categorical(y_test, num_classes=cfg.NUM_CLASSES)\n\n tf.compat.v1.import_graph_def(input_graph_def,name = '')\n\n # Get input placeholders & tensors\n images_in = tf.compat.v1.get_default_graph().get_tensor_by_name(input_node+':0')\n labels = tf.compat.v1.placeholder(tf.int32,shape = [None,cfg.NUM_CLASSES])\n\n # get output tensors\n logits = tf.compat.v1.get_default_graph().get_tensor_by_name(output_node+':0')\n\n # top 5 and top 1 accuracy\n in_top5 = tf.nn.in_top_k(predictions=logits, targets=tf.argmax(labels, 1), k=5)\n in_top1 = tf.nn.in_top_k(predictions=logits, targets=tf.argmax(labels, 1), k=1)\n top5_acc = tf.reduce_mean(tf.cast(in_top5, tf.float32))\n top1_acc = tf.reduce_mean(tf.cast(in_top1, tf.float32))\n\n # Create the Computational graph\n with tf.compat.v1.Session() as sess:\n\n sess.run(tf.compat.v1.initializers.global_variables())\n\n feed_dict={images_in: x_test, labels: y_test}\n t5_acc,t1_acc = sess.run([top5_acc,top1_acc], feed_dict)\n #print(dir(x_test))\n #print(max(x_test[0]))\n 
#print(min(x_test[0]))\n print (' Top 1 accuracy with validation set: {:1.4f}'.format(t1_acc))\n print (' Top 5 accuracy with validation set: {:1.4f}'.format(t5_acc))\n\n print ('FINISHED!')\n return\n\n\ndef main(unused_argv):\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = FLAGS.gpu\n input_graph_def = tf.Graph().as_graph_def()\n input_graph_def.ParseFromString(tf.io.gfile.GFile(FLAGS.graph, \"rb\").read())\n graph_eval(input_graph_def, FLAGS.input_node, FLAGS.output_node)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--graph', type=str,\n default='./freeze/frozen_graph.pb',\n help='graph file (.pb) to be evaluated.')\n parser.add_argument('--input_node', type=str,\n default='images_in',\n help='input node.')\n parser.add_argument('--output_node', type=str,\n default='dense_1/BiasAdd',\n help='output node.')\n parser.add_argument('--class_num', type=int,\n default=cfg.NUM_CLASSES,\n help='number of classes.')\n parser.add_argument('--gpu', type=str,\n default='0',\n help='gpu device id.')\n FLAGS, unparsed = parser.parse_known_args()\n tf.compat.v1.app.run(main=main, argv=[sys.argv[0]] + unparsed)\n", "'''\n Copyright 2021 Xilinx Inc.\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n http://www.apache.org/licenses/LICENSE-2.0\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n'''\n\n'''\nMake the target folder\nCreates images, copies application code and compiled xmodel to 'target'\n'''\n\n'''\nAuthor: Mark Harvey\n'''\n\n\nimport argparse\nimport os\nimport shutil\nimport sys\nimport cv2\nfrom tqdm import tqdm\n\n# Silence TensorFlow messages\nos.environ['TF_CPP_MIN_LOG_LEVEL']='3'\n\nimport tensorflow as tf\n\nfrom dataset_utils import parser, resize_centercrop\n\nDIVIDER = '-----------------------------------------'\n\n\n\ndef input_fn(tfrec_dir, batchsize):\n '''\n Dataset pipeline\n '''\n tfrecord_files = tf.data.Dataset.list_files('{}/*.tfrecord'.format(tfrec_dir), shuffle=False)\n dataset = tf.data.TFRecordDataset(tfrecord_files)\n dataset = dataset.map(parser, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n dataset = dataset.map(lambda x,y: resize_centercrop(x,y), num_parallel_calls=tf.data.experimental.AUTOTUNE)\n dataset = dataset.batch(batchsize, drop_remainder=False)\n return dataset\n\n\n\ndef make_target(target_dir,tfrec_dir,num_images,app_dir,model):\n\n # remove any previous data\n shutil.rmtree(target_dir, ignore_errors=True) \n os.makedirs(target_dir)\n os.makedirs(target_dir+'/images')\n\n # make the dataset\n target_dataset = input_fn(tfrec_dir,1)\n\n '''\n # extract images & labels from TFRecords\n # save as JPEG image files\n # the label will be built into the JPEG filename\n '''\n i = 0\n for tfr in tqdm(target_dataset):\n\n label = tfr[1][0].numpy()\n\n # reshape image to remove batch dimension\n img = tf.reshape(tfr[0], [tfr[0].shape[1],tfr[0].shape[2],tfr[0].shape[3]] )\n\n # JPEG encode\n img = tf.cast(img, tf.uint8)\n img = tf.io.encode_jpeg(img)\n\n # save as file\n filepath = os.path.join(target_dir, 'images', str(label)+'_image'+str(i)+'.jpg' )\n tf.io.write_file(filepath, img)\n\n i += 1\n\n\n # copy application code\n 
print('Copying application code from',app_dir,'...')\n shutil.copy(os.path.join(app_dir, 'app_mt.py'), target_dir)\n\n # copy compiled model\n print('Copying compiled model from',model,'...')\n shutil.copy(model, target_dir)\n\n\n return\n\n\n\ndef main():\n\n # construct the argument parser and parse the arguments\n ap = argparse.ArgumentParser()\n ap.add_argument('-td','--target_dir', type=str, default='target', help='Full path of target folder. Default is target')\n ap.add_argument('-t', '--tfrec_dir', type=str, default='tfrec_val', help='Path to folder for reading TFRecord files. Default is tfrec_val') \n ap.add_argument('-n', '--num_images', type=int, default=1000, help='Number of test images. Default is 1000')\n ap.add_argument('-a', '--app_dir', type=str, default='application', help='Full path of application code folder. Default is application')\n ap.add_argument('-m', '--model', type=str, default='compiled_model/mobilenet.xmodel', help='Full path of compiled model.Default is compiled_model/mobilenet.xmodel')\n args = ap.parse_args() \n\n print('\\n------------------------------------')\n print(sys.version)\n print('------------------------------------')\n print ('Command line options:')\n print (' --target_dir : ', args.target_dir)\n print (' --tfrec_dir : ', args.tfrec_dir)\n print (' --num_images : ', args.num_images)\n print (' --app_dir : ', args.app_dir)\n print (' --model : ', args.model)\n print('------------------------------------\\n')\n\n\n make_target(args.target_dir,args.tfrec_dir,args.num_images,args.app_dir,args.model)\n\n\nif __name__ == \"__main__\":\n main()\n \n" ]
[ [ "tensorflow.compat.v1.get_default_graph", "tensorflow.keras.preprocessing.image.img_to_array", "tensorflow.Graph", "tensorflow.compat.v1.import_graph_def", "numpy.reshape", "tensorflow.io.gfile.GFile", "tensorflow.cast", "tensorflow.compat.v1.Session", "tensorflow.compat.v1.placeholder", "tensorflow.argmax", "tensorflow.compat.v1.initializers.global_variables", "numpy.zeros", "tensorflow.keras.utils.to_categorical", "tensorflow.compat.v1.app.run" ], [ "tensorflow.data.TFRecordDataset", "tensorflow.cast", "tensorflow.reshape", "tensorflow.io.encode_jpeg", "tensorflow.io.write_file" ] ]
thmegy/mmdetection
[ "9d6420b9d2dd339d0952d2e1d8ec2dd22173226c" ]
[ "tools/train.py" ]
[ "# Copyright (c) OpenMMLab. All rights reserved.\nimport argparse\nimport copy\nimport os\nimport os.path as osp\nimport time\nimport warnings\n\nimport mmcv\nimport torch\nimport torch.distributed as dist\nfrom mmcv import Config, DictAction\nfrom mmcv.runner import get_dist_info, init_dist\nfrom mmcv.utils import get_git_hash\n\nfrom mmdet import __version__\nfrom mmdet.apis import init_random_seed, set_random_seed, train_detector\nfrom mmdet.datasets import build_dataset\nfrom mmdet.models import build_detector\nfrom mmdet.utils import (collect_env, get_device, get_root_logger,\n setup_multi_processes, update_data_root)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Train a detector')\n parser.add_argument('config', help='train config file path')\n parser.add_argument('--work-dir', help='the dir to save logs and models')\n parser.add_argument(\n '--resume-from', help='the checkpoint file to resume from')\n parser.add_argument(\n '--auto-resume',\n action='store_true',\n help='resume from the latest checkpoint automatically')\n parser.add_argument(\n '--no-validate',\n action='store_true',\n help='whether not to evaluate the checkpoint during training')\n group_gpus = parser.add_mutually_exclusive_group()\n group_gpus.add_argument(\n '--gpus',\n type=int,\n help='(Deprecated, please use --gpu-id) number of gpus to use '\n '(only applicable to non-distributed training)')\n group_gpus.add_argument(\n '--gpu-ids',\n type=int,\n nargs='+',\n help='(Deprecated, please use --gpu-id) ids of gpus to use '\n '(only applicable to non-distributed training)')\n group_gpus.add_argument(\n '--gpu-id',\n type=int,\n default=0,\n help='id of gpu to use '\n '(only applicable to non-distributed training)')\n parser.add_argument('--seed', type=int, default=None, help='random seed')\n parser.add_argument(\n '--diff-seed',\n action='store_true',\n help='Whether or not set different seeds for different ranks')\n parser.add_argument(\n '--deterministic',\n action='store_true',\n help='whether to set deterministic options for CUDNN backend.')\n parser.add_argument(\n '--options',\n nargs='+',\n action=DictAction,\n help='override some settings in the used config, the key-value pair '\n 'in xxx=yyy format will be merged into config file (deprecate), '\n 'change to --cfg-options instead.')\n parser.add_argument(\n '--cfg-options',\n nargs='+',\n action=DictAction,\n help='override some settings in the used config, the key-value pair '\n 'in xxx=yyy format will be merged into config file. If the value to '\n 'be overwritten is a list, it should be like key=\"[a,b]\" or key=a,b '\n 'It also allows nested list/tuple values, e.g. 
key=\"[(a,b),(c,d)]\" '\n 'Note that the quotation marks are necessary and that no white space '\n 'is allowed.')\n parser.add_argument(\n '--launcher',\n choices=['none', 'pytorch', 'slurm', 'mpi'],\n default='none',\n help='job launcher')\n parser.add_argument('--local_rank', type=int, default=0)\n parser.add_argument('--incremental-learning', action='store_true', help='Use sampler for incremental learning')\n parser.add_argument(\n '--auto-scale-lr',\n action='store_true',\n help='enable automatically scaling LR.')\n args = parser.parse_args()\n if 'LOCAL_RANK' not in os.environ:\n os.environ['LOCAL_RANK'] = str(args.local_rank)\n\n if args.options and args.cfg_options:\n raise ValueError(\n '--options and --cfg-options cannot be both '\n 'specified, --options is deprecated in favor of --cfg-options')\n if args.options:\n warnings.warn('--options is deprecated in favor of --cfg-options')\n args.cfg_options = args.options\n\n return args\n\n\ndef main():\n args = parse_args()\n\n cfg = Config.fromfile(args.config)\n\n # update data root according to MMDET_DATASETS\n update_data_root(cfg)\n\n if args.cfg_options is not None:\n cfg.merge_from_dict(args.cfg_options)\n\n if args.auto_scale_lr:\n if 'auto_scale_lr' in cfg and \\\n 'enable' in cfg.auto_scale_lr and \\\n 'base_batch_size' in cfg.auto_scale_lr:\n cfg.auto_scale_lr.enable = True\n else:\n warnings.warn('Can not find \"auto_scale_lr\" or '\n '\"auto_scale_lr.enable\" or '\n '\"auto_scale_lr.base_batch_size\" in your'\n ' configuration file. Please update all the '\n 'configuration files to mmdet >= 2.23.1.')\n\n # set multi-process settings\n setup_multi_processes(cfg)\n\n # set cudnn_benchmark\n if cfg.get('cudnn_benchmark', False):\n torch.backends.cudnn.benchmark = True\n\n # work_dir is determined in this priority: CLI > segment in file > filename\n if args.work_dir is not None:\n # update configs according to CLI args if args.work_dir is not None\n cfg.work_dir = args.work_dir\n elif cfg.get('work_dir', None) is None:\n # use config filename as default work_dir if cfg.work_dir is None\n cfg.work_dir = osp.join('./work_dirs',\n osp.splitext(osp.basename(args.config))[0])\n if args.resume_from is not None:\n cfg.resume_from = args.resume_from\n cfg.auto_resume = args.auto_resume\n if args.gpus is not None:\n cfg.gpu_ids = range(1)\n warnings.warn('`--gpus` is deprecated because we only support '\n 'single GPU mode in non-distributed training. '\n 'Use `gpus=1` now.')\n if args.gpu_ids is not None:\n cfg.gpu_ids = args.gpu_ids[0:1]\n warnings.warn('`--gpu-ids` is deprecated, please use `--gpu-id`. '\n 'Because we only support single GPU mode in '\n 'non-distributed training. 
Use the first GPU '\n 'in `gpu_ids` now.')\n if args.gpus is None and args.gpu_ids is None:\n cfg.gpu_ids = [args.gpu_id]\n\n # init distributed env first, since logger depends on the dist info.\n if args.launcher == 'none':\n distributed = False\n else:\n distributed = True\n init_dist(args.launcher, **cfg.dist_params)\n # re-set gpu_ids with distributed training mode\n _, world_size = get_dist_info()\n cfg.gpu_ids = range(world_size)\n\n # create work_dir\n mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))\n # dump config\n cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))\n # init the logger before other steps\n timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())\n log_file = osp.join(cfg.work_dir, f'{timestamp}.log')\n logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)\n\n # init the meta dict to record some important information such as\n # environment info and seed, which will be logged\n meta = dict()\n # log env info\n env_info_dict = collect_env()\n env_info = '\\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()])\n dash_line = '-' * 60 + '\\n'\n logger.info('Environment info:\\n' + dash_line + env_info + '\\n' +\n dash_line)\n meta['env_info'] = env_info\n meta['config'] = cfg.pretty_text\n # log some basic info\n logger.info(f'Distributed training: {distributed}')\n logger.info(f'Config:\\n{cfg.pretty_text}')\n\n cfg.device = get_device()\n # set random seeds\n seed = init_random_seed(args.seed, device=cfg.device)\n seed = seed + dist.get_rank() if args.diff_seed else seed\n logger.info(f'Set random seed to {seed}, '\n f'deterministic: {args.deterministic}')\n set_random_seed(seed, deterministic=args.deterministic)\n cfg.seed = seed\n meta['seed'] = seed\n meta['exp_name'] = osp.basename(args.config)\n\n model = build_detector(\n cfg.model,\n train_cfg=cfg.get('train_cfg'),\n test_cfg=cfg.get('test_cfg'))\n model.init_weights()\n\n datasets = [build_dataset(cfg.data.train)]\n if len(cfg.workflow) == 2:\n val_dataset = copy.deepcopy(cfg.data.val)\n val_dataset.pipeline = cfg.data.train.pipeline\n datasets.append(build_dataset(val_dataset))\n if cfg.checkpoint_config is not None:\n # save mmdet version, config file content and class names in\n # checkpoints as meta data\n cfg.checkpoint_config.meta = dict(\n mmdet_version=__version__ + get_git_hash()[:7],\n CLASSES=datasets[0].CLASSES)\n # add an attribute for visualization convenience\n model.CLASSES = datasets[0].CLASSES\n train_detector(\n model,\n datasets,\n cfg,\n distributed=distributed,\n validate=(not args.no_validate),\n timestamp=timestamp,\n meta=meta,\n incremental_learning=args.incremental_learning)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.distributed.get_rank" ] ]
IdeasLabUT/EDA-Artifact-Detection
[ "7c4bd467bec9099669ddf3581b00a9a843a2f136" ]
[ "MLP_inSample_AWW.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jun 23 12:03:59 2017\n\n@author: Kevin\n\"\"\"\n\nimport numpy as np\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.model_selection import LeaveOneGroupOut,GridSearchCV\n\ndataPath = 'AlanWalksWales/'\n# Comment out one of the two data names to change portion of data to use\ndataName = 'AWW_rest'\n#dataName = 'AWW_walk'\nnJobs = 12 # Number of cores to use\n\n# Load feature matrices, labels, and groups (denoting which labeled time\n# segment each row of the feature matrix comes from)\nfeaturesAll = np.loadtxt(dataPath+dataName+'_all.csv',delimiter=',')\nfeaturesAcc = np.loadtxt(dataPath+dataName+'_acc.csv',delimiter=',')\nfeaturesEda = np.loadtxt(dataPath+dataName+'_eda.csv',delimiter=',')\nlabels = np.loadtxt(dataPath+dataName+'_label.csv')\ngroups = np.loadtxt(dataPath+dataName+'_groups.csv')\n\n# Leave-one-group-out cross-validation\ncv = LeaveOneGroupOut()\n\n# Parameter tuning by grid search\nsolver='lbfgs'\nactivation='relu'\nregParam = 10.0**np.arange(-3,5)\n\n# Comment out one of the choices below (either 1 or 2 hidden layers)\n\n# 1 hidden layer\nhiddenLayerSizes = 2**np.arange(0,8)\n\"\"\"\n# 2 hidden layers\nhidden1,hidden2 = np.meshgrid(2**np.arange(0,8),2**np.arange(0,8))\nhiddenLayerSizes = np.reshape(np.stack([hidden1,hidden2]),\n (2,np.size(hidden1))).T.tolist()\n\"\"\"\nparameters = {'alpha': regParam,\n 'hidden_layer_sizes': hiddenLayerSizes}\n \ngsAll = GridSearchCV(MLPClassifier(solver=solver,activation=activation),\n parameters,'roc_auc',n_jobs=nJobs,cv=cv,refit=False,\n verbose=1)\ngsAll.fit(featuresAll,labels,groups)\nbestAlphaAll = gsAll.best_params_['alpha']\nbestHiddenSizesAll = gsAll.best_params_['hidden_layer_sizes']\n\ngsAcc = GridSearchCV(MLPClassifier(solver=solver,activation=activation),\n parameters,'roc_auc',n_jobs=nJobs,cv=cv,refit=False,\n verbose=1)\ngsAcc.fit(featuresAcc,labels,groups)\nbestAlphaAcc = gsAcc.best_params_['alpha']\nbestHiddenSizesAcc = gsAcc.best_params_['hidden_layer_sizes']\n\ngsEda = GridSearchCV(MLPClassifier(solver=solver,activation=activation),\n parameters,'roc_auc',n_jobs=nJobs,cv=cv,refit=False,\n verbose=1)\ngsEda.fit(featuresEda,labels,groups)\nbestAlphaEda = gsEda.best_params_['alpha']\nbestHiddenSizesEda = gsEda.best_params_['hidden_layer_sizes']\n\npredAll = np.zeros(np.shape(labels))\npredAcc = np.zeros(np.shape(labels))\npredEda = np.zeros(np.shape(labels))\n\nfor train, test in cv.split(featuresAll,labels,groups):\n mlpAll = MLPClassifier(hidden_layer_sizes=bestHiddenSizesAll,\n solver=solver,alpha=bestAlphaAll)\n mlpAll.fit(featuresAll[train,:],labels[train])\n predAll[test] = mlpAll.predict_proba(featuresAll[test,:])[:,1]\n \n mlpAcc = MLPClassifier(hidden_layer_sizes=bestHiddenSizesAcc,\n solver=solver,alpha=bestAlphaAcc)\n mlpAcc.fit(featuresAcc[train,:],labels[train])\n predAcc[test] = mlpAcc.predict_proba(featuresAcc[test,:])[:,1]\n\n mlpEda = MLPClassifier(hidden_layer_sizes=bestHiddenSizesEda,\n solver=solver,alpha=bestAlphaEda)\n mlpEda.fit(featuresEda[train,:],labels[train])\n predEda[test] = mlpEda.predict_proba(featuresEda[test,:])[:,1]\n\n# Save the scores for further analysis\n#np.save('MLPpredAllScores_rest',predAll)\n#np.save('MLPpredAccScores_rest',predAcc)\n#np.save('MLPpredEdaScores_rest',predEda)\n\nprint('MLP AUC ALL: %f (%s)' % (roc_auc_score(labels,predAll),gsAll.best_params_))\nprint('MLP AUC ACC: %f (%s)' % (roc_auc_score(labels,predAcc),gsAcc.best_params_))\nprint('MLP AUC EDA: %f (%s)' 
% (roc_auc_score(labels,predEda),gsEda.best_params_))\n" ]
[ [ "sklearn.neural_network.MLPClassifier", "sklearn.metrics.roc_auc_score", "numpy.arange", "numpy.shape", "sklearn.model_selection.LeaveOneGroupOut", "numpy.loadtxt" ] ]
aditya-xq/Text-Emotion-Detection-Using-NLP
[ "00b1143e7921f59c93ddc945b53f11b70461a33d" ]
[ "main.py" ]
[ "import pandas as pd\nimport numpy as np\nfrom nltk.corpus import stopwords\nfrom textblob import Word\nimport re\nfrom sklearn import preprocessing\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import RandomForestClassifier\n\ndata = pd.read_csv('text_emotion.csv')\n\ndata = data.drop('author', axis=1)\n\n# Dropping rows with other emotion labels\ndata = data.drop(data[data.sentiment == 'anger'].index)\ndata = data.drop(data[data.sentiment == 'boredom'].index)\ndata = data.drop(data[data.sentiment == 'enthusiasm'].index)\ndata = data.drop(data[data.sentiment == 'empty'].index)\ndata = data.drop(data[data.sentiment == 'fun'].index)\ndata = data.drop(data[data.sentiment == 'relief'].index)\ndata = data.drop(data[data.sentiment == 'surprise'].index)\ndata = data.drop(data[data.sentiment == 'love'].index)\ndata = data.drop(data[data.sentiment == 'hate'].index)\ndata = data.drop(data[data.sentiment == 'neutral'].index)\ndata = data.drop(data[data.sentiment == 'worry'].index)\n\n# Making all letters lowercase\ndata['content'] = data['content'].apply(lambda x: \" \".join(x.lower() for x in x.split()))\n\n# Removing Punctuation, Symbols\ndata['content'] = data['content'].str.replace('[^\\w\\s]',' ')\n\n# Removing Stop Words using NLTK\nstop = stopwords.words('english')\ndata['content'] = data['content'].apply(lambda x: \" \".join(x for x in x.split() if x not in stop))\n\n#Lemmatisation\ndata['content'] = data['content'].apply(lambda x: \" \".join([Word(word).lemmatize() for word in x.split()]))\n#Correcting Letter Repetitions\n\ndef de_repeat(text):\n pattern = re.compile(r\"(.)\\1{2,}\")\n return pattern.sub(r\"\\1\\1\", text)\n\ndata['content'] = data['content'].apply(lambda x: \" \".join(de_repeat(x) for x in x.split()))\n\n# Code to find the top 10,000 rarest words appearing in the data\nfreq = pd.Series(' '.join(data['content']).split()).value_counts()[-10000:]\n\n# Removing all those rarely appearing words from the data\nfreq = list(freq.index)\ndata['content'] = data['content'].apply(lambda x: \" \".join(x for x in x.split() if x not in freq))\n\n#Encoding output labels 'sadness' as '1' & 'happiness' as '0'\nlbl_enc = preprocessing.LabelEncoder()\ny = lbl_enc.fit_transform(data.sentiment.values)\n\n# Splitting into training and testing data in 90:10 ratio\nX_train, X_val, y_train, y_val = train_test_split(data.content.values, y, stratify=y, random_state=42, test_size=0.1, shuffle=True)\n\n# Extracting TF-IDF parameters\ntfidf = TfidfVectorizer(max_features=1000, analyzer='word',ngram_range=(1,3))\nX_train_tfidf = tfidf.fit_transform(X_train)\nX_val_tfidf = tfidf.fit_transform(X_val)\n\n# Extracting Count Vectors Parameters\ncount_vect = CountVectorizer(analyzer='word')\ncount_vect.fit(data['content'])\nX_train_count = count_vect.transform(X_train)\nX_val_count = count_vect.transform(X_val)\n\n# Model 1: Multinomial Naive Bayes Classifier\nnb = MultinomialNB()\nnb.fit(X_train_tfidf, y_train)\ny_pred = nb.predict(X_val_tfidf)\nprint('naive bayes tfidf accuracy %s' % accuracy_score(y_pred, y_val))\n# naive bayes tfidf accuracy 0.5289017341040463\n\n# Model 2: Linear SVM\nlsvm = SGDClassifier(alpha=0.001, random_state=5, max_iter=15, 
tol=None)\nlsvm.fit(X_train_tfidf, y_train)\ny_pred = lsvm.predict(X_val_tfidf)\nprint('svm using tfidf accuracy %s' % accuracy_score(y_pred, y_val))\n# svm tfidf accuracy 0.5404624277456648\n\n# Model 3: logistic regression\nlogreg = LogisticRegression(C=1)\nlogreg.fit(X_train_tfidf, y_train)\ny_pred = logreg.predict(X_val_tfidf)\nprint('log reg tfidf accuracy %s' % accuracy_score(y_pred, y_val))\n# log reg tfidf accuracy 0.5443159922928709\n\n# Model 4: Random Forest Classifier\nrf = RandomForestClassifier(n_estimators=500)\nrf.fit(X_train_tfidf, y_train)\ny_pred = rf.predict(X_val_tfidf)\nprint('random forest tfidf accuracy %s' % accuracy_score(y_pred, y_val))\n# random forest tfidf accuracy 0.5385356454720617\n\n## Building models using count vectors feature\n# Model 1: Multinomial Naive Bayes Classifier\nnb = MultinomialNB()\nnb.fit(X_train_count, y_train)\ny_pred = nb.predict(X_val_count)\nprint('naive bayes count vectors accuracy %s' % accuracy_score(y_pred, y_val))\n# naive bayes count vectors accuracy 0.7764932562620424\n\n# Model 2: Linear SVM\nlsvm = SGDClassifier(alpha=0.001, random_state=5, max_iter=15, tol=None)\nlsvm.fit(X_train_count, y_train)\ny_pred = lsvm.predict(X_val_count)\nprint('lsvm using count vectors accuracy %s' % accuracy_score(y_pred, y_val))\n# lsvm using count vectors accuracy 0.7928709055876686\n\n# Model 3: Logistic Regression\nlogreg = LogisticRegression(C=1)\nlogreg.fit(X_train_count, y_train)\ny_pred = logreg.predict(X_val_count)\nprint('log reg count vectors accuracy %s' % accuracy_score(y_pred, y_val))\n# log reg count vectors accuracy 0.7851637764932563\n\n# Model 4: Random Forest Classifier\nrf = RandomForestClassifier(n_estimators=500)\nrf.fit(X_train_count, y_train)\ny_pred = rf.predict(X_val_count)\nprint('random forest with count vectors accuracy %s' % accuracy_score(y_pred, y_val))\n# random forest with count vectors accuracy 0.7524084778420038\n\n#Below are 8 random statements. The first 4 depict happiness. The last 4 depict sadness\n\ntweets = pd.DataFrame(['I am very happy today! The atmosphere looks cheerful',\n'Things are looking great. It was such a good day',\n'Success is right around the corner. Lets celebrate this victory',\n'Everything is more beautiful when you experience them with a smile!',\n'Now this is my worst, okay? But I am gonna get better.',\n'I am tired, boss. Tired of being on the road, lonely as a sparrow in the rain. I am tired of all the pain I feel',\n'This is quite depressing. I am filled with sorrow',\n'His death broke my heart. It was a sad day'])\n\n# Doing some preprocessing on these tweets as done before\ntweets[0] = tweets[0].str.replace('[^\\w\\s]',' ')\nfrom nltk.corpus import stopwords\nstop = stopwords.words('english')\ntweets[0] = tweets[0].apply(lambda x: \" \".join(x for x in x.split() if x not in stop))\nfrom textblob import Word\ntweets[0] = tweets[0].apply(lambda x: \" \".join([Word(word).lemmatize() for word in x.split()]))\n\n# Extracting Count Vectors feature from our tweets\ntweet_count = count_vect.transform(tweets[0])\n\n#Predicting the emotion of the tweet using our already trained linear SVM\ntweet_pred = lsvm.predict(tweet_count)\nprint(tweet_pred)\n## result\n## [0 0 0 0 1 1 1 1]\n" ]
[ [ "pandas.read_csv", "sklearn.linear_model.LogisticRegression", "sklearn.ensemble.RandomForestClassifier", "sklearn.naive_bayes.MultinomialNB", "sklearn.model_selection.train_test_split", "pandas.DataFrame", "sklearn.feature_extraction.text.CountVectorizer", "sklearn.preprocessing.LabelEncoder", "sklearn.feature_extraction.text.TfidfVectorizer", "sklearn.linear_model.SGDClassifier", "sklearn.metrics.accuracy_score" ] ]
SBU-BMI/seer_distro_test
[ "2f41510a52d56587c12e26ec3a31b05b80a2fc3b" ]
[ "myscript.py" ]
[ "# Compute patch-level nuclear feature results.\n# Tumor-region only.\n# Change coll_name on line 830 to write to quip_comp.[your_collection_name]\nimport argparse\nimport json\nimport os\nimport subprocess\nimport sys\nimport time\nfrom datetime import datetime\nfrom pathlib import Path\n\nimport cv2\nimport numpy as np\nimport openslide\nimport pandas\n# from planar import BoundingBox, Vec2\nfrom pymongo import MongoClient, errors\nfrom shapely.geometry import Polygon, Point, MultiPoint\nfrom skimage.color import separate_stains, hed_from_rgb\n\n\ndef assure_path_exists(path):\n \"\"\"\n If path exists, great.\n If not, then create it.\n :param path:\n :return:\n \"\"\"\n m_dir = os.path.dirname(path)\n if not os.path.exists(m_dir):\n os.makedirs(m_dir)\n\n\ndef mongodb_connect(client_uri):\n \"\"\"\n Connection routine\n :param client_uri:\n :return:\n \"\"\"\n try:\n return MongoClient(client_uri, serverSelectionTimeoutMS=1)\n except errors.ConnectionFailure:\n print(\"Failed to connect to server {}\".format(client_uri))\n exit(1)\n\n\ndef get_file_list(substr, filepath):\n \"\"\"\n Find lines in data file containing (case_id) substring.\n Return list.\n :param substr:\n :param filepath:\n :return:\n \"\"\"\n lines = []\n with open(filepath) as f:\n for line in f:\n line = line.strip()\n if substr in line:\n lines.append(line)\n f.close()\n return lines\n\n\ndef copy_src_data(dest):\n \"\"\"\n Copy data from nfs location to computation node.\n :param dest:\n :return:\n \"\"\"\n # Get list of csv files containing features for this case_id\n for csv_dir1 in DATA_FILE_SUBFOLDERS:\n source_dir = os.path.join(DATA_FILE_FOLDER, csv_dir1)\n # copy all *.json and *features.csv files\n m_args = list([\"rsync\", \"-ar\", \"--include\", \"*features.csv\", \"--include\", \"*.json\"])\n # m_args = list([\"rsync\", \"-avz\", \"--include\", \"*features.csv\", \"--include\", \"*.json\"])\n m_args.append(source_dir)\n m_args.append(dest)\n print(\"executing \" + ' '.join(m_args))\n subprocess.call(m_args)\n\n # Get slide\n my_file = Path(os.path.join(dest, (CASE_ID + '.svs')))\n if not my_file.is_file():\n svs_list = get_file_list(CASE_ID, 'config/image_path.list')\n svs_path = os.path.join(SVS_IMAGE_FOLDER, svs_list[0])\n print(\"executing scp\", svs_path, dest)\n subprocess.check_call(['scp', svs_path, dest])\n\n\ndef get_tumor_markup(user_name):\n \"\"\"\n Find what the pathologist circled as tumor.\n :param user_name:\n :return:\n \"\"\"\n tumor_markup_list = []\n execution_id = (user_name + \"_Tumor_Region\")\n try:\n client = mongodb_connect('mongodb://' + DB_HOST + ':27017')\n client.server_info() # force connection, trigger error to be caught\n db = client.quip\n coll = db.objects\n filter_q = {\n 'provenance.image.case_id': CASE_ID,\n 'provenance.analysis.execution_id': execution_id\n }\n projection_q = {\n 'geometry.coordinates': 1,\n '_id': 0\n }\n print('quip.objects')\n print(filter_q, ',', projection_q)\n cursor = coll.find(filter_q, projection_q)\n for item in cursor:\n # geometry.coordinates happens to be a list with one thing in it: a list! (of point coordinates).\n temp = item['geometry']['coordinates'] # [ [ [ x, y ], ... ] ]\n points = temp[0] # [ [x, y ], ... 
]\n tumor_markup_list.append(points)\n client.close()\n except errors.ServerSelectionTimeoutError as err:\n print('Error in get_tumor_markup', err)\n exit(1)\n\n count = len(tumor_markup_list)\n if count == 0:\n print('No tumor markups were generated by ', user_name)\n exit(1)\n\n print('Tumor markup count: ', count)\n return tumor_markup_list\n\n\ndef markup_to_polygons(markup_list):\n \"\"\"\n Clean up and convert to something we can use.\n :param markup_list:\n :return:\n \"\"\"\n m_poly_list = []\n try:\n # roll through our list of lists\n for coordinates in markup_list:\n points_list = []\n # convert the point coordinates to Points\n for m_point in coordinates:\n m_point = Point(m_point[0], m_point[1])\n # print('m_point', m_point) # normalized\n points_list.append(m_point)\n # create a Polygon\n m = MultiPoint(points_list)\n m_polygon = Polygon(m)\n # append to return-list\n m_poly_list.append(m_polygon)\n except Exception as ex:\n print('Error in convert_to_polygons', ex)\n exit(1)\n\n # Return list of polygons\n return m_poly_list\n\n\ndef string_to_polygon(poly_data, imw, imh, normalize):\n \"\"\"\n Convert Polygon string to polygon\n :param poly_data:\n :param imw:\n :param imh:\n :param normalize:\n :return:\n \"\"\"\n points_list = []\n\n tmp_str = str(poly_data)\n tmp_str = tmp_str.replace('[', '')\n tmp_str = tmp_str.replace(']', '')\n split_str = tmp_str.split(':')\n m_polygon = {}\n\n try:\n # Get list of points\n for i in range(0, len(split_str) - 1, 2):\n a = float(split_str[i])\n b = float(split_str[i + 1])\n if normalize:\n # Normalize points\n point = [a / float(imw), b / float(imh)]\n else:\n point = [a, b]\n m_point = Point(point)\n points_list.append(m_point)\n # Create a Polygon\n m = MultiPoint(points_list)\n m_polygon = Polygon(m)\n except Exception as ex:\n print('Error in string_to_polygon', ex)\n exit(1)\n\n return m_polygon\n\n\ndef get_data_files():\n \"\"\"\n Return 2 lists containing full paths for CSVs and JSONs.\n :return:\n \"\"\"\n filenames = os.listdir(SLIDE_DIR) # get all files' and folders' names in directory\n\n folders = []\n for filename in filenames: # loop through all the files and folders\n ppath = os.path.join(os.path.abspath(SLIDE_DIR), filename)\n if os.path.isdir(ppath): # check whether the current object is a folder or not\n folders.append(ppath)\n\n folders.sort()\n # print('subfolders: ', len(folders))\n\n json_files = []\n csv_files = []\n for index, filename in enumerate(folders):\n # print(index, filename)\n files = os.listdir(filename)\n for name in files:\n ppath = os.path.join(os.path.abspath(filename), name)\n if name.endswith('json'):\n json_files.append(ppath)\n elif name.endswith('csv'):\n csv_files.append(ppath)\n\n # print('json_files: ', len(json_files))\n # print('csv_files: ', len(csv_files))\n\n json_files.sort()\n csv_files.sort()\n return json_files, csv_files\n\n\ndef get_poly_within(jfiles, tumor_list):\n \"\"\"\n Identify only the files within the tumor regions\n :param jfiles:\n :param tumor_list:\n :return:\n \"\"\"\n # print('files len: ', len(jfiles))\n # print('tumor_list len: ', len(tumor_list))\n temp = {}\n path_poly = {}\n # rtn_jfiles = []\n rtn_obj = {}\n # start_time = time.time()\n\n # Collect data\n z = set()\n count = 0\n for jfile in jfiles:\n with open(jfile, 'r') as f:\n # Read JSON data into the json_dict variable\n json_dict = json.load(f)\n # str = json_dict['out_file_prefix']\n imw = json_dict['image_width']\n imh = json_dict['image_height']\n tile_height = json_dict['tile_height']\n 
tile_width = json_dict['tile_width']\n tile_minx = json_dict['tile_minx']\n tile_miny = json_dict['tile_miny']\n fp = json_dict['out_file_prefix']\n\n item = 'x' + str(tile_minx) + '_' + 'y' + str(tile_miny)\n if item not in z: # If the object is not in the list yet...\n inc_x = tile_minx + tile_width\n inc_y = tile_miny + tile_height\n # Create polygon for comparison\n point1 = Point(float(tile_minx) / float(imw), float(tile_miny) / float(imh))\n # print('point1', point1) # normalized\n point2 = Point(float(inc_x) / float(imw), float(tile_miny) / float(imh))\n point3 = Point(float(inc_x) / float(imw), float(inc_y) / float(imh))\n point4 = Point(float(tile_minx) / float(imw), float(inc_y) / float(imh))\n point5 = Point(float(tile_minx) / float(imw), float(tile_miny) / float(imh))\n m = MultiPoint([point1, point2, point3, point4, point5])\n polygon = Polygon(m)\n # Map data file location (prefix) to bbox polygon\n # path_poly[f.name[:-pos]] = polygon\n path_poly[item] = {'poly': polygon, 'image_width': imw, 'image_height': imh, 'tile_width': tile_width,\n 'tile_height': tile_height, 'tile_minx': tile_minx, 'tile_miny': tile_miny,\n 'out_file_prefix': fp}\n else:\n count += 1\n\n z.add(item)\n\n f.close()\n temp.update(path_poly)\n\n print('dupes', count)\n print('len', len(temp))\n\n for tumor_roi in tumor_list:\n for key, val in temp.items():\n gotone = False\n p = val['poly']\n if p.within(tumor_roi):\n gotone = True\n elif p.intersects(tumor_roi):\n gotone = True\n elif tumor_roi.within(p):\n gotone = True\n elif tumor_roi.intersects(p):\n gotone = True\n if gotone:\n # print('val', val)\n rtn_obj.update({key: val})\n\n # elapsed_time = time.time() - start_time\n # print('Runtime get_poly_within: ')\n # print(time.strftime(\"%H:%M:%S\", time.gmtime(elapsed_time)))\n\n # return rtn_jfiles\n return rtn_obj\n\n\ndef aggregate_data(jfile_objs, CSV_FILES):\n \"\"\"\n Get data\n :param jfile_objs:\n :param CSV_FILES\n :return:\n \"\"\"\n start_time = time.time()\n obj_map = {}\n obj_map1 = {}\n rtn_dict = {}\n\n for k, v in jfile_objs.items():\n filelist = []\n for ff in CSV_FILES:\n if k in ff:\n filelist.append(ff)\n\n data_obj = {'filelist': filelist, \"image_width\": v['image_width'], \"image_height\": v['image_height'],\n \"tile_height\": v['tile_height'], \"tile_width\": v['tile_width'], \"tile_minx\": v['tile_minx'],\n \"tile_miny\": v['tile_miny']}\n obj_map.update({k: data_obj})\n\n print('obj_map', len(obj_map))\n print('Aggregating csv data...')\n\n for k, v in obj_map.items():\n frames = []\n for ff in v['filelist']:\n df = pandas.read_csv(ff)\n # print('df.shape[0]: ', df.shape[0])\n if df.empty:\n # print('empty!')\n # print(len(v['filelist']))\n # print(k)\n # print(ff)\n continue\n else:\n # new = old[['A', 'C', 'D']].copy()\n df1 = df[\n ['Perimeter', 'Flatness', 'Circularity', 'r_GradientMean', 'b_GradientMean',\n 'b_cytoIntensityMean', 'r_cytoIntensityMean', 'r_IntensityMean', 'r_cytoGradientMean',\n 'Elongation', 'Polygon']].copy()\n frames.append(df1)\n\n if frames:\n result = pandas.concat(frames)\n data_obj1 = {'df': result, \"image_width\": v['image_width'], \"image_height\": v['image_height'],\n \"tile_height\": v['tile_height'], \"tile_width\": v['tile_width'], \"tile_minx\": v['tile_minx'],\n \"tile_miny\": v['tile_miny']}\n\n obj_map1[ff] = data_obj1\n\n # Add to return variable\n rtn_dict.update(obj_map1)\n\n elapsed_time = time.time() - start_time\n print('Runtime aggregate_data: ')\n print(time.strftime(\"%H:%M:%S\", time.gmtime(elapsed_time)))\n\n return 
rtn_dict\n\n\ndef get_mongo_doc(slide, patch_data):\n \"\"\"\n Return a default mongo doc\n :param slide:\n :param patch_data:\n :return:\n \"\"\"\n # TODO:!\n\n # Ratio of nuclear material\n percent_nuclear_material = float((patch_data['nucleus_area'] / (PATCH_SIZE * PATCH_SIZE)) * 100)\n # print(\"Ratio of nuclear material: \", percent_nuclear_material)\n\n patch_index = patch_data['patch_num']\n\n mydoc = {\n \"case_id\": CASE_ID,\n \"image_width\": image_width,\n \"image_height\": image_height,\n \"mpp_x\": mpp_x,\n \"mpp_y\": mpp_y,\n \"user\": USER_NAME,\n \"tumorFlag\": \"tumor\",\n \"patch_index\": patch_index,\n \"patch_min_x_pixel\": patch_data['patch_minx'],\n \"patch_min_y_pixel\": patch_data['patch_miny'],\n \"patch_size\": PATCH_SIZE,\n \"patch_polygon_area\": patch_polygon_area,\n \"nucleus_area\": patch_data['nucleus_area'],\n \"percent_nuclear_material\": percent_nuclear_material,\n # \"patch_area_selected_percentage\": 100.0,\n \"grayscale_patch_mean\": 0.0,\n \"grayscale_patch_std\": 0.0,\n \"hematoxylin_patch_mean\": 0.0,\n \"hematoxylin_patch_std\": 0.0,\n \"grayscale_segment_mean\": \"n/a\",\n \"grayscale_segment_std\": \"n/a\",\n \"hematoxylin_segment_mean\": \"n/a\",\n \"hematoxylin_segment_std\": \"n/a\",\n \"flatness_segment_mean\": \"n/a\",\n \"flatness_segment_std\": \"n/a\",\n \"perimeter_segment_mean\": \"n/a\",\n \"perimeter_segment_std\": \"n/a\",\n \"circularity_segment_mean\": \"n/a\",\n \"circularity_segment_std\": \"n/a\",\n \"r_GradientMean_segment_mean\": \"n/a\",\n \"r_GradientMean_segment_std\": \"n/a\",\n \"b_GradientMean_segment_mean\": \"n/a\",\n \"b_GradientMean_segment_std\": \"n/a\",\n \"r_cytoIntensityMean_segment_mean\": \"n/a\",\n \"r_cytoIntensityMean_segment_std\": \"n/a\",\n \"b_cytoIntensityMean_segment_mean\": \"n/a\",\n \"b_cytoIntensityMean_segment_std\": \"n/a\",\n \"elongation_segment_mean\": \"n/a\",\n \"elongation_segment_std\": \"n/a\",\n \"tile_minx\": patch_data['tile_minx'],\n \"tile_miny\": patch_data['tile_miny'],\n \"datetime\": datetime.now()\n }\n\n return mydoc\n\n\ndef update_db(slide, patch_data, db_name):\n \"\"\"\n Write data, per patch.\n :param slide:\n :param patch_data:\n :param db_name:\n :return:\n \"\"\"\n\n df = patch_data['df']\n\n mydoc = get_mongo_doc(slide, patch_data)\n\n # read_region returns an RGBA Image (PIL)\n patch = slide.read_region((patch_data['patch_minx'], patch_data['patch_miny']), 0, (PATCH_SIZE, PATCH_SIZE))\n\n # Histology\n mydoc = patch_operations(patch, mydoc)\n\n mycol = DB[db_name] # name\n # Connect to MongoDB\n # try:\n # client = mongodb_connect('mongodb://' + DB_HOST + ':27017')\n # client.server_info() # force connection, trigger error to be caught\n # db = client.quip_comp\n # mycol = db[db_name + '_features_td'] # name\n # except Exception as e:\n # print('Connection error: ', e)\n # exit(1)\n\n try:\n if not df.empty:\n mydoc['flatness_segment_mean'] = df['Flatness'].mean()\n mydoc['flatness_segment_std'] = df['Flatness'].std()\n mydoc['perimeter_segment_mean'] = df['Perimeter'].mean()\n mydoc['perimeter_segment_std'] = df['Perimeter'].std()\n mydoc['circularity_segment_mean'] = df['Circularity'].mean()\n mydoc['circularity_segment_std'] = df['Circularity'].std()\n mydoc['r_GradientMean_segment_mean'] = df['r_GradientMean'].mean()\n mydoc['r_GradientMean_segment_std'] = df['r_GradientMean'].std()\n mydoc['b_GradientMean_segment_mean'] = df['b_GradientMean'].mean()\n mydoc['b_GradientMean_segment_std'] = df['b_GradientMean'].std()\n mydoc['r_cytoIntensityMean_segment_mean'] = 
df['r_cytoIntensityMean'].mean()\n mydoc['r_cytoIntensityMean_segment_std'] = df['r_cytoIntensityMean'].std()\n mydoc['b_cytoIntensityMean_segment_mean'] = df['b_cytoIntensityMean'].mean()\n mydoc['b_cytoIntensityMean_segment_std'] = df['b_cytoIntensityMean'].std()\n mydoc['elongation_segment_mean'] = df['Elongation'].mean()\n mydoc['elongation_segment_std'] = df['Elongation'].std()\n\n # Insert record in either case\n mycol.insert_one(mydoc)\n\n except Exception as err:\n print('update_db error: ', err)\n exit(1)\n # print('mydoc', json.dumps(mydoc, indent=4, sort_keys=True))\n\n\ndef calculate(tile_data):\n \"\"\"\n Mean and std of Perimeter, Flatness, Circularity,\n r_GradientMean, b_GradientMean, b_cytoIntensityMean, r_cytoIntensityMean.\n :param tile_data:\n :return:\n \"\"\"\n p = Path(os.path.join(SLIDE_DIR, (CASE_ID + '.svs')))\n print('Reading slide...')\n start_time = time.time()\n slide = openslide.OpenSlide(str(p))\n\n elapsed_time = time.time() - start_time\n print('Time it takes to read slide: ', elapsed_time)\n start_time = time.time() # reset\n\n # Iterate through tile data\n for key, val in tile_data.items():\n # Create patches\n do_tiles(val, slide)\n # exit(0) # TESTING ONE.\n\n slide.close()\n\n elapsed_time = time.time() - start_time\n print('Runtime calculate: ')\n print(time.strftime(\"%H:%M:%S\", time.gmtime(elapsed_time)))\n\n\ndef rgb_to_stain(rgb_img_matrix, sizex, sizey):\n \"\"\"\n RGB to stain color space conversion\n :param rgb_img_matrix:\n :param sizex:\n :param sizey:\n :return:\n \"\"\"\n hed_title_img = separate_stains(rgb_img_matrix, hed_from_rgb)\n hematoxylin_img_array = [[0 for x in range(sizex)] for y in range(sizey)]\n for index1, row in enumerate(hed_title_img):\n for index2, pixel in enumerate(row):\n hematoxylin_img_array[index1][index2] = pixel[0]\n\n return hematoxylin_img_array\n\n\ndef patch_operations(patch, mydoc):\n # Convert to grayscale\n img = patch.convert('L')\n # img to array\n img_array = np.array(img)\n # Intensity for all pixels, divided by num pixels\n mydoc['grayscale_patch_mean'] = np.mean(img_array)\n mydoc['grayscale_patch_std'] = np.std(img_array)\n # Intensity for all pixels inside segmented objects...\n # mydoc.grayscale_segment_mean = \"n/a\"\n # mydoc.grayscale_segment_std = \"n/a\"\n\n # Convert to RGB\n img = patch.convert('RGB')\n img_array = np.array(img)\n hed_title_img = separate_stains(img_array, hed_from_rgb)\n max1 = np.max(hed_title_img)\n min1 = np.min(hed_title_img)\n new_img_array = hed_title_img[:, :, 0]\n new_img_array = ((new_img_array - min1) * 255 / (max1 - min1)).astype(np.uint8)\n mydoc['hematoxylin_patch_mean'] = np.mean(new_img_array)\n mydoc['hematoxylin_patch_std'] = np.std(new_img_array)\n # mydoc.Hematoxylin_segment_mean = \"n/a\"\n # mydoc.Hematoxylin_segment_std = \"n/a\"\n\n return mydoc\n\n\ndef tile_operations(patch, type, name_prefix, w, h):\n \"\"\"\n\n :param patch:\n :param type:\n :param name_prefix:\n :param w:\n :param h:\n :return:\n \"\"\"\n data = {}\n\n img = patch.convert(type)\n\n # img to array\n img_array = np.array(img)\n\n if name_prefix == 'hematoxylin':\n # Convert rgb to stain color space\n img_array = rgb_to_stain(img_array, w, h)\n\n # average of the array elements\n patch_mean = np.mean(img_array)\n data[name_prefix + '_patch_mean'] = patch_mean\n\n # standard deviation of the array elements\n patch_std = np.std(img_array)\n data[name_prefix + '_patch_std'] = patch_std\n\n percentiles = [10, 25, 50, 75, 90]\n for i in range(len(percentiles)):\n name = name_prefix + 
'_patch_percentile_' + str(percentiles[i])\n data[name] = np.percentile(img_array, percentiles[i])\n # print(name_prefix + \" patch {} percentile: {}\".format(percentiles[i],\n # np.percentile(img_array, percentiles[i])))\n\n return data\n\n\ndef histology(slide, min_x, min_y, w, h):\n \"\"\"\n\n :param slide:\n :param min_x:\n :param min_y:\n :param w:\n :param h:\n :return:\n \"\"\"\n rtn_obj = {}\n try:\n # read_region returns an RGBA Image (PIL)\n tile = slide.read_region((min_x, min_y), 0, (w, h))\n\n # convert image and perform calculations\n a = tile_operations(tile, 'L', 'grayscale', w, h)\n b = tile_operations(tile, 'RGB', 'hematoxylin', w, h)\n c = {}\n\n for (key, value) in a.items():\n c.update({key: value})\n\n for (key, value) in b.items():\n c.update({key: value})\n\n rtn_obj = c\n\n except Exception as e:\n print('Error reading region: ', min_x, min_y)\n print(e)\n exit(1)\n\n return rtn_obj\n\n\ndef detect_bright_spots(gray):\n \"\"\"\n Detect bright spots (no staining) and ignore those areas in area computation\n :param gray:\n :return:\n \"\"\"\n # load the image, convert it to grayscale, and blur it\n # image = cv2.imread('img/detect_bright_spots.png')\n # gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n blurred = cv2.GaussianBlur(gray, (11, 11), 0)\n # Pixel values p >= 200 are set to 255 (white)\n # Pixel values < 200 are set to 0 (black).\n thresh = cv2.threshold(blurred, 200, 255, cv2.THRESH_BINARY)[1]\n\n # Do something.\n\n\ndef do_tiles(data, slide):\n \"\"\"\n Divide tile into patches\n :param data:\n :return:\n \"\"\"\n print('Dividing patch into tiles...')\n start_time = time.time()\n\n patch_num = 0\n df = data['df']\n width = data['tile_width']\n height = data['tile_height']\n cols = width / PATCH_SIZE\n rows = height / PATCH_SIZE\n # data_complete = {}\n\n # Divide tile into patches\n for x in range(1, (int(cols) + 1)):\n for y in range(1, (int(rows) + 1)):\n patch_num += 1\n print('patch_num', patch_num)\n # minx = minx + (x * tile_size)\n # miny = miny + (y * tile_size)\n minx = x * PATCH_SIZE\n miny = y * PATCH_SIZE\n minx = minx + data['tile_minx']\n miny = miny + data['tile_miny']\n maxx = minx + PATCH_SIZE\n maxy = miny + PATCH_SIZE\n\n # Normalize\n nminx = minx / image_width\n nminy = miny / image_width\n nmaxx = maxx / image_width\n nmaxy = maxy / image_width\n\n # Bounding box representing patch\n print((minx, miny), (maxx, miny), (maxx, maxy), (minx, maxy))\n # bbox = BoundingBox([(minx, miny), (maxx, miny), (maxx, maxy), (minx, maxy)])\n bbox = Polygon([(minx, miny), (maxx, miny), (maxx, maxy), (minx, maxy), (minx, miny)])\n bbox1 = Polygon([(nminx, nminy), (nmaxx, nminy), (nmaxx, nmaxy), (nminx, nmaxy), (nminx, nminy)])\n\n df2 = pandas.DataFrame()\n nucleus_area = 0.0\n # Figure out which polygons (data rows) belong to which patch\n for index, row in df.iterrows():\n xy = row['Polygon']\n polygon_shape = string_to_polygon(xy, data['image_width'], data['image_height'], False)\n polygon_shape = polygon_shape.buffer(0.0) # Using a zero-width buffer cleans up many topology problems\n # polygon_shape1 = string_to_polygon(xy, data['image_width'], data['image_height'], True)\n # polygon_shape1 = polygon_shape1.buffer(0.0)\n\n # print('polygon_shape', polygon_shape)\n\n # Accumulate information\n if polygon_shape.within(bbox) or polygon_shape.intersects(bbox):\n df2 = df2.append(row)\n if polygon_shape.intersects(bbox):\n try:\n nucleus_area += polygon_shape.intersection(bbox).area\n # nucleus_area += polygon_shape1.intersection(bbox1).area\n # 
print(nucleus_area * factor)\n except Exception as err:\n # except errors.TopologicalError as toperr:\n print('Invalid geometry', err)\n else:\n nucleus_area += polygon_shape.area\n # nucleus_area += polygon_shape1.area\n # print(nucleus_area * factor)\n\n nucleus_area = nucleus_area / PATCH_SIZE\n print('nucleus_area', nucleus_area)\n\n update_db(slide, {'df': df2, 'nucleus_area': nucleus_area, 'patch_num': patch_num,\n 'patch_minx': minx, 'patch_miny': miny, 'tile_minx': data['tile_minx'],\n 'tile_miny': data['tile_miny'], 'image_width': data['image_width'],\n 'image_height': data['image_height']}, coll_name)\n\n elapsed_time = time.time() - start_time\n print('Runtime do_tiles: ')\n print(time.strftime(\"%H:%M:%S\", time.gmtime(elapsed_time)))\n # exit(0) # testing one tile\n\n\ndef get_image_metadata():\n p = Path(os.path.join(SLIDE_DIR, (CASE_ID + '.svs')))\n slide = openslide.OpenSlide(str(p))\n mpp_x = slide.properties[openslide.PROPERTY_NAME_MPP_X]\n mpp_y = slide.properties[openslide.PROPERTY_NAME_MPP_Y]\n mpp_x = round(float(mpp_x), 4)\n mpp_y = round(float(mpp_y), 4)\n image_width, image_height = slide.dimensions\n # image_width = slide.dimensions[0]\n # image_height = slide.dimensions[1]\n slide.close()\n\n return mpp_x, mpp_y, image_width, image_height\n\n\n# constant variables\nWORK_DIR = \"/data1/tdiprima/dataset\"\nDATA_FILE_FOLDER = \"nfs004:/data/shared/bwang/composite_dataset\"\nSVS_IMAGE_FOLDER = \"nfs001:/data/shared/tcga_analysis/seer_data/images\"\n\n# construct the argument parser and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-s\", \"--slide_name\", help=\"svs image name\")\nap.add_argument(\"-u\", \"--user_name\", help=\"user who identified tumor regions\")\nap.add_argument(\"-b\", \"--db_host\", help=\"database host\")\nap.add_argument(\"-p\", \"--patch_size\", type=int, help=\"patch size\")\nargs = vars(ap.parse_args())\nprint(args)\n\nif not len(sys.argv) > 1:\n program_name = sys.argv[0]\n lst = ['python', program_name, '-h']\n subprocess.call(lst) # Show help\n exit(1)\n\nCASE_ID = args[\"slide_name\"]\nUSER_NAME = args[\"user_name\"]\nPATCH_SIZE = args[\"patch_size\"]\nDB_HOST = args[\"db_host\"]\n\nSLIDE_DIR = os.path.join(WORK_DIR, CASE_ID) + os.sep\nDATA_FILE_SUBFOLDERS = get_file_list(CASE_ID, 'config/data_file_path.list')\n# print('DATA_FILE_SUBFOLDERS', DATA_FILE_SUBFOLDERS)\n\n# Fetch data.\nassure_path_exists(SLIDE_DIR)\ncopy_src_data(SLIDE_DIR)\n\nmpp_x, mpp_y, image_width, image_height = get_image_metadata()\npatch_polygon_area = PATCH_SIZE * PATCH_SIZE * mpp_x * mpp_y\nprint('patch_polygon_area', patch_polygon_area)\n\n# Find what the pathologist circled as tumor.\ntumor_mark_list = get_tumor_markup(USER_NAME)\n# print('tumor_mark_list', len(tumor_mark_list))\n\n# List of Tumor polygons\ntumor_poly_list = markup_to_polygons(tumor_mark_list)\n# print('tumor_poly_list', len(tumor_poly_list))\n\n# Fetch list of data files\nJSON_FILES, CSV_FILES = get_data_files()\n\n# Identify only the files within the tumor regions\njfile_objs = get_poly_within(JSON_FILES, tumor_poly_list)\nprint('get_poly_within len: ', len(jfile_objs))\n\n# Get data\ncsv_data = aggregate_data(jfile_objs, CSV_FILES)\nprint('csv_data len: ', len(csv_data))\n\n# Connect to MongoDB\ncoll_name = 'test2_features_td'\nclient = {}\ntry:\n client = mongodb_connect('mongodb://' + DB_HOST + ':27017')\n client.server_info() # force connection, trigger error to be caught\n DB = client.quip_comp\nexcept Exception as e:\n print('Connection error: ', e)\n exit(1)\n\n# 
Calculate\ncalculate(csv_data)\n\nclient.close()\n\nexit(0)\n" ]
[ [ "pandas.concat", "pandas.read_csv", "numpy.min", "numpy.percentile", "pandas.DataFrame", "numpy.max", "numpy.std", "numpy.mean", "numpy.array" ] ]
magland/nwb-jupyter-widgets
[ "3fdabd866d1aa38dfda1eedeae1f87965f2b8bd8" ]
[ "nwbwidgets/ophys.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\nfrom pynwb.ophys import RoiResponseSeries, DfOverF, PlaneSegmentation, TwoPhotonSeries\nfrom pynwb.base import NWBDataInterface\nfrom ndx_grayscalevolume import GrayscaleVolume\nfrom collections import OrderedDict\nfrom .utils.cmaps import linear_transfer_function\nimport ipywidgets as widgets\nfrom itertools import cycle\nfrom matplotlib import colors\n\n\ncolor_wheel = ['red', 'green', 'black', 'blue', 'magenta', 'yellow']\n\n\ndef show_two_photon_series(indexed_timeseries: TwoPhotonSeries, neurodata_vis_spec: OrderedDict):\n output = widgets.Output()\n\n if len(indexed_timeseries.data.shape) == 3:\n def show_image(index=0):\n fig, ax = plt.subplots(subplot_kw={'xticks': [], 'yticks': []})\n ax.imshow(indexed_timeseries.data[index], cmap='gray')\n output.clear_output(wait=True)\n with output:\n plt.show(fig)\n elif len(indexed_timeseries.data.shape) == 4:\n import ipyvolume.pylab as p3\n\n def show_image(index=0):\n fig = p3.figure()\n p3.volshow(indexed_timeseries.data[index], tf=linear_transfer_function([0, 0, 0], max_opacity=.3))\n output.clear_output(wait=True)\n with output:\n p3.show()\n else:\n raise NotImplementedError\n\n def on_index_change(change):\n show_image(change.new)\n\n slider = widgets.IntSlider(value=0, min=0,\n max=indexed_timeseries.data.shape[0] - 1,\n orientation='horizontal')\n slider.observe(on_index_change, names='value')\n show_image()\n\n return widgets.VBox([output, slider])\n\n\ndef show_df_over_f(df_over_f: DfOverF, neurodata_vis_spec: OrderedDict):\n if len(df_over_f.roi_response_series) == 1:\n title, input = list(df_over_f.roi_response_series.items())[0]\n return neurodata_vis_spec[RoiResponseSeries](input, neurodata_vis_spec, title=title)\n else:\n return neurodata_vis_spec[NWBDataInterface](df_over_f, neurodata_vis_spec)\n\n\ndef show_roi_response_series(roi_response_series: RoiResponseSeries, neurodata_vis_spec: OrderedDict,\n nchans: int = 30, title: str = None):\n \"\"\"\n\n :param roi_response_series: pynwb.ophys.RoiResponseSeries\n :param neurodata_vis_spec: OrderedDict\n :param nchans: int\n :param title: str\n :return: matplotlib.pyplot.Figure\n \"\"\"\n tt = roi_response_series.timestamps\n data = roi_response_series.data\n if data.shape[1] == len(tt): # fix of orientation is incorrect\n mini_data = data[:nchans, :].T\n else:\n mini_data = data[:, :nchans]\n\n gap = np.median(np.nanstd(mini_data, axis=0)) * 20\n offsets = np.arange(nchans) * gap\n\n fig, ax = plt.subplots()\n ax.plot(tt, mini_data + offsets)\n ax.set_xlabel('time (s)')\n ax.set_ylabel('traces (first 30)')\n if np.isfinite(gap):\n ax.set_ylim(-gap, offsets[-1] + gap)\n ax.set_xlim(tt[0], tt[-1])\n ax.set_yticks(offsets)\n ax.set_yticklabels(np.arange(mini_data.shape[1]))\n\n if title is not None:\n ax.set_title(title)\n\n return fig\n\n\ndef show_plane_segmentation(plane_seg: PlaneSegmentation, neurodata_vis_spec: OrderedDict):\n nrois = len(plane_seg)\n\n if 'voxel_mask' in plane_seg:\n import ipyvolume.pylab as p3\n\n dims = np.array([max(max(plane_seg['voxel_mask'][i][dim]) for i in range(nrois))\n for dim in ['x', 'y', 'z']]).astype('int') + 1\n fig = p3.figure()\n for icolor, color in enumerate(color_wheel):\n vol = np.zeros(dims)\n sel = np.arange(icolor, nrois, len(color_wheel))\n for isel in sel:\n dat = plane_seg['voxel_mask'][isel]\n vol[tuple(dat['x'].astype('int')),\n tuple(dat['y'].astype('int')),\n tuple(dat['z'].astype('int'))] = 1\n p3.volshow(vol, tf=linear_transfer_function(color, max_opacity=.3))\n 
return fig\n elif 'image_mask' in plane_seg:\n data = plane_seg['image_mask'].data\n img = np.ones(shape=list(data.shape[1:]) + [3])\n for c, img_mask in zip(cycle(color_wheel), data):\n img[img_mask.astype(bool), :] = colors.to_rgb(c)\n\n fig, ax = plt.subplots(figsize=(6, 6))\n ax.imshow(img)\n\n return fig\n\n\ndef show_grayscale_volume(vol: GrayscaleVolume, neurodata_vis_spec: OrderedDict):\n import ipyvolume.pylab as p3\n\n fig = p3.figure()\n p3.volshow(vol.data, tf=linear_transfer_function([0, 0, 0], max_opacity=.1))\n return fig\n" ]
[ [ "matplotlib.colors.to_rgb", "numpy.isfinite", "numpy.arange", "matplotlib.pyplot.subplots", "numpy.nanstd", "matplotlib.pyplot.show", "numpy.zeros" ] ]
MikkelMathiasen23/Variational_proteins
[ "e71bb7c900d9efe63521a622109349202cc7bfb7" ]
[ "train_models.py" ]
[ "import torch \r\nimport numpy as np\r\nfrom misc import data, c\r\nfrom models import *\r\n\r\nfrom torch import optim\r\nfrom scipy.stats import spearmanr\r\nfrom torch.distributions.normal import Normal\r\n\r\n\r\nclass training(torch.nn.Module):\r\n def __init__(self, **kwargs):\r\n super(training, self).__init__()\r\n self.batch_size = kwargs['batch_size']\r\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\r\n\r\n self.dataloader, self.df, self.mutants_tensor, self.mutants_df, self.weights, self.neff = data(batch_size = self.batch_size,neff_w = kwargs['neff_w'], device = device)\r\n\r\n self.wildtype = self.dataloader.dataset[0] # one-hot-encoded wildtype \r\n self.eval_batch = torch.cat([self.wildtype.unsqueeze(0), self.mutants_tensor])\r\n print('initialize train object')\r\n self.alphabet_size = self.dataloader.dataset[0].shape[0]\r\n self.seq_len = self.dataloader.dataset[0].shape[1]\r\n \r\n def train_HVAE(self,**kwargs):\r\n kwargs['alphabet_size'] = self.alphabet_size\r\n kwargs['seq_len'] = self.seq_len\r\n device = kwargs['device']\r\n \r\n step = kwargs['step']\r\n alpha_warm_up = torch.arange(0,1/4+step,step)\r\n vae = HVAE(**kwargs).to(device)\r\n opt = optim.Adam(vae.parameters(), lr = 0.0001)\r\n\r\n # rl = Reconstruction loss\r\n # kl = Kullback-Leibler divergence loss\r\n # cor = Spearman correlation to experimentally measured \r\n # protein fitness according to eq.1 from paper\r\n stats = { 'rl': [], 'kl': [], 'cor': [] }\r\n \r\n #################################### Initialize dicts for weights ############################################# \r\n stats_encoder = {}\r\n stats_W = {}\r\n stats_reconstruct = {}\r\n for l in vae.e.keys():\r\n stats_encoder[l] = []\r\n for l in vae.W.keys():\r\n stats_W[l] = []\r\n cc = 0\r\n for i,l in enumerate(vae.reconstruct):\r\n if type(l) == torch.nn.Linear:\r\n name = 'reconstruct' + str(cc)\r\n stats_reconstruct[name] = []\r\n cc+=1\r\n #################################### Initialize dicts for mean and log ######################################### \r\n stats_p_mu = {}\r\n stats_p_logvar = {}\r\n stats_e_mu = {}\r\n stats_e_logvar = {}\r\n stats_d_mu = {}\r\n stats_d_logvar = {}\r\n for i in range(len(kwargs['layers'])):\r\n stats_d_mu['d'+str(i)] = []\r\n stats_d_logvar['d'+str(i)] = []\r\n stats_e_mu['e'+str(i)] = []\r\n stats_e_logvar['e'+str(i)] = []\r\n stats_p_mu['p'+str(i)] = []\r\n stats_p_logvar['p'+str(i)] = []\r\n \r\n\r\n for epoch in range(kwargs['epochs']):\r\n # Unsupervised training on the MSA sequences.\r\n vae.train()\r\n if epoch > len(alpha_warm_up)-1:\r\n k = len(alpha_warm_up)-1\r\n else:\r\n k = epoch\r\n \r\n epoch_losses = { 'rl': [], 'kl': [] }\r\n for batch in self.dataloader:\r\n opt.zero_grad()\r\n x_hat,_, _,p_mu, p_logvar, d_mu, d_logvar = vae(batch)\r\n loss, rl, kl = vae.loss(x_hat, batch,p_mu,p_logvar,d_mu,d_logvar,alpha_warm_up =alpha_warm_up[k])\r\n loss.mean().backward()\r\n opt.step()\r\n epoch_losses['rl'].append(rl.mean().item())\r\n epoch_losses['kl'].append(kl.mean().item())\r\n\r\n # Evaluation on mutants\r\n vae.eval()\r\n x_hat_eval, e_mu, e_logvar,p_mu, p_logvar, d_mu, d_logvar = vae(self.eval_batch)\r\n elbos, _, _ = vae.loss(x_hat_eval, self.eval_batch,p_mu,p_logvar,d_mu,d_logvar, alpha_warm_up =alpha_warm_up[k])#\r\n diffs = elbos[1:] - elbos[0] # log-ratio (first equation in the paper)\r\n cor, _ = spearmanr(self.mutants_df.value, diffs.cpu().detach())\r\n \r\n # Populate statistics \r\n stats['rl'].append(np.mean(epoch_losses['rl']))\r\n 
stats['kl'].append(np.mean(epoch_losses['kl']))\r\n stats['cor'].append(np.abs(cor))\r\n to_print = [\r\n f\"{c.HEADER}EPOCH %03d\" % epoch,\r\n f\"{c.OKBLUE}RL=%4.4f\" % stats['rl'][-1], \r\n f\"{c.OKGREEN}KL=%4.4f\" % stats['kl'][-1], \r\n f\"{c.OKCYAN}|rho|=%4.4f{c.ENDC}\" % stats['cor'][-1]\r\n ]\r\n print(\" \".join(to_print))\r\n for l in vae.e.keys():\r\n stats_encoder[l].append(torch.linalg.norm(vae.e[l].weight.data.cpu(), ord = 2).numpy())\r\n for l in vae.W.keys():\r\n stats_W[l].append(torch.linalg.norm(vae.W[l].weight.data.cpu(), ord = 2).numpy())\r\n cc=0\r\n for i,l in enumerate(vae.reconstruct):\r\n if type(l) == torch.nn.Linear:\r\n name = 'reconstruct' + str(cc)\r\n stats_reconstruct[name].append(torch.linalg.norm(l.weight.data.cpu(), ord = 2).numpy())\r\n cc+=1\r\n \r\n for i,(mu, logvar) in enumerate(zip(e_mu, e_logvar)):\r\n stats_e_mu['e'+str(i)].append(mu.mean().data.cpu().numpy())\r\n stats_e_logvar['e'+str(i)].append(logvar.mean().data.cpu().numpy())\r\n\r\n for i,(mu, logvar) in enumerate(zip(p_mu, p_logvar)):\r\n stats_p_mu['p'+str(i)].append(mu.mean().data.cpu().numpy())\r\n stats_p_logvar['p'+str(i)].append(logvar.mean().data.cpu().numpy())\r\n\r\n for i,(mu, logvar) in enumerate(zip(d_mu, d_logvar)):\r\n stats_d_mu['d'+str(i)].append(mu.mean().data.cpu().numpy())\r\n stats_d_logvar['d'+str(i)].append(logvar.mean().data.cpu().numpy())\r\n \r\n torch.save({\r\n 'state_dict': vae.state_dict(), \r\n 'stats_encoder': stats_encoder,\r\n 'stats_W': stats_W,\r\n 'stats_reconstruct': stats_reconstruct,\r\n 'stats_e_mu': stats_e_mu,\r\n 'stats_e_logvar': stats_e_logvar,\r\n 'stats_p_mu': stats_p_mu,\r\n 'stats_p_logvar': stats_p_logvar,\r\n 'stats_d_mu': stats_d_mu,\r\n 'stats_d_logvar': stats_d_logvar,\r\n 'stats': stats,\r\n 'args': kwargs,\r\n }, \"trained.model_HVAE_l2_exp.pth\")\r\n\r\n\r\n def train_vanilla(self,**kwargs):\r\n device = kwargs['device']\r\n kwargs['alphabet_size'] = self.alphabet_size\r\n kwargs['seq_len'] = self.seq_len\r\n vae = VAE(**kwargs).to(device)\r\n opt = optim.Adam(vae.parameters())\r\n\r\n # rl = Reconstruction loss\r\n # kl = Kullback-Leibler divergence loss\r\n # cor = Spearman correlation to experimentally measured \r\n # protein fitness according to eq.1 from paper\r\n stats = { 'rl': [], 'kl': [], 'cor': [] }\r\n\r\n for epoch in range(kwargs['epoch']):\r\n # Unsupervised training on the MSA sequences.\r\n vae.train()\r\n \r\n epoch_losses = { 'rl': [], 'kl': [] }\r\n for batch in self.dataloader:\r\n opt.zero_grad()\r\n x_hat, mu, logvar = vae(batch)\r\n loss, rl, kl = vae.loss(x_hat, batch, mu, logvar)\r\n loss.mean().backward()\r\n opt.step()\r\n epoch_losses['rl'].append(rl.mean().item())\r\n epoch_losses['kl'].append(kl.mean().item())\r\n\r\n # Evaluation on mutants\r\n vae.eval()\r\n x_hat_eval, mu, logvar = vae(self.eval_batch, rep=False)\r\n elbos, _, _ = vae.loss(x_hat_eval, self.eval_batch, mu, logvar)\r\n diffs = elbos[1:] - elbos[0] # log-ratio (first equation in the paper)\r\n cor, _ = spearmanr(self.mutants_df.value, diffs.cpu().detach())\r\n \r\n # Populate statistics \r\n stats['rl'].append(np.mean(epoch_losses['rl']))\r\n stats['kl'].append(np.mean(epoch_losses['kl']))\r\n stats['cor'].append(np.abs(cor))\r\n\r\n to_print = [\r\n f\"{c.HEADER}EPOCH %03d\" % epoch,\r\n f\"{c.OKBLUE}RL=%4.4f\" % stats['rl'][-1], \r\n f\"{c.OKGREEN}KL=%4.4f\" % stats['kl'][-1], \r\n f\"{c.OKCYAN}|rho|=%4.4f{c.ENDC}\" % stats['cor'][-1]\r\n ]\r\n print(\" \".join(to_print))\r\n\r\n torch.save({\r\n 'state_dict': vae.state_dict(), \r\n 
'stats': stats,\r\n 'args': kwargs,\r\n }, \"trained.model_vanilla.pth\")\r\n\r\n def train_bayesian(self,**kwargs):\r\n \r\n device = kwargs['device']\r\n epochs = kwargs['epoch']\r\n bayesian = kwargs['bayesian']\r\n n_ensambles = kwargs['n_ensambles']\r\n kwargs['neff'] = self.neff.item()\r\n kwargs['alphabet_size'] = self.alphabet_size\r\n kwargs['seq_len'] = self.seq_len\r\n\r\n\r\n vae = VAE_bayesian(**kwargs).to(device) #Initialize VAE model with the parameters stated above \r\n opt = optim.Adam(vae.parameters()) #Initialize Adam optimizer\r\n\r\n # rl = Reconstruction loss\r\n # kl = Kullback-Leibler divergence loss\r\n # cor = Spearman correlation to experimentally measured \r\n # protein fitness according to eq.1 from paper\r\n if bayesian:\r\n stats = { 'rl': [], 'kl': [], 'cor': [],'KLB': []}\r\n else:\r\n stats = { 'rl': [], 'kl': [], 'cor': []}\r\n\r\n for epoch in range(epochs):\r\n # Unsupervised training on the MSA sequences.\r\n vae.train()\r\n if bayesian:\r\n epoch_losses = { 'rl': [], 'kl': [], 'cor': [],'KLB': []}\r\n else:\r\n epoch_losses = { 'rl': [], 'kl': [], 'cor': []}\r\n\r\n for batch in self.dataloader:\r\n opt.zero_grad()\r\n x_hat, mu, logvar = vae(batch) #Compute forward pass\r\n if bayesian:\r\n loss, rl, kl, KLB = vae.loss(x_hat, batch, mu, logvar) #Compute loss statistics\r\n loss.mean().backward() \r\n opt.step()\r\n #Save statistics\r\n epoch_losses['rl'].append(rl.mean().item())\r\n epoch_losses['kl'].append(kl.mean().item())\r\n epoch_losses['KLB'].append(KLB.mean().item())\r\n\r\n else: #If not in bayesian setting no KLB is present\r\n loss, rl, kl = vae.loss(x_hat, batch, mu, logvar)\r\n loss.mean().backward()\r\n opt.step()\r\n epoch_losses['rl'].append(rl.mean().item())\r\n epoch_losses['kl'].append(kl.mean().item())\r\n \r\n # Evaluation on mutants\r\n vae.eval()\r\n with torch.no_grad(): #Ensure no gradients when computing the ensambles\r\n cor_lst = []\r\n\r\n # Ensemble over 256 iterations of the validation set\r\n if bayesian:\r\n if epoch % 8 == 0: #To speed computations up only do the ensambles every 8 epoch\r\n mt_elbos, wt_elbos, ensambles = 0, 0, n_ensambles\r\n for i in range(ensambles):\r\n if i and (i % 2 == 0):\r\n print(f\"\\tReached {i}\", \" \"*32, end=\"\\r\")\r\n\r\n elbos = vae.logp_calc(self.eval_batch).detach().cpu() #Compute eblos needed for correlation computation\r\n \r\n #Split up computation as spearman correlation is not linear\r\n wt_elbos += elbos[0]\r\n mt_elbos += elbos[1:]\r\n \r\n print()\r\n\r\n diffs = (mt_elbos / ensambles) - (wt_elbos / ensambles)\r\n cor, _ = spearmanr(self.mutants_df.value, diffs) #Compute the correlation\r\n\r\n else: #If not doing bayesian no need for ensambles\r\n elbos = vae.logp_calc(self.eval_batch)\r\n diffs = elbos[1:] - elbos[0] # log-ratio (first equation in the paper)\r\n cor, _ = spearmanr(self.mutants_df.value, diffs.cpu().detach())\r\n\r\n # Populate statistics \r\n stats['rl'].append(np.mean(epoch_losses['rl']))\r\n stats['kl'].append(np.mean(epoch_losses['kl']))\r\n stats['cor'].append(np.abs(cor))\r\n\r\n if bayesian:\r\n stats['KLB'].append(np.mean(epoch_losses['KLB']))\r\n\r\n if bayesian:\r\n to_print = [\r\n f\"{c.HEADER}EPOCH %03d\" % epoch,\r\n f\"{c.OKBLUE}RL=%4.4f\" % stats['rl'][-1], \r\n f\"{c.OKGREEN}KL=%4.4f\" % stats['kl'][-1],\r\n f\"{c.OKGREEN}KLB=%4.4f\" % stats['KLB'][-1], \r\n f\"{c.OKCYAN}|rho|=%4.4f{c.ENDC}\" % stats['cor'][-1]\r\n ]\r\n \r\n else:\r\n to_print = [\r\n f\"{c.HEADER}EPOCH %03d\" % epoch,\r\n f\"{c.OKBLUE}RL=%4.4f\" % stats['rl'][-1], 
\r\n f\"{c.OKGREEN}KL=%4.4f\" % stats['kl'][-1], \r\n f\"{c.OKCYAN}|rho|=%4.4f{c.ENDC}\" % stats['cor'][-1]\r\n ]\r\n print(\" \".join(to_print))\r\n\r\n if bayesian:\r\n torch.save({\r\n 'state_dict': vae.state_dict(), \r\n 'stats': stats,\r\n 'args': kwargs,\r\n }, \"trained.model.bayesian.pth\")\r\n \r\n\r\nif __name__ == '__main__':\r\n \r\n train_type = 'Bayesian'\r\n kwargs_vae = {\r\n 'epoch': 100,\r\n 'device':torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\"),\r\n 'train_type': train_type\r\n }\r\n kwargs_bayesian = {\r\n 'epoch': 500,\r\n 'n_ensambles': 256,\r\n 'device': torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\"),\r\n 'bayesian': True,\r\n 'beta': 1,\r\n 'hidden_size': 2000,\r\n 'latent_size': 30,\r\n 'shared_size': 40,\r\n 'repeat': 1,\r\n 'group_sparsity': True,\r\n 'dropout': 0,\r\n 'train_type': train_type\r\n\r\n }\r\n kwargs_hvae = {\r\n 'device':torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\"),\r\n 'step': 0.001,\r\n 'hidden_size': 1000,\r\n 'layers': [512,512,1024],\r\n 'latents': [8,8,16],\r\n 'epochs': 500,\r\n 'train_type': train_type\r\n\r\n }\r\n\r\n\r\n\r\n kwargs_init = {'batch_size': 256, 'neff_w': True}\r\n train = training(**kwargs_init)\r\n\r\n if train_type == 'Vanilla':\r\n train.train_vanilla(**kwargs_vae)\r\n elif train_type == 'Bayesian':\r\n train.train_bayesian(**kwargs_bayesian)\r\n elif train_type == 'HVAE':\r\n train.train_HVAE(**kwargs_hvae)\r\n" ]
[ [ "numpy.abs", "torch.no_grad", "numpy.mean", "torch.cuda.is_available", "torch.arange", "scipy.stats.spearmanr" ] ]
JiechengZhao/shap
[ "d2042bab95bdd9c14f4bf7f1a8340bb6a2cf8733" ]
[ "tests/test_gradient.py" ]
[ "import matplotlib\nimport numpy as np\nmatplotlib.use('Agg')\nimport shap\n\ndef test_tf_keras_mnist_cnn():\n \"\"\" This is the basic mnist cnn example from keras.\n \"\"\"\n\n try:\n import tensorflow as tf\n from tensorflow.python import keras\n from tensorflow.python.keras.models import Sequential\n from tensorflow.python.keras.layers import Dense, Dropout, Flatten, Activation\n from tensorflow.python.keras.layers import Conv2D, MaxPooling2D\n from tensorflow.python.keras import backend as K\n except Exception as e:\n print(\"Skipping test_tf_keras_mnist_cnn!\")\n return\n import shap\n\n batch_size = 128\n num_classes = 10\n epochs = 1\n\n # input image dimensions\n img_rows, img_cols = 28, 28\n\n # the data, split between train and test sets\n (x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()\n\n if K.image_data_format() == 'channels_first':\n x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)\n x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)\n input_shape = (1, img_rows, img_cols)\n else:\n x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)\n x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)\n input_shape = (img_rows, img_cols, 1)\n\n x_train = x_train.astype('float32')\n x_test = x_test.astype('float32')\n x_train /= 255\n x_test /= 255\n\n # convert class vectors to binary class matrices\n y_train = keras.utils.to_categorical(y_train, num_classes)\n y_test = keras.utils.to_categorical(y_test, num_classes)\n\n model = Sequential()\n model.add(Conv2D(32, kernel_size=(3, 3),\n activation='relu',\n input_shape=input_shape))\n model.add(Conv2D(64, (3, 3), activation='relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n model.add(Flatten())\n model.add(Dense(32, activation='relu')) # 128\n model.add(Dropout(0.5))\n model.add(Dense(num_classes))\n model.add(Activation('softmax'))\n\n model.compile(loss=keras.losses.categorical_crossentropy,\n optimizer=keras.optimizers.Adadelta(),\n metrics=['accuracy'])\n\n model.fit(x_train[:1000,:], y_train[:1000,:],\n batch_size=batch_size,\n epochs=epochs,\n verbose=1,\n validation_data=(x_test[:1000,:], y_test[:1000,:]))\n\n # explain by passing the tensorflow inputs and outputs\n np.random.seed(0)\n inds = np.random.choice(x_train.shape[0], 20, replace=False)\n e = shap.GradientExplainer((model.layers[0].input, model.layers[-1].input), x_train[inds,:,:])\n shap_values = e.shap_values(x_test[:1], nsamples=1000)\n\n sess = tf.keras.backend.get_session()\n diff = sess.run(model.layers[-1].input, feed_dict={model.layers[0].input: x_test[:1]}) - \\\n sess.run(model.layers[-1].input, feed_dict={model.layers[0].input: x_train[inds,:,:]}).mean(0)\n\n sums = np.array([shap_values[i].sum() for i in range(len(shap_values))])\n d = np.abs(sums - diff).sum()\n assert d / np.abs(diff).sum() < 0.05, \"Sum of SHAP values does not match difference! %f\" % (d / np.abs(diff).sum())\n" ]
[ [ "tensorflow.python.keras.layers.Activation", "tensorflow.python.keras.backend.image_data_format", "tensorflow.python.keras.layers.MaxPooling2D", "numpy.random.seed", "tensorflow.python.keras.utils.to_categorical", "numpy.random.choice", "matplotlib.use", "tensorflow.keras.backend.get_session", "tensorflow.python.keras.layers.Flatten", "tensorflow.python.keras.layers.Dense", "numpy.abs", "tensorflow.python.keras.models.Sequential", "tensorflow.python.keras.layers.Dropout", "tensorflow.python.keras.layers.Conv2D", "tensorflow.python.keras.optimizers.Adadelta", "tensorflow.python.keras.datasets.mnist.load_data" ] ]
laravomfell/tvd_loss
[ "b30a925f95985a03ff70bfa40a6ec3662432779d" ]
[ "figure_1.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Aug 14 09:34:38 2020\n\n@author: Lara Vomfell\n\nThis code generates Figure 1 in our code by generating some Poisson outcomes,\ncontaminating 10% of the data with outliers and then running a KLD-minimizing\nand a TVD-minimizing model and plotting the resulting pmfs.\n\"\"\"\n\nimport numpy as np\n\nfrom npl.likelihood_functions import SimplePoissonLikelihood\n\nimport matplotlib.pyplot as plt\nimport scipy.stats as stats\n\nfrom npl.NPL import NPL\n\n# Figure 1\n\n# set seed\nnp.random.seed(16)\n\n# generate n Poisson outcomes with lambda = 3\nn = 500\nY = np.random.poisson(3, n)\n# contaminate 10% of the data by adding k = 15\nY[0:50] += 15\n\n# tell NPL to use 'SimplePoissonLikelihood', a Poisson lik without covariates\nnpl_fig1 = NPL(SimplePoissonLikelihood(), optimizer = \"BFGS\")\n# generate intercept for NPL\nX = np.ones([n, 1])\n# (quietly) run models\nnpl_fig1.draw_samples(Y, X, B = 500, display_opt = False)\n\n# get MLE and TVD\nmle = npl_fig1.mle.mean()\ntvd = npl_fig1.sample[npl_fig1.sample >= 0].mean()\n\n\n# set up figure\nplt.figure(figsize = (5,3))\nx = range(Y.max() + 1)\n# plot pmf of data\nplt.hist(Y, x, density = 1, color = '#bababa', ec='#9f9f9f',align = 'left')\n\n# plot implied pmfs of both models as lines + dots\nplt.plot(x, stats.poisson.pmf(x, tvd), color = '#009E73', label = 'TVD')\nplt.plot(x, stats.poisson.pmf(x, mle), color = '#56B4E9', label = 'KLD')\n\nplt.plot(x, stats.poisson.pmf(x, mle), color = '#56B4E9', marker = 'o')\nplt.plot(x, stats.poisson.pmf(x, tvd), color = '#009E73', marker = 'o')\n\nplt.xticks(np.array(range(0, 21, 5)))\nplt.ylabel('Probability mass')\nplt.legend(frameon = False)\nplt.tight_layout()\nplt.savefig(\"fig1.png\")" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.tight_layout", "numpy.random.seed", "matplotlib.pyplot.savefig", "numpy.ones", "numpy.random.poisson", "matplotlib.pyplot.ylabel", "scipy.stats.poisson.pmf", "matplotlib.pyplot.hist", "matplotlib.pyplot.figure" ] ]
xdze2/graph_XRD
[ "3e6d9fd5f9ce35d117097a4edf45476a59356af6" ]
[ "sin2_psi/sin2_psi.py" ]
[ "# -*- coding: utf-8 -*-\n# ---\n# jupyter:\n# jupytext:\n# formats: ipynb,py:light\n# text_representation:\n# extension: .py\n# format_name: light\n# format_version: '1.5'\n# jupytext_version: 1.3.3\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\nimport numpy as np\nimport matplotlib.pylab as plt\nplt.rcParams.update({'font.size': 12})\nimport matplotlib.colors as mcolors\ncolors = list( mcolors.TABLEAU_COLORS )\n\nfrom glob import glob\nimport os\nimport peakfit as pf\n\n\ndef read_sin2psi(csv_file):\n \"\"\"\n \n using the one-file export option\n leads to a data array with all different psi and phi measures\n reshape date to have a 3D array of shape: (2theta, psi, phi)\n \"\"\"\n # Try to get the number of header line:\n with open(csv_file, 'r') as f:\n for k in range(40):\n line = f.readline()\n if line.startswith('[Scan points]'):\n header = f.readline()\n break\n \n # import data\n data = np.genfromtxt(csv_file, delimiter=',', skip_header=k+2)\n\n twoth = np.unique( data[:, 0] )\n psi = np.unique( data[:, 1] )\n phi = np.unique( data[:, 2] )\n\n intensities_flat = data[:, -1]\n\n intensities = intensities_flat.reshape(-1,\n len(psi),\n len(phi),\n order='F')\n # ‘F’ Fortran-like index order, with the first index changing fastest\n \n return twoth, psi, phi, intensities\n\n\n# +\ndef distance_from_Bragg(deux_theta, lmbda = 1.5405929):\n # Ang., x-ray wavelength K_alpha1 Cu\n deux_theta = np.asarray(deux_theta)\n return lmbda/2/np.sin(deux_theta/2 *np.pi/180)\n\n\ndef fit_all(twoth_span, psi_span, phi_span, intensities,\n graph=False, output_keys=['x0', 'x0_std', 'fwhm']):\n \n fit_results = {key: np.NaN*np.ones(intensities.shape[1:]) for key in output_keys}\n for phi_idx in range(len(phi_span)):\n if graph: plt.figure()\n \n for k, psi_k in enumerate( psi_span ):\n y = intensities[:, k, phi_idx]\n \n # Graph\n if graph:\n color = colors[k % len(colors)]\n plt.plot(twoth_span, y, '.', label=psi_k, \n alpha=0.5, color=color)\n \n try:\n \n results, fit = pf.peakfit(twoth_span, y,\n pf.PseudoVoigt())\n\n for key, array in fit_results.items():\n array[k, phi_idx] = results[0][key]\n \n if graph:\n plt.plot(twoth_span, fit(twoth_span), color=color)\n\n except RuntimeError:\n print(f'fit error for {phi_idx}, {psi_k}')\n\n if graph:\n plt.text(twoth_span.min(),\n 0.9*intensities[:, :, phi_idx].max(),\n f'phi={phi_span[phi_idx]} deg')\n #plt.title(measure_list[0]);\n plt.xlabel('two theta (deg)');\n\n return fit_results\n\n\n# -\n\n# List data files:\ndata_dir = 'data'\nmeasure_list = glob(os.path.join(data_dir, '*.csv'))\nprint(', '.join(measure_list))\n\n# +\n#for k, psi in enumerate( psi_span ):\n# plt.plot(two_th_span, intensities[:, k, 0], label=psi )\n#plt.legend()\n#plt.title(measure_list[0]);\n#plt.xlabel('two theta (deg)');\n# -\n\n# d0 = {Cu:3.615 Å, Nb:3.3063 Å}\n\n# $$\n# \\varepsilon_{\\psi} = \\varepsilon_{⟂} + (\\varepsilon_{⫽}-\\varepsilon_{⟂})\\sin^2 \\psi\n# $$\n\nd0_mat = {'Cu':3.615, 'Nb':3.3063}\n\ni = 0 # used to selec the filename\n\n# +\n# ========== \n# sin2 psi\n# ==========\n\nfilename = measure_list[i]\nprint('file:', filename)\n\nimage_name = os.path.basename(filename)\nmeasure_name = image_name.replace('csv', '').strip('.')\n\na0 = [a for n, a in d0_mat.items() if n in filename][0]\nprint('a0 (A):', a0)\nhkl = np.array( [float(u) for u in measure_name[-3:]] )\nd0 = a0/np.sqrt(np.sum(hkl**2))\n\nprint('hkl:', hkl)\nprint('d0 (A):', d0)\ntwoth_span, psi_span, phi_span, I = file_path = read_sin2psi(filename)\n\nfit_results 
= fit_all(twoth_span, psi_span, phi_span, I, graph=True)\n\n\n\nplt.figure(figsize=(8, 4));\nk = 0\nd_hlk = distance_from_Bragg(fit_results['x0'])\nsin2psi = np.sin(psi_span *np.pi/180)**2\nfor phi, d_hlk_phi in zip(phi_span, d_hlk.T):\n \n # linear fit:\n \n eps_phi = (d_hlk_phi - d0)/d0\n mask = np.logical_not(np.isnan(eps_phi))\n slope, intercept = np.polyfit(sin2psi[mask], 100*eps_phi[mask], 1)\n \n eps_normal = intercept\n eps_plan = slope + intercept\n\n print(f'phi={phi:3.0f}°', '-->',f'eps_N≃{eps_normal:.6f}%', f'eps_T≃{eps_plan:.6f}%')\n \n # graph\n color = colors[k % len(colors)]\n k += 1\n plt.plot(sin2psi, 100*eps_phi, '-', \n label=f'$\\phi$={phi:4.0f}° eps_N≃{eps_normal:.3f}% eps_T≃{eps_plan:.3f}%',\n color=color);\n plt.plot(sin2psi, (slope*sin2psi + intercept), 'k:', color=color)\n \n \nplt.legend()\nplt.title(measure_name + f' a={a0:.3f}Å');\nplt.xlabel('sin2(psi)'); plt.ylabel('eps_phi (%)');\nplt.tight_layout();\noutputdir = 'output'\n\nimage_name = image_name.replace('csv', 'svg')\nimage_path = os.path.join(outputdir, image_name)\nplt.savefig(image_path)\nprint(f'{image_path} saved')\n\ni += 1\n# -\n\n\n\n" ]
[ [ "matplotlib.pylab.tight_layout", "matplotlib.pylab.rcParams.update", "numpy.polyfit", "numpy.unique", "numpy.asarray", "numpy.isnan", "matplotlib.pylab.title", "numpy.genfromtxt", "numpy.sin", "numpy.ones", "matplotlib.pylab.figure", "matplotlib.pylab.ylabel", "matplotlib.pylab.plot", "matplotlib.pylab.legend", "matplotlib.pylab.savefig", "matplotlib.pylab.xlabel", "numpy.sum" ] ]
face-alignment-group-of-ahucs/SHN-based-2D-face-alignment
[ "fa7ec5f0a97a943858e851453c0b26f257b2f3c6" ]
[ "model.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 14 18:44:00 2018\n\n@author: xiang\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\ndef conv3x3(in_planes, out_planes, strd=1, padding=1, bias=False):\n \"3x3 convolution with padding\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3,\n stride=strd, padding=padding, bias=bias)\n\n\nclass ConvBlock(nn.Module):\n def __init__(self, in_planes, out_planes):\n super(ConvBlock, self).__init__()\n planes = int(out_planes/2)\n self.bn1 = nn.BatchNorm2d(in_planes)\n self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv2 = conv3x3(planes, planes)\n self.bn3 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, out_planes, kernel_size=1, bias=False)\n\n if in_planes != out_planes:\n self.downsample = nn.Sequential(\n nn.BatchNorm2d(in_planes),\n nn.ReLU(True),\n nn.Conv2d(in_planes, out_planes,\n kernel_size=1, stride=1, bias=False),\n )\n else:\n self.downsample = None\n\n def forward(self, x):\n residual = x\n\n out1 = self.bn1(x)\n out1 = F.relu(out1, True)\n out1 = self.conv1(out1)\n\n out2 = self.bn2(out1)\n out2 = F.relu(out2, True)\n out2 = self.conv2(out2)\n\n out3 = self.bn3(out2)\n out3 = F.relu(out3, True)\n out3 = self.conv3(out3)\n\n if self.downsample is not None:\n residual = self.downsample(residual)\n\n out3 += residual\n\n return out3\n\n\nclass Upsample(nn.Module):\n def __init__(self, dim_in, dim_out):\n super(Upsample,self).__init__()\n self.upsample = nn.ConvTranspose2d(dim_in, dim_out, kernel_size=4, stride=2, padding=1)\n \n def forward(self, x):\n return self.upsample(x)\n\nclass HourGlass(nn.Module):\n def __init__(self, depth, num_features):\n super(HourGlass, self).__init__()\n self.depth = depth\n self.features = num_features\n self.Upsample = Upsample(256,256)\n self._generate_network(self.depth)\n\n def _generate_network(self, level):\n self.add_module('b1_' + str(level), ConvBlock(256, 256))\n\n self.add_module('b2_' + str(level), ConvBlock(256, 256))\n\n if level > 1:\n self._generate_network(level - 1)\n else:\n self.add_module('b2_plus_' + str(level), ConvBlock(256, 256))\n\n self.add_module('b3_' + str(level), ConvBlock(256, 256))\n\n def _forward(self, level, inp):\n # Upper branch\n up1 = inp\n up1 = self._modules['b1_' + str(level)](up1)\n\n # Lower branch\n low1 = F.avg_pool2d(inp, 2, stride=2)\n low1 = self._modules['b2_' + str(level)](low1)\n\n if level > 1:\n low2 = self._forward(level - 1, low1)\n else:\n low2 = low1\n low2 = self._modules['b2_plus_' + str(level)](low2)\n\n low3 = low2\n low3 = self._modules['b3_' + str(level)](low3)\n\n up2 = self.Upsample(low3)\n\n return up1 + up2\n\n def forward(self, x):\n return self._forward(self.depth, x)\n\nclass FAN(nn.Module):\n\n def __init__(self, inplanes, outplanes, bn=False):\n super(FAN, self).__init__()\n self.bn = bn\n if bn:\n self.bn = nn.BatchNorm2d(inplanes)\n\n # Base part\n self.conv1 = nn.Conv2d(inplanes, 64, kernel_size=3, stride=1, padding=1)\n self.conv2 = ConvBlock(64, 128)\n self.conv3 = ConvBlock(128, 256)\n self.conv4 = HourGlass(4, 256)\n self.conv5 = ConvBlock(256,128)\n self.conv6 = conv3x3(128, outplanes)\n self.Upsample = Upsample(128,128)\n\n def forward(self, x):\n \n if self.bn:\n x = self.bn(x)\n \n x = self.conv1(x)\n x = self.conv2(x)\n x = F.max_pool2d(x,2,stride=2)\n x = self.conv3(x)\n x = self.conv4(x)\n x = self.conv5(x)\n x = self.Upsample(x)\n out = self.conv6(x)\n\n return out\n " ]
[ [ "torch.nn.ConvTranspose2d", "torch.nn.functional.avg_pool2d", "torch.nn.Conv2d", "torch.nn.functional.relu", "torch.nn.BatchNorm2d", "torch.nn.ReLU", "torch.nn.functional.max_pool2d" ] ]
Kimame04/text-subjectivity-conversion
[ "959eb8ab4f75706cf4a7f83de2ed3c784f557f1a" ]
[ "src/CNNModels.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass CnnTextClassifier(nn.Module):\n def __init__(self, vocab_size,emb_size,num_filters,num_classes, window_sizes=(3, 4, 5)):\n super(CnnTextClassifier, self).__init__()\n\n self.convs = nn.ModuleList([\n nn.Conv2d(1, num_filters, [window_size, emb_size], padding=(window_size - 1, 0))\n for window_size in window_sizes\n ])\n\n self.fc = nn.Linear(num_filters * len(window_sizes), num_classes)\n\n def forward(self, x):\n x = torch.unsqueeze(x, 1) \n xs = []\n for conv in self.convs:\n x2 = F.relu(conv(x)) \n x2 = torch.squeeze(x2, -1) \n x2 = F.max_pool1d(x2, x2.size(2)) \n xs.append(x2)\n x = torch.cat(xs, 2) \n x = x.view(x.size(0), -1) \n logits = self.fc(x) \n \n #True accuracy\n probs = F.softmax(logits) \n classes = torch.max(probs, 1)[1]\n\n return probs, classes\n" ]
[ [ "torch.nn.functional.softmax", "torch.max", "torch.cat", "torch.nn.Conv2d", "torch.unsqueeze", "torch.squeeze" ] ]
IMLHF/Real-Time-Voice-Cloning
[ "ae4aa2aa1605168d2f04275e1a45f6de2d88f3f0" ]
[ "synthesizer/models/architecture_wrappers.py" ]
[ "\"\"\"A set of wrappers useful for tacotron 2 architecture\nAll notations and variable names were used in concordance with originial tensorflow implementation\n\"\"\"\nimport collections\nimport tensorflow as tf\nfrom synthesizer.models.attention import _compute_attention\nfrom tensorflow.contrib.rnn import RNNCell\nfrom tensorflow.python.framework import ops, tensor_shape\nfrom tensorflow.python.ops import array_ops, check_ops, rnn_cell_impl, tensor_array_ops\nfrom tensorflow.python.util import nest\n\n_zero_state_tensors = rnn_cell_impl._zero_state_tensors\n\n\n\nclass TacotronEncoderCell(RNNCell):\n \"\"\"Tacotron 2 Encoder Cell\n Passes inputs through a stack of convolutional layers then through a bidirectional LSTM\n layer to predict the hidden representation vector (or memory)\n \"\"\"\n\n def __init__(self, convolutional_layers, lstm_layer):\n \"\"\"Initialize encoder parameters\n\n Args:\n convolutional_layers: Encoder convolutional block class\n lstm_layer: encoder bidirectional lstm layer class\n \"\"\"\n super(TacotronEncoderCell, self).__init__()\n #Initialize encoder layers\n self._convolutions = convolutional_layers\n self._cell = lstm_layer\n\n def __call__(self, inputs, input_lengths=None):\n #Pass input sequence through a stack of convolutional layers\n conv_output = self._convolutions(inputs)\n\n #Extract hidden representation from encoder lstm cells\n hidden_representation = self._cell(conv_output, input_lengths)\n\n #For shape visualization\n self.conv_output_shape = conv_output.shape\n return hidden_representation\n\n\nclass TacotronDecoderCellState(\n collections.namedtuple(\"TacotronDecoderCellState\",\n (\"cell_state\", \"attention\", \"time\", \"alignments\",\n \"alignment_history\"))):\n \"\"\"`namedtuple` storing the state of a `TacotronDecoderCell`.\n Contains:\n - `cell_state`: The state of the wrapped `RNNCell` at the previous time\n step.\n - `attention`: The attention emitted at the previous time step.\n - `time`: int32 scalar containing the current time step.\n - `alignments`: A single or tuple of `Tensor`(s) containing the alignments\n emitted at the previous time step for each attention mechanism.\n - `alignment_history`: a single or tuple of `TensorArray`(s)\n containing alignment matrices from all time steps for each attention\n mechanism. Call `stack()` on each to convert to a `Tensor`.\n \"\"\"\n def replace(self, **kwargs):\n \"\"\"Clones the current state while overwriting components provided by kwargs.\n \"\"\"\n return super(TacotronDecoderCellState, self)._replace(**kwargs)\n\nclass TacotronDecoderCell(RNNCell):\n \"\"\"Tactron 2 Decoder Cell\n Decodes encoder output and previous mel frames into next r frames\n\n Decoder Step i:\n 1) Prenet to compress last output information\n 2) Concat compressed inputs with previous context vector (input feeding) *\n 3) Decoder RNN (actual decoding) to predict current state s_{i} *\n 4) Compute new context vector c_{i} based on s_{i} and a cumulative sum of previous alignments *\n 5) Predict new output y_{i} using s_{i} and c_{i} (concatenated)\n 6) Predict <stop_token> output ys_{i} using s_{i} and c_{i} (concatenated)\n\n * : This is typically taking a vanilla LSTM, wrapping it using tensorflow\"s attention wrapper,\n and wrap that with the prenet before doing an input feeding, and with the prediction layer\n that uses RNN states to project on output space. 
Actions marked with (*) can be replaced with\n tensorflow\"s attention wrapper call if it was using cumulative alignments instead of previous alignments only.\n \"\"\"\n\n def __init__(self, prenet, attention_mechanism, rnn_cell, frame_projection, stop_projection):\n \"\"\"Initialize decoder parameters\n\n Args:\n prenet: A tensorflow fully connected layer acting as the decoder pre-net\n attention_mechanism: A _BaseAttentionMechanism instance, usefull to\n learn encoder-decoder alignments\n rnn_cell: Instance of RNNCell, main body of the decoder\n frame_projection: tensorflow fully connected layer with r * num_mels output units\n stop_projection: tensorflow fully connected layer, expected to project to a scalar\n and through a sigmoid activation\n mask_finished: Boolean, Whether to mask decoder frames after the <stop_token>\n \"\"\"\n super(TacotronDecoderCell, self).__init__()\n #Initialize decoder layers\n self._prenet = prenet\n self._attention_mechanism = attention_mechanism\n self._cell = rnn_cell\n self._frame_projection = frame_projection\n self._stop_projection = stop_projection\n\n self._attention_layer_size = self._attention_mechanism.values.get_shape()[-1].value\n\n def _batch_size_checks(self, batch_size, error_message):\n return [check_ops.assert_equal(batch_size,\n self._attention_mechanism.batch_size,\n message=error_message)]\n\n @property\n def output_size(self):\n return self._frame_projection.shape\n\n @property\n def state_size(self):\n \"\"\"The `state_size` property of `TacotronDecoderCell`.\n\n Returns:\n An `TacotronDecoderCell` tuple containing shapes used by this object.\n \"\"\"\n return TacotronDecoderCellState(\n cell_state=self._cell._cell.state_size,\n time=tensor_shape.TensorShape([]),\n attention=self._attention_layer_size,\n alignments=self._attention_mechanism.alignments_size,\n alignment_history=())\n\n def zero_state(self, batch_size, dtype):\n \"\"\"Return an initial (zero) state tuple for this `AttentionWrapper`.\n\n Args:\n batch_size: `0D` integer tensor: the batch size.\n dtype: The internal state data type.\n Returns:\n An `TacotronDecoderCellState` tuple containing zeroed out tensors and,\n possibly, empty `TensorArray` objects.\n Raises:\n ValueError: (or, possibly at runtime, InvalidArgument), if\n `batch_size` does not match the output size of the encoder passed\n to the wrapper object at initialization time.\n \"\"\"\n with ops.name_scope(type(self).__name__ + \"ZeroState\", values=[batch_size]):\n cell_state = self._cell._cell.zero_state(batch_size, dtype)\n error_message = (\n \"When calling zero_state of TacotronDecoderCell %s: \" % self._base_name +\n \"Non-matching batch sizes between the memory \"\n \"(encoder output) and the requested batch size.\")\n with ops.control_dependencies(\n self._batch_size_checks(batch_size, error_message)):\n cell_state = nest.map_structure(\n lambda s: array_ops.identity(s, name=\"checked_cell_state\"),\n cell_state)\n return TacotronDecoderCellState(\n cell_state=cell_state,\n time=array_ops.zeros([], dtype=tf.int32),\n attention=_zero_state_tensors(self._attention_layer_size, batch_size,\n dtype),\n alignments=self._attention_mechanism.initial_alignments(batch_size, dtype),\n alignment_history=tensor_array_ops.TensorArray(dtype=dtype, size=0,\n dynamic_size=True))\n\n def __call__(self, inputs, state):\n #Information bottleneck (essential for learning attention)\n prenet_output = self._prenet(inputs)\n\n #Concat context vector and prenet output to form LSTM cells input (input feeding)\n LSTM_input = 
tf.concat([prenet_output, state.attention], axis=-1)\n\n #Unidirectional LSTM layers\n LSTM_output, next_cell_state = self._cell(LSTM_input, state.cell_state)\n\n\n #Compute the attention (context) vector and alignments using\n #the new decoder cell hidden state as query vector\n #and cumulative alignments to extract location features\n #The choice of the new cell hidden state (s_{i}) of the last\n #decoder RNN Cell is based on Luong et Al. (2015):\n #https://arxiv.org/pdf/1508.04025.pdf\n previous_alignments = state.alignments\n previous_alignment_history = state.alignment_history\n context_vector, alignments, cumulated_alignments = _compute_attention(self._attention_mechanism,\n LSTM_output,\n previous_alignments,\n attention_layer=None)\n\n #Concat LSTM outputs and context vector to form projections inputs\n projections_input = tf.concat([LSTM_output, context_vector], axis=-1)\n\n #Compute predicted frames and predicted <stop_token>\n cell_outputs = self._frame_projection(projections_input)\n stop_tokens = self._stop_projection(projections_input)\n\n #Save alignment history\n alignment_history = previous_alignment_history.write(state.time, alignments)\n\n #Prepare next decoder state\n next_state = TacotronDecoderCellState(\n time=state.time + 1,\n cell_state=next_cell_state,\n attention=context_vector,\n alignments=cumulated_alignments,\n alignment_history=alignment_history)\n\n return (cell_outputs, stop_tokens), next_state\n" ]
[ [ "tensorflow.python.ops.tensor_array_ops.TensorArray", "tensorflow.python.framework.tensor_shape.TensorShape", "tensorflow.concat", "tensorflow.python.ops.check_ops.assert_equal", "tensorflow.python.ops.array_ops.zeros", "tensorflow.python.ops.array_ops.identity" ] ]
geblanco/mc-transformers
[ "369d58abc83a2f84e05382a5bc5cd1e99f402e82" ]
[ "mc_transformers/mc_transformers.py" ]
[ "# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" Finetuning the library models for multiple choice (Bert, Roberta, XLNet).\"\"\"\n\n\nimport os\nimport sys\nimport json\nimport logging\n\nfrom pathlib import Path\nfrom typing import Dict, Optional\nfrom collections import defaultdict\nfrom dataclasses import dataclass, field\n\nimport numpy as np\n\nfrom transformers import (\n AutoConfig,\n AutoModelForMultipleChoice,\n AutoTokenizer,\n EvalPrediction,\n HfArgumentParser,\n Trainer,\n TrainingArguments,\n set_seed,\n)\nfrom transformers import is_tf_available\nfrom mc_transformers.utils_mc import MultipleChoiceDataset, Split, processors\nfrom mc_transformers.data_classes import (\n WindowPrediction,\n PredictionOutputWithIds,\n DataCollatorWithIds,\n)\n\n\nif is_tf_available():\n # Force no unnecessary allocation\n import tensorflow as tf\n gpus = tf.config.experimental.list_physical_devices('GPU')\n for gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n\n\nlogger = logging.getLogger(__name__)\nos.environ.update(**{\"WANDB_DISABLED\": \"true\"})\n\n\ndef simple_accuracy(preds, labels):\n return (preds == labels).mean()\n\n\ndef compute_metrics(p: EvalPrediction) -> Dict:\n preds = np.argmax(p.predictions, axis=1)\n return {\"acc\": simple_accuracy(preds, p.label_ids)}\n\n\ndef softmax(preds, axis=None):\n # Taken from: https://nolanbconaway.github.io/blog/2017/softmax-numpy.html\n if axis is None:\n raise ValueError(\"Softmax function needs an axis to work!\")\n # make preds at least 2d\n y = np.atleast_2d(preds)\n # subtract the max for numerical stability\n y = y - np.expand_dims(np.max(y, axis=axis), axis)\n y = np.exp(y)\n # take the sum along the specified axis\n ax_sum = np.expand_dims(np.sum(y, axis=axis), axis)\n p = y / ax_sum\n # flatten if preds was 1D\n if len(preds.shape) == 1:\n p = p.flatten()\n\n return p\n\n\n@dataclass\nclass ModelArguments:\n \"\"\"\n Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.\n \"\"\"\n\n model_name_or_path: str = field(\n metadata={\"help\": \"Path to pretrained model or model identifier from huggingface.co/models\"}\n )\n config_name: Optional[str] = field(\n default=None, metadata={\"help\": \"Pretrained config name or path if not the same as model_name\"}\n )\n tokenizer_name: Optional[str] = field(\n default=None, metadata={\"help\": \"Pretrained tokenizer name or path if not the same as model_name\"}\n )\n cache_dir: Optional[str] = field(\n default=None, metadata={\"help\": \"Where do you want to store the pretrained models downloaded from s3\"}\n )\n\n\n@dataclass\nclass DataTrainingArguments:\n \"\"\"\n Arguments pertaining to what data we are going to input our model for training and eval.\n \"\"\"\n\n task_name: str = field(metadata={\"help\": \"The name of the task to train on: \" + \", 
\".join(processors.keys())})\n data_dir: str = field(metadata={\"help\": \"Should contain the data files for the task.\"})\n max_seq_length: int = field(\n default=128,\n metadata={\n \"help\": \"The maximum total input sequence length after tokenization. Sequences longer \"\n \"than this will be truncated, sequences shorter will be padded.\"\n },\n )\n overwrite_cache: bool = field(\n default=False, metadata={\"help\": \"Overwrite the cached training and evaluation sets\"}\n )\n\n\n@dataclass\nclass DirArguments:\n \"\"\"\n Arguments pertaining to output directories for metrics, results and predictions\n \"\"\"\n metrics_dir: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Output directory for metrics (loss/accuracy)\"\n }\n )\n results_dir: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Output directory for predictions\"\n }\n )\n save_logits: bool = field(\n default=False,\n metadata={\n \"help\": \"Whether to store logits along with predictions\"\n }\n )\n\n\n@dataclass\nclass WindowArguments:\n \"\"\"\n Arguments pertaining to output directories for metrics, results and predictions\n \"\"\"\n enable_windowing: bool = field(\n default=False,\n metadata={\n 'help': 'Enable windowing system alltogether'\n }\n )\n stride: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"Stride to use when windowing features\"\n }\n )\n no_answer_text: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Text of an unanswerable question option (Triggers \"\n \"label correction mechanism on windowed features)\"\n }\n )\n windows_dir: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Output directory for windowed predictions\"\n }\n )\n\n\ndef vote_windowed_predictions(windowed_predictions):\n predictions = []\n # strategies:\n # max along each column\n # voting?\n for win_pred in windowed_predictions:\n predictions.append(np.argmax(win_pred.predictions, axis=0))\n\n return np.array(predictions)\n\n\ndef parse_windowed_predictions(args, processor, results, split):\n data_args = args['data_args']\n window_args = args['window_args']\n if split == Split.dev:\n file_name = \"dev_windowed_predictions.json\"\n examples = processor.get_dev_examples(data_args.data_dir)\n elif split == Split.test:\n file_name = \"test_windowed_predictions.json\"\n examples = processor.get_test_examples(data_args.data_dir)\n else:\n file_name = \"train_windowed_predictions.json\"\n examples = processor.get_train_examples(data_args.data_dir)\n\n predictions = []\n id_to_example_map = {example.example_id: example for example in examples}\n label_map = {label: i for i, label in enumerate(processor.get_labels())}\n\n example_labels = defaultdict(list)\n example_window_ids = defaultdict(list)\n example_predictions = defaultdict(list)\n\n zipped = zip(results.example_ids, results.label_ids, results.predictions)\n for feat_id, feat_label, feat_preds in zipped:\n str_feat_id = str(feat_id)\n example_id = int(str_feat_id[:-2])\n win_id = int(str_feat_id[-2:])\n example_labels[example_id].append(int(feat_label))\n example_window_ids[example_id].append(win_id)\n example_predictions[example_id].append(feat_preds)\n\n for example_id, example in id_to_example_map.items():\n windowed_predictions = WindowPrediction(\n predictions=np.vstack(example_predictions[example_id]),\n window_ids=example_window_ids[example_id],\n labels=example_labels[example_id],\n label=label_map[example.label],\n example=example,\n )\n predictions.append(windowed_predictions)\n\n if 
window_args.windows_dir is not None:\n Path(window_args.windows_dir).mkdir(parents=True, exist_ok=True)\n file_path = os.path.join(window_args.windows_dir, file_name)\n window_preds_str = json.dumps([win.todict() for win in predictions])\n with open(file_path, 'w') as fout:\n fout.write(window_preds_str + '\\n')\n\n reduced_predictions = vote_windowed_predictions(predictions)\n example_ids, label_ids = zip(*[\n (win_pred.example.example_id, win_pred.example.label)\n for win_pred in predictions\n ])\n return parse_default_predictions(\n args=args,\n processor=processor,\n example_ids=example_ids,\n label_ids=label_ids,\n predictions=reduced_predictions,\n )\n\n\ndef parse_default_predictions(args, processor, example_ids, label_ids, predictions):\n # ToDo := Test predictions should not have true label\n # cast to avoid json serialization issues\n example_ids = [processor._decode_id(int(ex_id)) for ex_id in example_ids]\n label_ids = [int(lab) for lab in label_ids]\n label_id_map = {i: chr(ord('A') + int(label)) for i, label in enumerate(processor.get_labels())}\n\n pred_logits = predictions.tolist()\n predictions = softmax(predictions, axis=1)\n predictions_dict = defaultdict(list)\n\n for (ex_id, q_id), true_label, preds, logits in zip(example_ids, label_ids, predictions, pred_logits):\n pred_dict = {\n \"probs\": preds.tolist(),\n \"pred_label\": label_id_map[np.argmax(preds)],\n \"label\": label_id_map[true_label],\n }\n\n if args['dir_args'].save_logits:\n pred_dict.update(**{\"logits\": logits})\n\n predictions_dict[ex_id].append(pred_dict)\n\n full_ids = ['-'.join([c_id, qa_id]) for c_id, qa_id in example_ids]\n predictions = np.argmax(predictions, axis=1)\n predicted_labels = [label_id_map[id] for id in predictions]\n predictions_list = dict(zip(full_ids, predicted_labels))\n\n return predictions_dict, predictions_list\n\n\ndef save_metrics(metrics, args, split):\n dir_args = args['dir_args']\n prefix = \"eval\"\n if split == Split.test:\n prefix = \"test\"\n elif split == Split.train:\n prefix = \"train\"\n\n metrics_dict = {}\n output_metrics_file = os.path.join(\n dir_args.metrics_dir,\n f\"{prefix}_metrics.json\"\n )\n for key in [\"eval_loss\", \"eval_acc\"]:\n if metrics.get(key) is not None:\n metrics_dict[key] = metrics.get(key)\n\n if len(metrics_dict.keys()) == 0:\n logger.info(\"Neither loss or accuracy found on result dict!\")\n else:\n Path(dir_args.metrics_dir).mkdir(parents=True, exist_ok=True)\n with open(output_metrics_file, \"w\") as writer:\n writer.write(json.dumps(metrics_dict) + '\\n')\n for key, value in metrics_dict.items():\n logger.info(\" %s = %s\", key, value)\n\n\ndef save_predictions(processor, results, args, split):\n dir_args, window_args = args['dir_args'], args['window_args']\n prefix = \"eval\"\n if split == Split.test:\n prefix = \"test\"\n elif split == Split.train:\n prefix = \"train\"\n\n if window_args.enable_windowing:\n predictions_dict, predictions_list = parse_windowed_predictions(\n args=args,\n processor=processor,\n results=results,\n split=split,\n )\n else:\n predictions_dict, predictions_list = parse_default_predictions(\n args=args,\n processor=processor,\n example_ids=results.example_ids,\n label_ids=results.label_ids,\n predictions=results.predictions,\n )\n\n output_nbest_file = os.path.join(\n dir_args.results_dir,\n f\"{prefix}_nbest_predictions.json\"\n )\n output_predictions_file = os.path.join(\n dir_args.results_dir,\n f\"{prefix}_predictions.json\"\n )\n\n Path(dir_args.results_dir).mkdir(parents=True, exist_ok=True)\n with 
open(output_nbest_file, \"w\") as writer:\n writer.write(json.dumps(predictions_dict) + '\\n')\n\n with open(output_predictions_file, \"w\") as writer:\n writer.write(json.dumps(predictions_list) + '\\n')\n\n\ndef save_results(processor, results, args, split):\n # only predict method returns prediction outputs,\n # evaluate and train only return the metrics\n if isinstance(results, PredictionOutputWithIds):\n save_metrics(results.metrics, args, split)\n save_predictions(processor, results, args, split)\n else:\n save_metrics(results, args, split)\n\n\ndef pair_predictions_with_ids(results, data_collator):\n return PredictionOutputWithIds(\n predictions=results.predictions,\n label_ids=results.label_ids,\n example_ids=data_collator.example_ids,\n metrics=results.metrics,\n )\n\n\ndef setup(argc=None, **kwargs):\n if argc is None:\n argc = sys.argv[1:]\n parser = HfArgumentParser((\n ModelArguments, DataTrainingArguments,\n DirArguments, TrainingArguments, WindowArguments\n ))\n if (\n isinstance(argc, list) and\n len(argc) == 1 and\n argc[0].endswith('.json')\n ):\n model_args, data_args, dir_args, training_args, window_args = (\n parser.parse_json_file(argc[0])\n )\n elif isinstance(argc, dict):\n model_args, data_args, dir_args, training_args, window_args = (\n parser.parse_dict(argc)\n )\n else:\n model_args, data_args, dir_args, training_args, window_args = (\n parser.parse_args_into_dataclasses()\n )\n\n if (\n os.path.exists(training_args.output_dir)\n and [f for f in os.listdir(training_args.output_dir) if f != '.gitignore']\n and training_args.do_train\n and not training_args.overwrite_output_dir\n ):\n raise ValueError(\n f\"Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome.\"\n )\n\n all_args = {\n 'model_args': model_args,\n 'data_args': data_args,\n 'dir_args': dir_args,\n 'training_args': training_args,\n 'window_args': window_args,\n }\n # Setup logging\n logging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n datefmt=\"%m/%d/%Y %H:%M:%S\",\n level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,\n )\n logger.warning(\n \"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s\",\n training_args.local_rank,\n training_args.device,\n training_args.n_gpu,\n bool(training_args.local_rank != -1),\n training_args.fp16,\n )\n logger.info(\"Training/evaluation parameters %s\", training_args)\n\n # Set seed\n set_seed(training_args.seed)\n\n try:\n processor = processors[data_args.task_name]()\n label_list = processor.get_labels()\n num_labels = len(label_list)\n except KeyError:\n raise ValueError(\"Task not found: %s\" % (data_args.task_name))\n\n config_kwargs = kwargs.pop('config_kwargs', {})\n tokenizer_kwargs = kwargs.pop('tokenizer_kwargs', {})\n model_kwargs = kwargs.pop('model_kwargs', {})\n\n config = AutoConfig.from_pretrained(\n model_args.config_name if model_args.config_name else model_args.model_name_or_path,\n num_labels=num_labels,\n finetuning_task=data_args.task_name,\n cache_dir=model_args.cache_dir,\n **config_kwargs,\n )\n tokenizer = AutoTokenizer.from_pretrained(\n model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,\n cache_dir=model_args.cache_dir,\n **tokenizer_kwargs,\n )\n model = AutoModelForMultipleChoice.from_pretrained(\n model_args.model_name_or_path,\n from_tf=bool(\".ckpt\" in model_args.model_name_or_path),\n config=config,\n 
cache_dir=model_args.cache_dir,\n **model_kwargs,\n )\n\n return all_args, processor, config, tokenizer, model\n\n\ndef main():\n all_args, processor, config, tokenizer, model = setup()\n model_args, data_args, dir_args, training_args, window_args = (\n all_args.values()\n )\n\n train_dataset = (\n MultipleChoiceDataset(\n data_dir=data_args.data_dir,\n tokenizer=tokenizer,\n task=data_args.task_name,\n max_seq_length=data_args.max_seq_length,\n overwrite_cache=data_args.overwrite_cache,\n mode=Split.train,\n enable_windowing=window_args.enable_windowing,\n stride=window_args.stride,\n no_answer_text=window_args.no_answer_text,\n )\n if training_args.do_train\n else None\n )\n eval_dataset = (\n MultipleChoiceDataset(\n data_dir=data_args.data_dir,\n tokenizer=tokenizer,\n task=data_args.task_name,\n max_seq_length=data_args.max_seq_length,\n overwrite_cache=data_args.overwrite_cache,\n mode=Split.dev,\n enable_windowing=window_args.enable_windowing,\n stride=window_args.stride,\n no_answer_text=window_args.no_answer_text,\n )\n if training_args.do_eval\n else None\n )\n\n test_dataset = (\n MultipleChoiceDataset(\n data_dir=data_args.data_dir,\n tokenizer=tokenizer,\n task=data_args.task_name,\n max_seq_length=data_args.max_seq_length,\n overwrite_cache=data_args.overwrite_cache,\n mode=Split.test,\n enable_windowing=window_args.enable_windowing,\n stride=window_args.stride,\n no_answer_text=window_args.no_answer_text,\n )\n if training_args.do_predict\n else None\n )\n\n data_collator = DataCollatorWithIds()\n\n # Initialize our Trainer\n trainer = Trainer(\n model=model,\n args=training_args,\n train_dataset=train_dataset,\n eval_dataset=eval_dataset,\n compute_metrics=compute_metrics,\n data_collator=data_collator.collate,\n )\n\n # Training\n results = {}\n if training_args.do_train:\n trainer.train(\n model_path=(\n model_args.model_name_or_path\n if os.path.isdir(model_args.model_name_or_path)\n else None\n )\n )\n trainer.save_model()\n # For convenience, we also re-save the tokenizer to the same directory,\n # so that you can share your model easily on huggingface.co/models =)\n if trainer.is_world_master():\n tokenizer.save_pretrained(training_args.output_dir)\n\n logger.info(\"*** Evaluate (train set)***\")\n result = trainer.predict(train_dataset)\n if trainer.is_world_master():\n result = pair_predictions_with_ids(result, data_collator)\n save_results(\n processor, result, all_args, split=Split.train\n )\n results['train'] = result\n data_collator.drop_ids()\n\n if training_args.do_eval:\n logger.info(\"*** Evaluate ***\")\n result = trainer.predict(eval_dataset)\n if trainer.is_world_master():\n result = pair_predictions_with_ids(result, data_collator)\n save_results(\n processor, result, all_args, split=Split.dev\n )\n results['eval'] = result\n data_collator.drop_ids()\n\n if training_args.do_predict:\n logger.info(\"*** Test ***\")\n result = trainer.predict(test_dataset)\n if trainer.is_world_master():\n result = pair_predictions_with_ids(result, data_collator)\n save_results(\n processor, result, all_args, split=Split.test\n )\n results['test'] = result\n data_collator.drop_ids()\n\n return results\n\n\ndef _mp_fn(index):\n # For xla_spawn (TPUs)\n main()\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "tensorflow.config.experimental.set_memory_growth", "tensorflow.config.experimental.list_physical_devices", "numpy.atleast_2d", "numpy.max", "numpy.argmax", "numpy.array", "numpy.exp", "numpy.sum", "numpy.vstack" ] ]
Shyam-Prasad-Gupta/ML-AI-DL
[ "470ab94e7f76bce1ad14f0ccc2575bd617100d4a" ]
[ "Digit Recogniser/src/network.py" ]
[ "\"\"\"\nnetwork.py\n~~~~~~~~~~\n\nA module to implement the stochastic gradient descent learning\nalgorithm for a feedforward neural network. Gradients are calculated\nusing backpropagation. Note that I have focused on making the code\nsimple, easily readable, and easily modifiable. It is not optimized,\nand omits many desirable features.\n\"\"\"\n\n#### Libraries\n# Standard library\nimport random\n\n# Third-party libraries\nimport numpy as np\n\n\nclass Network(object):\n\n def __init__(self, sizes):\n \"\"\"The list ``sizes`` contains the number of neurons in the\n respective layers of the network. For example, if the list\n was [2, 3, 1] then it would be a three-layer network, with the\n first layer containing 2 neurons, the second layer 3 neurons,\n and the third layer 1 neuron. The biases and weights for the\n network are initialized randomly, using a Gaussian\n distribution with mean 0, and variance 1. Note that the first\n layer is assumed to be an input layer, and by convention we\n won't set any biases for those neurons, since biases are only\n ever used in computing the outputs from later layers.\"\"\"\n self.num_layers = len(sizes)\n self.sizes = sizes\n self.biases = [np.random.randn(y, 1) for y in sizes[1:]]\n self.weights = [np.random.randn(y, x)\n for x, y in zip(sizes[:-1], sizes[1:])]\n\n def feedforward(self, a):\n \"\"\"Return the output of the network if ``a`` is input.\"\"\"\n for b, w in zip(self.biases, self.weights):\n a = sigmoid(np.dot(w, a)+b)\n return a\n\n def SGD(self, training_data, epochs, mini_batch_size, eta,\n test_data=None):\n \"\"\"Train the neural network using mini-batch stochastic\n gradient descent. The ``training_data`` is a list of tuples\n ``(x, y)`` representing the training inputs and the desired\n outputs. The other non-optional parameters are\n self-explanatory. If ``test_data`` is provided then the\n network will be evaluated against the test data after each\n epoch, and partial progress printed out. This is useful for\n tracking progress, but slows things down substantially.\"\"\"\n if test_data: n_test = len(test_data)\n n = len(training_data)\n for j in range(epochs):\n random.shuffle(training_data)\n mini_batches = [\n training_data[k:k+mini_batch_size]\n for k in range(0, n, mini_batch_size)]\n for mini_batch in mini_batches:\n self.update_mini_batch(mini_batch, eta)\n if test_data:\n print(\"Epoch {0}: {1} / {2}\".format(\n j, self.evaluate(test_data), n_test))\n else:\n print(\"Epoch {0} complete\".format(j))\n\n def update_mini_batch(self, mini_batch, eta):\n \"\"\"Update the network's weights and biases by applying\n gradient descent using backpropagation to a single mini batch.\n The ``mini_batch`` is a list of tuples ``(x, y)``, and ``eta``\n is the learning rate.\"\"\"\n nabla_b = [np.zeros(b.shape) for b in self.biases]\n nabla_w = [np.zeros(w.shape) for w in self.weights]\n for x, y in mini_batch:\n delta_nabla_b, delta_nabla_w = self.backprop(x, y)\n nabla_b = [nb+dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]\n nabla_w = [nw+dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]\n self.weights = [w-(eta/len(mini_batch))*nw\n for w, nw in zip(self.weights, nabla_w)]\n self.biases = [b-(eta/len(mini_batch))*nb\n for b, nb in zip(self.biases, nabla_b)]\n\n def backprop(self, x, y):\n \"\"\"Return a tuple ``(nabla_b, nabla_w)`` representing the\n gradient for the cost function C_x. 
``nabla_b`` and\n ``nabla_w`` are layer-by-layer lists of numpy arrays, similar\n to ``self.biases`` and ``self.weights``.\"\"\"\n nabla_b = [np.zeros(b.shape) for b in self.biases]\n nabla_w = [np.zeros(w.shape) for w in self.weights]\n # feedforward\n activation = x\n activations = [x] # list to store all the activations, layer by layer\n zs = [] # list to store all the z vectors, layer by layer\n for b, w in zip(self.biases, self.weights):\n z = np.dot(w, activation)+b\n zs.append(z)\n activation = sigmoid(z)\n activations.append(activation)\n # backward pass\n delta = self.cost_derivative(activations[-1], y) * \\\n sigmoid_prime(zs[-1])\n nabla_b[-1] = delta\n nabla_w[-1] = np.dot(delta, activations[-2].transpose())\n # Note that the variable l in the loop below is used a little\n # differently to the notation in Chapter 2 of the book. Here,\n # l = 1 means the last layer of neurons, l = 2 is the\n # second-last layer, and so on. It's a renumbering of the\n # scheme in the book, used here to take advantage of the fact\n # that Python can use negative indices in lists.\n for l in range(2, self.num_layers):\n z = zs[-l]\n sp = sigmoid_prime(z)\n delta = np.dot(self.weights[-l+1].transpose(), delta) * sp\n nabla_b[-l] = delta\n nabla_w[-l] = np.dot(delta, activations[-l-1].transpose())\n return (nabla_b, nabla_w)\n\n def evaluate(self, test_data):\n \"\"\"Return the number of test inputs for which the neural\n network outputs the correct result. Note that the neural\n network's output is assumed to be the index of whichever\n neuron in the final layer has the highest activation.\"\"\"\n test_results = [(np.argmax(self.feedforward(x)), y)\n for (x, y) in test_data]\n return sum(int(x == y) for (x, y) in test_results)\n\n def cost_derivative(self, output_activations, y):\n \"\"\"Return the vector of partial derivatives \\partial C_x /\n \\partial a for the output activations.\"\"\"\n return (output_activations-y)\n\n#### Miscellaneous functions\ndef sigmoid(z):\n \"\"\"The sigmoid function.\"\"\"\n return 1.0/(1.0+np.exp(-z))\n\ndef sigmoid_prime(z):\n \"\"\"Derivative of the sigmoid function.\"\"\"\n return sigmoid(z)*(1-sigmoid(z))\n" ]
[ [ "numpy.dot", "numpy.exp", "numpy.random.randn", "numpy.zeros" ] ]
alexdiem/nvdbapi-V3
[ "18265ee6d02aed17d6199e5ed42fe731c9320a08" ]
[ "nvdbgeotricks.py" ]
[ "\"\"\"\nEn samling hjelpefunksjoner som bruker nvdbapiv3-funksjonene til å gjøre nyttige ting, f.eks. lagre geografiske datasett\n\nDisse hjelpefunksjonene forutsetter fungerende installasjon av geopandas, shapely og en del andre ting som må \ninstalleres separat. Noen av disse bibliotekene kunne historisk av og til være plundrete å installere, evt \nha versjonskonflikter seg i mellom, spesielt på windows. Slikt plunder hører historien til (stort sett)\n\nAnbefalingen er like fullt å bruke (ana)conda installasjon i et eget \"environment\". Dette er god kodehygiene\nog sikrer minimalt med kluss, samt ikke minst: Eventuelt kluss lar seg greit reparere ved å lage nytt \"enviroment\", \nuten at det påvirker hele python-installasjonen din. \n\"\"\"\nimport re\nimport pdb\nfrom copy import deepcopy\nimport sqlite3\n\nfrom shapely import wkt \n# from shapely.ops import unary_union\nimport pandas as pd \nimport geopandas as gpd \nfrom datetime import datetime\n\nimport nvdbapiv3\nfrom nvdbapiv3 import apiforbindelse\n\ndef finnoverlapp( dfA, dfB, prefixA=None, prefixB=None, join='inner' ): \n \"\"\"\n Finner overlapp mellom to (geo)pandas (geo)dataframes med veglenkeposisjoner. \n \n For å minimere navnekollisjon gir vi et prefiks til alle kolonnenanv i Dataframe B basert på objekttypen \n (prefikset kan overstyres med nøkkelord prefixB )\n\n Returverdien er en dataframe med alle vegsegmenter som overlapper. Ett vegobjekt har gjerne flere vegsegmenter. \n Hvis man ønsker en rad per vegobjekt-kombinasjon må man filtrere inputada i forkant eller resultatene i \n etterkant. Det mest lettvinte er da å fjerne duplikater basert på Nvdb ID (vegobjekt id). \n\n Hvis du har en verdikjede hvor du ønsker å kombinere mange dataett (for eksempel mange ulike objekttyper) så \n må du selv ta ansvar for å unngå navnekollisjon og forvirring. Vi har tre metoder: \n\n 1) Definer hvilken objekttype som alltid blir dfA i koblingene. Kolonnenavnene i dfA endres ikke i \n resultatdatasettet, og kan derfor \"gjenbrukes\" når resultatdatasettet kobles med dataframes for andre \n objekttyper. For eksempel dersom du kobler tunnelløp med fartsgrense og trafikkmengde kan du gjøre noe slikt: \n\n resultat1 = finnoverlapp( dfTunnellop, dfFartsgrenser ) \n resultat2 = finnoverlapp( resultat1, dfTrafikkmengde )\n\n resultat2 har da tunnelløp koblet med fartsgrenser (med forstavelsen t105_ ) og trafikkmengde (med forstavelsen t540_ )\n\n 2) Ta eksplisitt kontroll over prefiks med nøkkelordene prefixA, prefixB. Merk at prefiks kun føyes til kolonnenavn \n dersom det ikke finnes fra før, så vi inngår prefiks av typen t67_t67_ \n\n 3) Fjern \"overflødige\" kolonner fra mellomliggende resultater, gjerne kombinert med tricks 2) \n \n Samme navnelogikk er brukt i funksjonen finndatter. \n\n TODO: Funksjonen håndterer ikke dictionary-elementer. Spesielt relasjon-strukturen (dictionary) gir oss problemer. \n \n ARGUMENTS\n dfA, dfB - Pandas dataframe eller Geopandas geodataframe, eller kombinasjon. Returverdi blir identisk med dfA. \n\n KEYWORDS\n prefixA=None Valgfri tekststreng med det prefikset som skal føyes til navn i dfA, eller det prefikset som \n er brukt fordi dfA er resultatet fra en tidligere kobling \n\n prefixB=None Valgfri tekststreng med det prefikset som skal føyes til navn i dfB. Hvis ikke angitt så komponerer vi \n prefiks ut fra objektTypeID, for eksempel \"67_\" for 67 Tunnelløp. \n\n join = 'inner' | 'left' . Hva slags sql-join vi skal gjøre, mest aktuelle er 'INNER' eller 'LEFT'. 
I prinsippet en hvilke\n som helst variant som er støttet av sqlite3.\n\n RETURNS\n Pandas DataFrame, eller Geopandas Geodataframe, avhengig av hva dfA er for slag. \n\n TODO: Inputdata er Vegnett + vegnett eller vegobjekter + vegnett ? (Trengs dette?) \n\n\n \"\"\"\n\n # Lager kopier, så vi ikke får kjipe sideeffekter av orginaldatasettet \n dfA = dfA.copy()\n dfB = dfB.copy()\n\n col_vlinkA = 'veglenkesekvensid' \n col_startA = 'startposisjon' \n col_sluttA = 'sluttposisjon'\n col_relposA = 'relativPosisjon'\n\n if prefixA: \n # Tester om prefikset er i bruk\n if len( [ x for x in list( dfA.columns ) if prefixA in x ] ) == 0: \n dfA = dfA.add_prefix( prefixA )\n\n col_vlinkA = prefixA + col_vlinkA\n col_startA = prefixA + col_startA\n col_sluttA = prefixA + col_sluttA\n col_relposA = prefixA + col_relposA \n\n # Gjetter på prefix B om den ikke finnes. \n if not prefixB: \n temp = [x for x in list( dfB.columns ) if 'objekttype' in x ]\n assert len(temp) == 1, f\"finnoverlapp: Lette etter en kolonne kalt objekttype i dfB, fant {len(temp)} stk: {temp} \"\n temp2 = list( dfB[temp[0]].unique() )\n assert len(temp2) == 1, f\"finnoverlapp: Lette etter unik objekttype i dfB kolonne {temp[0]}, fant {len(temp2)} stk: {temp2} \"\n prefixB = 't' + str( temp2[0] ) + '_'\n\n # Tester om prefikset allerede er i bruk: \n if len( [ x for x in list( dfB.columns ) if prefixB in x ] ) == 0: \n dfB = dfB.add_prefix( prefixB )\n\n col_vlinkB = prefixB + 'veglenkesekvensid' \n col_startB = prefixB + 'startposisjon' \n col_sluttB = prefixB + 'sluttposisjon'\n col_relposB = prefixB + 'relativPosisjon'\n\n # Kvalitetssjekk på at vi har det som trengs: \n assert col_vlinkA in dfA.columns, f\"finnoverlapp: Fant ikke kolonne {col_vlinkA} i dfA {dfA.columns} \"\n assert col_vlinkB in dfB.columns, f\"finnoverlapp: Fant ikke kolonne {col_vlinkB} i dfB {dfB.columns} \"\n\n # Har vi punkt-vs punkt? Spesialcase. 
De andre tifellene (linje vs linje, punkt-linje eller linje-punkt)\n # kan vi håndtere fint ved å trickse med å sette startposisjon, sluttposisjon - navnente lik relativPosisjon - kolonnen\n # Vi kategoriserer de to \n\n typeA = ''\n typeB = ''\n if col_startA in dfA.columns and col_sluttA in dfA.columns: \n typeA = 'LINJE'\n elif col_relposA in dfA.columns: \n typeA = 'PUNKT'\n col_startA = col_relposA\n col_sluttA = col_relposA\n else: \n raise ValueError( f\"Finner ikke kolonner for veglenkeposisjon: {col_startA, col_sluttA} eller {col_relposA} i dfA\")\n\n if col_startB in dfB.columns and col_sluttB in dfB.columns: \n typeB = 'LINJE'\n elif col_relposB in dfB.columns: \n typeB = 'PUNKT'\n col_startB = col_relposB\n col_sluttB = col_relposB\n else: \n raise ValueError( f\"Finner ikke kolonner for veglenkeposisjon: {col_startB, col_sluttB} eller {col_relposB} i dfB \")\n\n if typeA == 'PUNKT' and typeB == 'PUNKT': \n qry = ( f\"select * from A\\n\"\n f\"{join.upper()} JOIN B ON\\n\"\n f\"A.{col_vlinkA} = B.{col_vlinkB} and\\n\"\n f\"A.{col_relposA} = B{col_relposB} \"\n )\n else: \n qry = ( f\"select * from A\\n\"\n f\"{join.upper()} JOIN B ON\\n\"\n f\"A.{col_vlinkA} = B.{col_vlinkB} and\\n\"\n f\"A.{col_startA} < B.{col_sluttB} and\\n\"\n f\"A.{col_sluttA} > B.{col_startB} \"\n )\n\n print( qry )\n\n conn = sqlite3.connect( ':memory:')\n dfA.to_sql( 'A', conn, index=False )\n dfB.to_sql( 'B', conn, index=False )\n joined = pd.read_sql_query( qry, conn )\n\n # EKSEMPELKODE!\n # LBger virituell database, slik at vi kan gjøre SQL-spørringer\n # conn = sqlite3.connect( ':memory:')\n # temp2010.to_sql( 'v2010', conn, index=False )\n # temp2009.to_sql( 'v2009', conn, index=False )\n\n # qry = \"\"\"\n # select max( v2010.startposisjon, v2009.d2009_startposisjon ) as frapos, \n # min( v2010.sluttposisjon, v2009.d2009_sluttposisjon ) as tilpos, \n # * from v2009\n # INNER JOIN v2010 ON \n # v2009.d2009_veglenkesekvensid = v2010.veglenkesekvensid and\n # v2009.d2009_startposisjon < v2010.sluttposisjon and \n # v2009.d2009_sluttposisjon > v2010.startposisjon\n # \"\"\"\n #\n # joined = pd.read_sql_query( qry, conn) \n\n return joined \n\n\n # raise NotImplementedError( \"Har ikke fått laget denne ennå, sjekk om noen dager\")\n\n\n\ndef finnDatter( morDf, datterDf, prefixMor=None, prefixDatter=None, ignorerDatterPrefix=False ): \n \"\"\"\n Finner relasjoner mellom vegobjekter i (geo)dataframe \n \n Returnerer ny dataframe hvor alle elementer i datterDf er påført informasjon fra mor-objektet hentet fra morDf \n\n For å unngå navnekollisjon er standardoppførselen å føye forstavelsen kolonnenavn <vegobjektTypeId>_ til \n alle kolonnenavn i datterdf. Denne oppførselen reguleres med nøkkelordene addprefix_datter og prefix. \n\n Når du har en verdikjede med flere koblinger etter hverandre (evt med funksjonen finnoverlapp) er det risiko \n for navnekollisjon og navneforvirring. Hvis ikke du overstyrer med argumentet prefiksMor så beholder vi kolonnenavn\n fra morDf, men endrer alle kolonnenavnene i datterDf med forstavelsen \"<objektTypeID>_\", for eksempel \"67_\". \n Forstavelse for datterDf kan også overstyres med nøkkelord prefixDatter. Merk at hvis morDf eller datterDf allerede er \n \"omdøpt\" med dette prefikset så føyes det ikke til enda en gang (men brukes for å identifisere riktige kolonner) \n Se også dokumentasjon for funksjonen finnoverlapp. \n\n I noen sammenhenger er det riktig å behandle hvert vegsegment til et objekt separat, andre ganger ønsker man kun \n en rad per objekt Id. 
Funksjonen finnDatter kan ikke avgjøre hva som er riktig for deg, men gir ut det den får inn. \n Dvs hvis ID2 er datterobjekt til ID1 så vil du få returnert en rad med kombinasjonen ID1->ID2 for hver kombinasjon av \n vegsegmenter for objektene ID1, ID2. Dvs hvis ID1 har to vegsegmenter og Id2 har tre så får du seks rader i resultatene. \n Du må selv filtrere vekk de kombinasjonene du ikke vil ha, eller filtrere \n vekk duplikater fra inputdata. I så fall er anbefalingen å filtrere på Nvdb Id. \n\n ARGUMENTS: \n morDf, datterDf: Pandas dataframe eller geopandas geodataframe. \n\n KEYWORDS: \n Her er nøkkelord som regulerer hvordan vi døper om kolonner i datterDf (og evt morDf) for å minimere navnekollisjon. \n Standardoppførselen er å beholde alle navn i morDf, men døpe vi om alle kolonnenavn i datterDf med \"t<objektTypeID>_\" som prefiks. \n Merk at vi ikke endrer kolonnenavn som allerede inneholder det vi ellers ville brukt som prefiks for å døpe dem om. \n\n prefixMor=None eller tekststreng. Brukes hvis det er ønskelig å døpe om alle kolonnenavn i morDf med dette som prefix \n\n prefixDatter=None eller tekststreng. Angis hvis du vil bruke noe annet enn \"t<objektTypeID>_\" som prefiks når du gir nye navn til \n kolonner i datterDf. \n \n ignorerDatterPrefix: Endrer IKKE kolonnenavn i datterDf. \n RETURNS\n dataFrame eller Geodataframe (samme som morDf)\n \"\"\"\n\n # Lager kopier, så vi ikke får kjipe sideeffekter av orginaldatasettet \n mDf = morDf.copy()\n dDf = datterDf.copy()\n\n idKey = 'nvdbId'\n if prefixMor: \n # Sjekker om prefixet er i bruk allerede:\n if len( [ x for x in list( mDf.columns ) if prefixMor in x ] ) == 0: \n mDf = mDf.add_prefix( prefixMor )\n idKey = prefixMor + 'nvdbId'\n\n if prefixDatter and not ignorerDatterPrefix: \n # Sjekker om prefikset er i bruk allerede\n if len( [ x for x in list( dDf.columns ) if prefixDatter in x ] ) == 0: \n dDf = dDf.add_prefix( prefixDatter )\n\n relKey = prefixDatter + 'relasjoner'\n datterIdKey = prefixDatter + 'nvdbId'\n \n else: \n temp = [x for x in list( dDf.columns ) if 'objekttype' in x ]\n assert len(temp) == 1, f\"finnDatter: Lette etter en kolonne kalt objekttype i datterDf, fant {len(temp)} stk: {temp} \"\n temp2 = list( dDf[temp[0]].unique() )\n assert len(temp2) == 1, f\"finnDatter: Lette etter unik objekttype i datterDf kolonne {temp[0]}, fant {len(temp2)} stk: {temp2} \"\n\n if ignorerDatterPrefix: \n relKey = 'relasjoner' \n datterIdKey = 'nvdbId'\n\n else: \n relKey = 't' + str( temp2[0] ) + '_relasjoner'\n datterIdKey = 't' + str( temp2[0] ) + '_nvdbId'\n dDf = dDf.add_prefix( 't' + str( temp2[0] ) + '_' )\n\n assert len( [x for x in list( mDf.columns ) if idKey in x ] ) == 1, f\"Fant ikke unik kolonne {idKey} i mor-datasett, prefixMor={prefixMor} \"\n assert len( [x for x in list( dDf.columns ) if relKey in x ] ) == 1, f\"Fant ikke unik kolonne {relKey} i datter-datasett, prefixDatter={prefixDatter} \"\n assert len( [x for x in list( dDf.columns ) if datterIdKey in x ] ) == 1, f\"Fant ikke unik kolonne {datterIdKey} i datter-datasett, prefixDatter={prefixDatter} \"\n\n returdata = []\n for ii, row in dDf.iterrows(): \n\n row_resultat = []\n if relKey in row and 'foreldre' in row[relKey]: \n morIdListe = []\n morObjektTypeId = []\n for mortype in row[relKey]['foreldre']: \n morIdListe.extend( mortype['vegobjekter'] )\n morObjektTypeId.append( mortype['type'])\n\n morDict = []\n for morId in morIdListe: \n tempDf = mDf[ mDf[idKey] == morId ]\n for jj, morRow in tempDf.iterrows(): \n morDict = 
morRow.to_dict()\n datterDict = row.to_dict()\n blanding = { **morDict, **datterDict } \n row_resultat.append( deepcopy( blanding ) )\n\n if len( row_resultat ) > 1: \n print( f\"Flere mødre { morIdListe } funnet for datterobjekt {row[datterIdKey]}\" )\n elif len( morIdListe) > 1 and len( row_resultat) == 1: \n print( f\"Flere mødre angitt for datterobjekt {row[datterIdKey]}, men fant heldigvis kun ett treff i morDf\" )\n\n returdata.extend( row_resultat )\n\n returDf = pd.DataFrame( returdata )\n\n return returDf \n\ndef records2gpkg( minliste, filnavn, lagnavn ): \n \"\"\"\n Tar en liste med records (dictionaries) a la dem vi får fra nvdbapiv3.to_records() og skriver til geopackage\n\n Forutsetning: Alle records har et \"geometri\"-element med WKT-streng og inneholder ingen lister. \n Vi tester for en del kjente snublefeller mhp disse forutsetningene, men ikke alle. \n \"\"\"\n if len( minliste ) == 0: \n raise ValueError( 'nvdbgeotrics.records2gpkg: Tom liste som inngangsverdi, funker dårlig')\n\n mindf = pd.DataFrame( minliste )\n # Må trickse litt for å unngå navnekollisjon\n kolonner = list( mindf.columns )\n lowerkolonner = [ x.lower() for x in kolonner ]\n # Duplicate element indices in list \n # Using list comprehension + list slicing \n # https://www.geeksforgeeks.org/python-duplicate-element-indices-in-list/ \n res = [idx for idx, val in enumerate(lowerkolonner) if val in lowerkolonner[:idx]] \n for ii, dublett in enumerate( res):\n mindf.rename(columns={ mindf.columns[dublett] : kolonner[dublett] + '_' + str( ii+1 ) }, inplace=True )\n\n if isinstance( mindf.iloc[0].geometri, dict ): \n mindf['geometri'] = mindf['geometri'].apply( lambda x : x['wkt'] )\n\n mindf['geometry'] = mindf['geometri'].apply( wkt.loads )\n minGdf = gpd.GeoDataFrame( mindf, geometry='geometry', crs=5973 ) \n # må droppe kolonne vegsegmenter hvis data er hentet med vegsegmenter=False \n if 'vegsegmenter' in minGdf.columns:\n minGdf.drop( 'vegsegmenter', 1, inplace=True)\n\n minGdf.drop( 'geometri', 1, inplace=True)\n minGdf.to_file( filnavn, layer=lagnavn, driver=\"GPKG\") \n\n\n\ndef nvdb2gpkg( objekttyper, filnavn='datadump', mittfilter=None, vegnett=True, vegsegmenter=False, geometri=True):\n \"\"\"\n Lagrer NVDB vegnett og angitte objekttyper til geopackage\n\n ARGUMENTS\n objekttyper: Liste med objekttyper du vil lagre \n\n KEYWORDS\n mittfilter=None : Dictionary med filter til søkeobjekt i nvdbapiv3.py, for eksempel { 'kommune' : 5001 }\n Samme filter brukes på både vegnett og fagdata\n\n vegnett=True : Bool, default=True. Angir om vi skal ta med data om vegnett eller ikke\n\n vegsegmenter=False : Bool, default=False. Angir om vi skal repetere objektet delt inn etter vegsegementer\n\n geometri=True : Bool, default=True. Angir om vi skal hente geometri fra egengeometri (hvis det finnes)\n\n Hvis du ønsker å presentere vegobjekt ut fra objektets stedfesting langs veg så bruker du kombinasjonen \n vegsegmenter=True, geometri=False. Ett enkelt objekt blir da repetert for hvert vegsegment som det er \n tilknyttet (stedfestet til). \n \n Standardverdiene vegsegmenter=False, geometri=True er valgt ut fra antagelsen om at du ønsker \n en rad per objekt, uten duplisering. 
\n\n RETURNS \n None \n \"\"\"\n\n if not '.gpkg' in filnavn: \n filnavn = filnavn + '_' + datetime.today().strftime('%Y-%m-%d') + '.gpkg'\n\n if not isinstance(objekttyper, list ): \n objekttyper = [ objekttyper ]\n\n for enObjTypeId in objekttyper: \n\n enObjTypeId = int( enObjTypeId )\n\n sok = nvdbapiv3.nvdbFagdata( enObjTypeId )\n if mittfilter: \n sok.filter( mittfilter )\n\n stat = sok.statistikk()\n objtypenavn = sok.objektTypeDef['navn']\n print( 'Henter', stat['antall'], 'forekomster av objekttype', sok.objektTypeId, objtypenavn )\n lagnavn = 'type' + str(enObjTypeId) + '_' + nvdbapiv3.esriSikkerTekst( objtypenavn.lower() ) \n\n rec = sok.to_records( vegsegmenter=vegsegmenter, geometri=geometri )\n\n # Lagringsrutine skilt ut med funksjonen records2gpkg, IKKE TESTET (men bør gå greit) \n if len( rec ) > 0: \n records2gpkg( rec, filnavn, lagnavn )\n else: \n print( 'Ingen forekomster av', objtypenavn, 'for filter', mittfilter) \n\n if vegnett: \n veg = nvdbapiv3.nvdbVegnett()\n if mittfilter: \n junk = mittfilter.pop( 'egenskap', None)\n junk = mittfilter.pop( 'overlapp', None)\n veg.filter( mittfilter )\n print( 'Henter vegnett')\n rec = veg.to_records()\n mindf = pd.DataFrame( rec)\n mindf['geometry'] = mindf['geometri'].apply( wkt.loads )\n mindf.drop( 'geometri', 1, inplace=True)\n minGdf = gpd.GeoDataFrame( mindf, geometry='geometry', crs=5973 ) \n minGdf.to_file( filnavn, layer='vegnett', driver=\"GPKG\") \n\n\ndef dumpkontraktsomr( komr = [] ): \n \"\"\"\n Dumper et har (hardkodede) kontraktsområder \n \"\"\"\n if not komr: \n\n komr = [ '9302 Haugesund 2020-2025', '9304 Bergen', '9305 Sunnfjord' ]\n komr = [ '9253 Agder elektro og veglys 2021-2024']\n\n\n if isinstance( komr, str): \n komr = [ komr ]\n\n objliste = [ 540, # Trafikkmengde\n 105, # Fartsgrense\n 810, # Vinterdriftsklasse\n 482, # trafikkregistreringsstasjon\n 153, # Værstasjon\n 64, # Ferjeleie\n 39, # Rasteplass \n 48, # Fortau\n 199, # Trær\n 15, # Grasdekker\n 274, # Blomsterbeplanting\n 511, # Busker\n 300 , # Naturområde (ingen treff i Haugesund kontrakt)\n 517, # Artsrik vegkant\n 800, # Fremmede arter\n 67, # Tunnelløp\n 846, # Skredsikring, bremsekjegler \n 850 # Skredsikring, forbygning\n ]\n\n objliste = []\n\n for enkontrakt in komr: \n\n filnavn = nvdbapiv3.esriSikkerTekst( enkontrakt )\n\n nvdb2gpkg( objliste, filnavn=filnavn, mittfilter={'kontraktsomrade' : enkontrakt })\n\n\ndef firefeltrapport( mittfilter={}): \n \"\"\"\n Finner alle firefeltsveger i Norge, evt innafor angitt søkekriterie \n\n Bruker søkeobjektet nvdbapiv3.nvdbVegnett fra biblioteket https://github.com/LtGlahn/nvdbapi-V3\n\n ARGUMENTS\n None \n\n KEYWORDS:\n mittfilter: Dictionary med søkefilter \n\n RETURNS\n geodataframe med resultatet\n \"\"\"\n\n v = nvdbapiv3.nvdbVegnett()\n\n # Legger til filter på kun fase = V (eksistende veg), såfremt det ikke kommer i konflikt med anna filter\n if not 'vegsystemreferanse' in mittfilter.keys(): \n mittfilter['vegsystemreferanse'] = 'Ev,Rv,Fv,Kv,Sv,Pv'\n\n if not 'kryssystem' in mittfilter.keys():\n mittfilter['kryssystem'] = 'false' \n\n if not 'sideanlegg' in mittfilter.keys():\n mittfilter['sideanlegg'] = 'false' \n\n v.filter( mittfilter )\n \n # Kun kjørende, og kun øverste topologinivå, og ikke adskiltelop=MOT\n v.filter( { 'trafikantgruppe' : 'K', 'detaljniva' : 'VT,VTKB', 'adskiltelop' : 'med,nei' } )\n\n data = []\n vegsegment = v.nesteForekomst()\n while vegsegment: \n\n if sjekkfelt( vegsegment, felttype='firefelt'):\n vegsegment['feltoversikt'] = ','.join( 
vegsegment['feltoversikt'] )\n vegsegment['geometri'] = vegsegment['geometri']['wkt']\n vegsegment['vref'] = vegsegment['vegsystemreferanse']['kortform']\n vegsegment['vegnr'] = vegsegment['vref'].split()[0]\n vegsegment['vegkategori'] = vegsegment['vref'][0]\n vegsegment['adskilte løp'] = vegsegment['vegsystemreferanse']['strekning']['adskilte_løp']\n\n data.append( vegsegment )\n\n vegsegment = v.nesteForekomst()\n\n if len( data ) > 1: \n mindf = pd.DataFrame( data )\n mindf['geometry'] = mindf['geometri'].apply( wkt.loads )\n mindf.drop( 'geometri', 1, inplace=True)\n mindf.drop( 'kontraktsområder', 1, inplace=True)\n mindf.drop( 'riksvegruter', 1, inplace=True) \n mindf.drop( 'href', 1, inplace=True) \n mindf.drop( 'metadata', 1, inplace=True) \n mindf.drop( 'kortform', 1, inplace=True) \n mindf.drop( 'veglenkenummer', 1, inplace=True) \n mindf.drop( 'segmentnummer', 1, inplace=True) \n mindf.drop( 'startnode', 1, inplace=True) \n mindf.drop( 'sluttnode', 1, inplace=True) \n mindf.drop( 'referanse', 1, inplace=True) \n mindf.drop( 'målemetode', 1, inplace=True) \n mindf.drop( 'måledato', 1, inplace=True) \n minGdf = gpd.GeoDataFrame( mindf, geometry='geometry', crs=5973 ) \n return minGdf\n else: \n return None \n\n\ndef sjekkfelt( vegsegment, felttype='firefelt' ): \n \"\"\"\n Sjekker hva slags felt som finnes på et vegsegment\n\n ARGUMENTS: \n vegsegment - dicionary med data om en bit av vegnettet hentet fra https://nvdbapiles-v3.atlas.vegvesen.no/vegnett/veglenkesekvenser/segmentert/ \n\n KEYWORDS: \n felttype - hva slags felttype som skal sjekkes. Mulige verdier: \n firefelt (default). Antar at firefeltsveg betyr at kjørefeltnummer 1-4 er brukt og er enten vanlig kj.felt, kollektivfelt eller reversibelt felt \n\n (flere varianter kommer når de trengs)\n\n RETURNS\n boolean - True hvis kjørefeltene er av riktig type \n \"\"\"\n svar = False\n vr = 'vegsystemreferanse'\n sr = 'strekning'\n\n if felttype == 'firefelt': \n if 'feltoversikt' in vegsegment.keys() and 'detaljnivå' in vegsegment.keys() and 'Vegtrase' in vegsegment['detaljnivå']: \n kjfelt = set( filtrerfeltoversikt( vegsegment['feltoversikt'], mittfilter=['vanlig', 'K', 'R']) )\n if vr in vegsegment.keys(): \n\n if sr in vegsegment[vr] and 'adskilte_løp' in vegsegment[vr][sr]: \n if vegsegment[vr][sr]['adskilte_løp'] == 'Nei' and kjfelt.issuperset( { 1, 2, 3, 4}): \n svar = True\n # Siste klausul her har f.eks. forekommet på Fv5724, envegskjørt tunnel ved Oldenvatnet. \n elif vegsegment[vr][sr]['adskilte_løp'] == 'Med' and len( kjfelt ) >= 2 and not kjfelt.issuperset( {1, 2} ): \n svar = True \n\n\n return svar \n else: \n raise NotImplementedError('Sjekkfelt: Sjekk for felt av type: ' + felttype + 'er ikke implementert (ennå)' )\n \n\ndef filtrerfeltoversikt( feltoversikt, mittfilter=['vanlig', 'K', 'R' ]):\n \"\"\"\n Returnerer liste med kjørefeltnummer filtrert på hva slags feltkode vi evt har\n\n ARGUMENTS\n feltoversikt - Liste med feltkoder for et vegsegment. \n\n KEYWORDS\n mittfilter=['vanlig', 'K', 'R' ] - Liste med koder for hva slags felt vi skal telle med. Sjekk håndbok v830 \n Nasjonalt vegreferansesystem https://www.vegvesen.no/_attachment/61505 for mulige verdier, kortversjon: \n 'vanlig' - Helt vanlig kjørefelt, kjørefeltnumemr er angitt som heltall uten noen bokstaver. 
\n 'K' - kollektivfelt\n 'R' - reversibelt felt\n 'S' - Sykkelfelt\n 'H' - Svingefelt mot høyre\n 'V' - Svingefelt mot venstre\n 'B' - Ekstra felt for bompengeinnkreving \n RETURNS\n Liste med kjørefeltnummer hvor kun kjørefelt som angitt med mittfilter-nøkkelord er inkludert \n \"\"\"\n data = [ ]\n for felt in feltoversikt: \n feltbokstav = re.findall( '[A-Za-z]', felt)\n if feltbokstav: \n feltbokstav = feltbokstav[0]\n else: \n feltbokstav = 'vanlig'\n \n if feltbokstav in mittfilter: \n feltnummer = int( re.split( '[A-Z]', felt)[0] ) \n data.append( feltnummer )\n\n return data \n \n" ]
[ [ "pandas.read_sql_query", "pandas.DataFrame" ] ]
imyu37/ray-optics
[ "5a4836bb95920f10ff2c30bba6d48be895a88da1" ]
[ "src/rayoptics/oprops/doe.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# Copyright © 2019 Michael J. Hayford\n\"\"\" Module for diffractive/holographic optical elements\n\n Classes that implement diffractive optics capabilities must implement\n the function phase() for use by the ray trace engine.\n\n The :class:`~.DiffractiveElement` and :class:`~.HolographicElement`\n implementations are patterned after Wang, et al, `Ray tracing and wave\n aberration calculation for diffractive optical elements\n <https://doi.org/10.1117/1.600780>`_\n\n.. Created on Fri Jul 5 11:27:13 2019\n\n.. codeauthor: Michael J. Hayford\n\"\"\"\n\n\nfrom math import sqrt\nimport numpy as np\nimport importlib\n\nimport rayoptics.raytr.raytrace as rt\nfrom rayoptics.util.misc_math import normalize\n\n\ndef radial_phase_fct(pt, coefficients):\n \"\"\"Evaluate the phase and slopes at **pt**\n\n Args:\n pt: 3d point of incidence in :class:`~.Interface` coordinates\n coefficients: list of even power radial phase coefficients,\n e.g. r**2, r**4, ...\n\n Returns:\n (**dW, dWdX, dWdY**)\n\n - dW: phase added by diffractive interaction\n - dWdX: slope in x direction\n - dWdY: slope in y direction\n \"\"\"\n x, y, z = pt\n r_sqr = x*x + y*y\n dW = 0\n dWdX = 0\n dWdY = 0\n for i, c in enumerate(coefficients):\n dW += c*r_sqr**(i+1)\n r_exp = r_sqr**(i)\n factor = 2*(i+1)\n dWdX += factor*c*x*r_exp\n dWdY += factor*c*y*r_exp\n return dW, dWdX, dWdY\n\n\nclass DiffractiveElement:\n \"\"\"Container class for a phase fct driven diffractive optical element\n\n Attributes:\n phase_fct: fct the takes an input pt and returns phase and slope\n coefficients: list of coeficients for phase function\n ref_wl: wavelength in nm for phase measurement\n order: which diffracted order to calculate the phase for\n label: optical labeling for listing\n \"\"\"\n\n def __init__(self, label='', coefficients=None, ref_wl=550., order=1,\n phase_fct=None):\n self.label = label\n if coefficients is None:\n self.coefficients = []\n else:\n self.coefficients = coefficients\n self.ref_wl = ref_wl\n self.order = order\n self.phase_fct = phase_fct\n self.debug_output = False\n\n def __repr__(self):\n return (type(self).__name__ + '(label=' + repr(self.label) +\n ', coefficients=' + repr(self.coefficients) +\n ', ref_wl=' + repr(self.ref_wl) +\n ', order=' + repr(self.order) +\n ', phase_fct=' + repr(self.phase_fct) + ')')\n\n def __json_encode__(self):\n attrs = dict(vars(self))\n # Save model name and function name of phase_fct, so that fct can\n # restored later (hopefully)\n del attrs['debug_output']\n del attrs['phase_fct']\n attrs['phase_fct_module'] = self.phase_fct.__module__\n attrs['phase_fct_name'] = self.phase_fct.__name__\n return attrs\n\n def __json_decode__(self, **attrs):\n module_name = attrs.pop('phase_fct_module')\n fct_name = attrs.pop('phase_fct_name')\n # try to import module and look up function - then assign to phase_fct\n mod = importlib.import_module(module_name)\n phase_fct = getattr(mod, fct_name)\n self.__init__(phase_fct=phase_fct, **attrs)\n\n def listobj(self):\n if len(self.label) == 0:\n label = 'doe'\n else:\n label = self.label\n print(f\"{label}: {self.phase_fct.__name__}\")\n print(f\"coefficients: {self.coefficients}\")\n print(f\"ref wl: {self.ref_wl}nm order: {self.order}\")\n\n def phase(self, pt, in_dir, srf_nrml, z_dir, wl, n_in, n_out):\n \"\"\"Returns a diffracted ray and phase increment.\n\n Args:\n pt: point of incidence in :class:`~.Interface` coordinates\n in_dir: incoming direction cosine of incident ray\n srf_nrml: 
:class:`~.Interface` surface normal at pt\n z_dir: -1 if after an odd # of reflections, +1 otherwise\n wl: wavelength in nm for ray, defaults to ref_wl\n\n Returns:\n (**out_dir, dW**)\n\n - out_dir: direction cosine of the out going ray\n - dW: phase added by diffractive interaction\n \"\"\"\n order = self.order\n normal = normalize(srf_nrml)\n inc_dir = in_dir\n if n_in != 1.0:\n inc_dir = rt.bend(in_dir, srf_nrml, n_in, 1)\n in_cosI = np.dot(inc_dir, normal)\n mu = 1.0 if wl is None else wl/self.ref_wl\n dW, dWdX, dWdY = self.phase_fct(pt, self.coefficients)\n # print(wl, mu, dW, dWdX, dWdY)\n b = in_cosI + order*mu*(normal[0]*dWdX + normal[1]*dWdY)\n c = mu*(mu*(dWdX**2 + dWdY**2)/2 +\n order*(inc_dir[0]*dWdX + inc_dir[1]*dWdY))\n # pick the root based on z_dir\n Q = -b + z_dir*sqrt(b*b - 2*c)\n if self.debug_output:\n print('inc_dir:', inc_dir)\n scale_dir = in_dir\n scale_dir[2] = n_in\n scale_dir = normalize(scale_dir)\n print('scale_dir:', scale_dir)\n print(\" mu dW dWdX dWdY b\"\n \" c Q\")\n print(f\"{mu:6.3f} {dW:12.5g} {dWdX:12.5g} {dWdY:12.5g} {b:12.7g}\"\n f\" {c:12.7g} {Q:12.7g}\")\n out_dir = inc_dir + order*mu*(np.array([dWdX, dWdY, 0])) + Q*normal\n dW *= mu\n if n_in != 1.0:\n out_dir = rt.bend(out_dir, srf_nrml, 1, n_in)\n\n return out_dir, dW\n\n\nclass HolographicElement:\n \"\"\"Two point hologram element. \"\"\"\n def __init__(self, label=''):\n self.label = label\n self.ref_pt = np.array([0., 0., -1e10])\n self.ref_virtual = False\n self.obj_pt = np.array([0., 0., -1e10])\n self.obj_virtual = False\n self.ref_wl = 550.0\n\n def listobj(self):\n if len(self.label) == 0:\n label = 'hoe'\n else:\n label = self.label\n print(f\"{label}: ref wl: {self.ref_wl}nm\")\n print(f\"ref_pt: {self.ref_pt[0]:12.5g} {self.ref_pt[1]:12.5g} \"\n f\"{self.ref_pt[2]:12.5g} virtual: {self.ref_virtual}\")\n print(f\"obj_pt: {self.obj_pt[0]:12.5g} {self.obj_pt[1]:12.5g} \"\n f\"{self.obj_pt[2]:12.5g} virtual: {self.obj_virtual}\")\n\n def phase(self, pt, in_dir, srf_nrml, z_dir, wl, n_in, n_out):\n normal = normalize(srf_nrml)\n ref_dir = normalize(pt - self.ref_pt)\n if self.ref_virtual:\n ref_dir = -ref_dir\n ref_cosI = np.dot(ref_dir, normal)\n obj_dir = normalize(pt - self.obj_pt)\n if self.obj_virtual:\n obj_dir = -obj_dir\n obj_cosI = np.dot(obj_dir, normal)\n in_cosI = np.dot(in_dir, normal)\n mu = 1.0 if wl is None else wl/self.ref_wl\n b = in_cosI + mu*(obj_cosI - ref_cosI)\n refp_cosI = np.dot(ref_dir, in_dir)\n objp_cosI = np.dot(obj_dir, in_dir)\n ro_cosI = np.dot(ref_dir, obj_dir)\n c = mu*(mu*(1.0 - ro_cosI) + (objp_cosI - refp_cosI))\n # pick the root based on z_dir\n Q = -b + z_dir*sqrt(b*b - 2*c)\n out_dir = in_dir + mu*(obj_dir - ref_dir) + Q*normal\n dW = 0.\n return out_dir, dW\n" ]
[ [ "numpy.dot", "numpy.array" ] ]
sivaabhishek/HandwrittenDigitRecognition
[ "02aa667d8042d04ec8f94b0465e6ee59d5810e37" ]
[ "generateClassifier.py" ]
[ "# Import the modules\nimport joblib\nfrom sklearn import datasets\nfrom skimage.feature import hog\nfrom sklearn.svm import LinearSVC\nimport numpy as np\nfrom collections import Counter\n\n# Load the dataset\ndataset = datasets.fetch_openml(\"mnist_784\")\nprint('Dataset')\n\n# Extract the features and labels\nfeatures = np.array(dataset.data, 'int16')\nlabels = np.array(dataset.target, 'int')\n\n# Extract the hog features\nlist_hog_fd = []\nfor feature in features:\n fd = hog(feature.reshape((28, 28)), orientations=9, pixels_per_cell=(14, 14), cells_per_block=(1, 1),\n visualize=False)\n list_hog_fd.append(fd)\nprint('Extracted')\n\nhog_features = np.array(list_hog_fd, 'float64')\n\nprint(\"Count of digits in dataset\", Counter(labels))\n\n# Create an linear SVM object\nclf = LinearSVC()\n\n# Perform the training\nclf.fit(hog_features, labels)\n\n\n# Save the classifier as pkl file\njoblib.dump(clf, \"myclassifier.pkl\", compress=3)\n" ]
[ [ "numpy.array", "sklearn.datasets.fetch_openml", "sklearn.svm.LinearSVC" ] ]
samplise/katib
[ "08234c3eccd43b11484fd78c9352eafbdc9152d8" ]
[ "pkg/suggestion/v1alpha2/grid_service.py" ]
[ "from logging import getLogger, StreamHandler, INFO, DEBUG\nimport itertools\nimport grpc\nimport numpy as np\nfrom pkg.apis.manager.v1alpha2.python import api_pb2\nfrom pkg.apis.manager.v1alpha2.python import api_pb2_grpc\nfrom . import parsing_util\n\nclass GridService(api_pb2_grpc.SuggestionServicer):\n def __init__(self):\n self.manager_addr = \"katib-manager\"\n self.manager_port = 6789\n self.default_grid = 10\n\n def _get_experiment(self, name):\n channel = grpc.beta.implementations.insecure_channel(self.manager_addr, self.manager_port)\n with api_pb2.beta_create_Manager_stub(channel) as client:\n exp = client.GetExperiment(api_pb2.GetExperimentRequest(experiment_name=name), 10)\n return exp.experiment\n\n def _get_algorithm_settings(self, experiment_name):\n channel = grpc.beta.implementations.insecure_channel(self.manager_addr, self.manager_port)\n with api_pb2.beta_create_Manager_stub(channel) as client:\n alg = client.GetAlgorithmExtraSettings(api_pb2.GetAlgorithmExtraSettingsRequest(\n experiment_name=experiment_name), 10)\n params = alg.extra_algorithm_settings\n alg_settings = {}\n for param in params:\n alg_settings[param.name] = param.value\n return alg_settings\n\n def _get_trials(self, experiment_name):\n channel = grpc.beta.implementations.insecure_channel(self.manager_addr, self.manager_port)\n with api_pb2.beta_create_Manager_stub(channel) as client:\n trials = client.GetTrialList(api_pb2.GetTrialListRequest(\n experiment_name=experiment_name), 10)\n return trials.trials\n\n def _create_all_combinations(self, parameters, alg_settings):\n param_ranges = []\n cur_index = 0\n parameter_config = parsing_util.parse_parameter_configs(parameters)\n default_grid_size = alg_settings.get(\"DefaultGrid\", self.default_grid)\n for idx, param_type in enumerate(parameter_config.parameter_types):\n param_name = parameter_config.names[idx]\n if param_type in [api_pb2.DOUBLE, api_pb2.INT]:\n num = alg_settings.get(param_name, default_grid_size)\n param_values = \\\n np.linspace(parameter_config.lower_bounds[0, cur_index],\n parameter_config.upper_bounds[0, cur_index],\n num=num)\n cur_index += 1\n if param_type == api_pb2.INT:\n param_values = param_values.astype(np.int64)\n elif param_type == api_pb2.DISCRETE:\n for discrete_param in parameter_config.discrete_info:\n if param_name == discrete_param[\"name\"]:\n param_values = discrete_param[\"values\"]\n break\n cur_index += 1\n elif param_type == api_pb2.CATEGORICAL:\n for categ_param in parameter_config.categorical_info:\n if param_name == categ_param[\"name\"]:\n param_values = categ_param[\"values\"]\n break\n cur_index += categ_param[\"number\"]\n param_ranges.append(param_values)\n all_combinations = [comb for comb in itertools.product(*param_ranges)]\n return all_combinations, parameter_config\n\n def GetSuggestions(self, request, context):\n \"\"\"\n Main function to provide suggestion.\n \"\"\"\n experiment_name = request.experiment_name\n request_number = request.request_number\n experiment = self._get_experiment(experiment_name)\n parameters = experiment.spec.parameter_specs.parameters\n alg_settings = self._get_algorithm_settings(experiment_name)\n combinations, parameter_config = self._create_all_combinations(parameters, alg_settings)\n total_combinations = len(combinations)\n\n allocated_trials = self._get_trials(experiment_name)\n total_allocated_trials = len(allocated_trials)\n return_start_index = total_allocated_trials\n return_end_index = return_start_index + request_number\n\n if return_start_index > 
total_combinations:\n return_start_index = 0\n return_end_index = return_start_index + request_number\n elif return_start_index + request_number > total_combinations:\n return_start_index = total_combinations - request_number\n return_end_index = total_combinations\n if return_start_index < 0:\n return_start_index = 0\n\n trial_specs = []\n for elem in combinations[return_start_index:return_end_index]:\n suggestion = parsing_util.parse_x_next_tuple(elem, parameter_config.parameter_types,\n parameter_config.names)\n trial_spec = api_pb2.TrialSpec()\n trial_spec.experiment_name = experiment_name\n for param in suggestion:\n trial_spec.parameter_assignments.assignments.add(name=param['name'],\n value=str(param['value']))\n trial_specs.append(trial_spec)\n reply = api_pb2.GetSuggestionsReply()\n for trial_spec in trial_specs:\n reply.trials.add(spec=trial_spec)\n return reply\n" ]
[ [ "numpy.linspace" ] ]
peachy88/RL_trade_ES_futures
[ "8b681f1ef672f812bf78b78bb18d07ccab6d0b67" ]
[ "ressup.py" ]
[ "import numpy as np\nimport pandas as pd\nfrom ressuputils import *\nimport matplotlib.pyplot as plt\n\n\ndef normalize(arr):\n 'arr - numpy.array'\n mu = np.mean(arr)\n st = np.std(arr)\n return ((arr - mu) / st), mu, st\n\ndef denorm(arr, mu, st):\n return (arr * st) + mu\n\n\n# def slopeRes(xs, ys, m, b, loss):\n# 'returns m, b for the line of residual adjusted best fit'\n\n# return map(float, model.layers[0].get_weights())\n\ndef ressup(DF, period, slopes=False):\n '''\n Returns the Support and Resistance lines for a given price history\n\n Parameters:\n DF (pandas.DataFrame): Price History\n period (int): number of last points for which to contruct the lines\n\n Returns:\n srlDF (pandas.DataFrame): DataFrame with Support and Resistance values\n\n if Slopes == True Returns: \n srlDF, (supm, supb), (resm, resb): DF and line parameters (m, b) from `y = m * x + b`\n '''\n try:\n df = pd.DataFrame(DF.tail(period)['Close'])\n except KeyError:\n df = pd.DataFrame(DF.tail(period)['close'])\n\n x, mx, sx = normalize(df.index)\n try:\n y, my, sy = normalize(df['Close'].values)\n except KeyError: \n y, my, sy = normalize(df['close'].values)\n # PRODUCE m, b for Sup Res Lines\n points = list(zip(x, y))\n\n lh_points = lower_hull(points)\n uh_points = upper_hull(points)\n\n lh_lines = [slope_intercept(lh_points[i], lh_points[i+1]) for i in range(0,len(lh_points)-1)]\n uh_lines = [slope_intercept(uh_points[i], uh_points[i+1]) for i in range(0,len(uh_points)-1)]\n\n closest_lh_line = min(lh_lines, key=lambda x: mse(x, points)) # The line closest to center of mass (at origin since normalized)\n closest_uh_line = min(uh_lines, key=lambda x: mse(x, points))\n\n XS = np.linspace(min(x), max(x))\n #for line in lh_lines:\n # plt.plot(XS, line[0] * XS + line[1], color=\"blue\")\n\n df['Support'] = ((closest_lh_line[0] * x + closest_lh_line[1])) * sy + my\n df['Resistance'] = ((closest_uh_line[0] * x + closest_uh_line[1])) * sy + my\n\n\n # plt.plot(x,y)\n # plt.scatter(*zip(*lh_points), marker=\"s\", color=\"red\")\n # plt.scatter(*zip(*uh_points), marker=\"x\", color=\"purple\")\n # plt.plot(XS, closest_lh_line[0] * XS + closest_lh_line[1], color=\"orange\", label='NewSupport')\n # plt.plot(XS, closest_uh_line[0] * XS + closest_uh_line[1], color=\"green\", label='NewResistance')\n\n # plt.tight_layout()\n # plt.legend()\n # plt.show()\n\n\n if slopes:\n #(supm, supb) = (closest_lh_line[0] * sy, closest_lh_line[1] * sy + my)\n (supm, supb) = (closest_lh_line[0]*sy/sx, -closest_lh_line[0]*sy/sx * mx + closest_lh_line[1] * sy + my)\n #(resm, resb) = (closest_uh_line[0] * sy, closest_uh_line[1] * sy + my)\n (resm, resb) = (closest_uh_line[0]*sy/sx, -closest_uh_line[0]*sy/sx * mx + closest_uh_line[1] * sy + my)\n return df, (supm, supb), (resm, resb)\n else:\n return df\n\n\n\nif __name__ == '__main__':\n import random\n #data = [(0, 0), (1, 5), (2, 4), (3, 6), (4, 2), (5,3)]\n datalen = 100\n data = [(0, 0)]\n for i in range(1, 100):\n data.append((i, data[-1][1] + random.gauss(0, 3)))\n #data = [(i, random.randint(-datalen, datalen)) for i in range(datalen)]\n\n xs = [each[0] for each in data]\n ys = [each[1] for each in data]\n DF = pd.DataFrame({'Close': ys}, dtype=np.float64, index=xs)\n srlDF = srl(DF, datalen)\n\n #print(srlDF.head())\n\n srlDF.plot()\n print(DF.head())\n plt.title(\"SRL\")\n\n #plt.tight_layout()\n #plt.legend()\n plt.show()\n\n" ]
[ [ "matplotlib.pyplot.title", "pandas.DataFrame", "numpy.std", "numpy.mean", "matplotlib.pyplot.show" ] ]
gabefreedman/portfolio
[ "53a67524aba57b0e94df4b5f273c9d340e7730fe" ]
[ "portfolio.py" ]
[ "# -*- coding: utf-8 -*-\n\n# Global imports\nimport pandas as pd\nimport fix_yahoo_finance as yf\n\nfrom update_gsheets import Table\n\n\ns_name = 'MSFT'\ns = yf.Ticker(s_name)\n\n\n# Dummy classes for Indices and Portfolios. Will fill in later\n\nclass Index:\n \n def __repr__(self):\n return 'Index object {}'.format(self.name)\n \n def __init__(self, name):\n self.name = name\n self.tick_items = {}\n \n def add_company(self, ticker):\n s = yf.Ticker(ticker)\n self.tick_items[ticker] = s\n \n def remove_company(self, ticker):\n self.tick_items.pop(ticker)\n \n def index_metadata(self):\n metadata = {k: v.info for k, v in self.tick_items.items()}\n return metadata\n \n def main_metrics_table(self):\n columns = ['marketCap',\n 'forwardPE', 'trailingPE',\n 'trailingAnnualDividendRate',\n 'regularMarketDayRange', 'fiftyTwoWeekRange',\n 'fiftyDayAverage', 'fiftyDayAverageChangePercent',\n 'twoHundredDayAverage', 'twoHundredDayAverageChangePercent']\n metadata = self.index_metadata()\n df = pd.DataFrame(metadata).T\n df = df[columns]\n df['Ticker'] = [key for key, _ in self.tick_items.items()]\n df = df.set_index('Ticker')\n return df\n \n def save_table(self):\n data = self.main_metrics_table()\n tab = Table(data)\n tab.write_table(self.name)\n\nclass Portfolio:\n pass\n\nclass WatchList(Index):\n # Will fill in later\n # Contains all tickers to watch on daily basis, regardless if they are currently owned.\n pass\n\ndef check_for_real_ticker(ind):\n empty_tickers = []\n for key, tick in ind.tick_items.items():\n if not tick.info:\n empty_tickers.append(key)\n\n for key in empty_tickers:\n ind.remove_company(key)\n if empty_tickers:\n print('The following tickers did not exist and were not added to the Index')\n print([item for item in empty_tickers])\n\ndef build_index(name, companies=None):\n ''' Create custom Index object to store stock information\n \n Parameters\n ----------\n name (str) : Name for Index object (eg. VICE, ABCD, SMMR)\n companies (list, optional) : If provided, list of companies to add to index\n Calls add_company class function\n \n Returns\n -------\n index (Index) : Index object containing list of companies as Stock instances\n \n '''\n \n index = Index(name)\n \n if companies:\n # Force uppercase for ticker symbols\n # Remove duplicate tickers\n companies = [x.upper() for x in companies]\n companies = list(set(companies))\n for cmp in companies:\n index.add_company(cmp)\n \n check_for_real_ticker(index)\n \n return index\n" ]
[ [ "pandas.DataFrame" ] ]
Xinrihui/DeepLearningApp
[ "8d86b88251ee8d37358c642b1ec4a341767bfd17", "8d86b88251ee8d37358c642b1ec4a341767bfd17" ]
[ "MachineTranslation/lib/layers/embedding_layer_xrh.py", "LearnTF(Keras)/tensorflow2_training_lower_API.py" ]
[ "#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\n# 适用于 tensorflow >= 2.0, keras 被直接集成到 tensorflow 的内部\n\nfrom tensorflow.keras.layers import Layer\n\nimport tensorflow as tf\n\nimport numpy as np\n\n\nclass SharedEmbedding(Layer):\n \"\"\"\n 共享权重的 Embedding\n\n Author: xrh\n Date: 2022-1-5\n\n ref:\n 1. https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer\n 2. https://www.tensorflow.org/api_docs/python/tf/nn/embedding_lookup\n 3. https://github.com/tensorflow/models/tree/master/official/nlp/modeling\n\n \"\"\"\n def __init__(self, n_h, n_vocab):\n \"\"\"\n\n :param n_h: 隐藏层的维度\n :param n_vocab: 词表的大小\n \"\"\"\n\n super(SharedEmbedding, self).__init__()\n self.n_h = n_h\n self.n_vocab = n_vocab\n\n def get_config(self):\n config = super().get_config().copy()\n config.update({\n 'n_h': self.n_h,\n 'n_vocab': self.n_vocab,\n\n })\n return config\n\n def build(self, input_shape):\n\n self.shared_weights = self.add_weight(\n name='shared_weights',\n shape=(self.n_vocab, self.n_h),\n initializer=tf.random_normal_initializer(\n mean=0., stddev=self.n_h**-0.5),\n trainable=True,\n ) # 必须加上 name, 否则模型无法 checkpoint\n\n def call(self, inputs, mode=\"embedding\"):\n \"\"\"\n\n :param inputs:\n :param mode:\n (1) embedding\n (2) linear\n :return:\n \"\"\"\n if mode == \"embedding\":\n return self.call_embedding(inputs)\n elif mode == \"linear\":\n return self.call_linear(inputs)\n else:\n raise ValueError(\"the value of mode is {}, which is illegal.\".format(mode))\n\n\n def call_embedding(self, inputs):\n \"\"\"\n\n :param inputs: 输入的 tensor, shape (N_batch, seq_length)\n :return:\n out, shape (N_batch, seq_length, n_h)\n\n \"\"\"\n\n out = tf.nn.embedding_lookup(params=self.shared_weights, ids=inputs)\n\n return out\n\n def call_linear(self, inputs):\n \"\"\"\n 输出 线性层(Dense) 的结果\n\n :param inputs: 输入的 tensor, shape (N_batch, seq_length, n_h)\n :return:\n out shape (N_batch, seq_length, n_vocab)\n \"\"\"\n\n N_batch = tf.shape(inputs)[0]\n seq_length = tf.shape(inputs)[1]\n\n x = tf.reshape(inputs, (-1, self.n_h))\n logits = tf.matmul(x, self.shared_weights, transpose_b=True) # 解决 OOM 问题\n\n out = tf.reshape(logits, [N_batch, seq_length, self.n_vocab])\n\n return out\n\n\n\n\nclass Test:\n\n def test_SharedEmbedding(self):\n\n n_vocab = 50\n n_h = 20\n\n embed_layer = SharedEmbedding(n_h, n_vocab)\n\n batch_tokens = np.random.randint(10, size=(4, 6))\n out_embed = embed_layer(inputs=batch_tokens)\n print('out_embed shape: ', out_embed.shape)\n\n batch_h = tf.random.normal(shape=[4, 6, n_h])\n out_liner = embed_layer(batch_h, mode='linear')\n print('out_liner shape: ', out_liner.shape)\n\n\n\nif __name__ == '__main__':\n\n test = Test()\n\n test.test_SharedEmbedding()\n\n", "\nimport time\n\n\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nimport numpy as np\n\n\n# Prepare the training dataset.\nbatch_size = 64\n(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()\nx_train = np.reshape(x_train, (-1, 784))\nx_test = np.reshape(x_test, (-1, 784))\n\n# Reserve 10,000 samples for validation.\nx_val = x_train[-10000:]\ny_val = y_train[-10000:]\nx_train = x_train[:-10000]\ny_train = y_train[:-10000]\n\n# Prepare the training dataset.\ntrain_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))\ntrain_dataset = train_dataset.shuffle(buffer_size=1024).batch(batch_size)\n\n# Prepare the validation dataset.\nval_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val))\nval_dataset = val_dataset.batch(batch_size)\n\n\n# Get 
model\ninputs = keras.Input(shape=(784,), name=\"digits\")\nx = layers.Dense(64, activation=\"relu\", name=\"dense_1\")(inputs)\nx = layers.Dense(64, activation=\"relu\", name=\"dense_2\")(x)\noutputs = layers.Dense(10, name=\"predictions\")(x)\nmodel = keras.Model(inputs=inputs, outputs=outputs)\n\n\n# Instantiate an optimizer to train the model.\noptimizer = keras.optimizers.SGD(learning_rate=1e-3)\n# Instantiate a loss function.\nloss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n\n# Prepare the metrics.\ntrain_acc_metric = keras.metrics.SparseCategoricalAccuracy()\nval_acc_metric = keras.metrics.SparseCategoricalAccuracy()\n\n\nepochs = 2\n\nfor epoch in range(epochs):\n\n print(\"\\nStart of epoch %d\" % (epoch,))\n start_time = time.time()\n\n # Iterate over the batches of the dataset.\n for step, (x_batch_train, y_batch_train) in enumerate(train_dataset):\n\n with tf.GradientTape() as tape:\n logits = model(x_batch_train, training=True)\n loss_value = loss_fn(y_batch_train, logits)\n grads = tape.gradient(loss_value, model.trainable_weights)\n optimizer.apply_gradients(zip(grads, model.trainable_weights))\n\n # Update training metric.\n train_acc_metric.update_state(y_batch_train, logits)\n\n # Log every 200 batches.\n if step % 200 == 0:\n print(\n \"Training loss (for one batch) at step %d: %.4f\"\n % (step, float(loss_value))\n )\n print(\"Seen so far: %d samples\" % ((step + 1) * batch_size))\n\n # Display metrics at the end of each epoch.\n train_acc = train_acc_metric.result()\n print(\"Training acc over epoch: %.4f\" % (float(train_acc),))\n\n # Reset training metrics at the end of each epoch\n train_acc_metric.reset_states()\n\n # Run a validation loop at the end of each epoch.\n for x_batch_val, y_batch_val in val_dataset:\n val_logits = model(x_batch_val, training=False)\n # Update val metrics\n val_acc_metric.update_state(y_batch_val, val_logits)\n\n val_acc = val_acc_metric.result()\n val_acc_metric.reset_states()\n\n print(\"Validation acc: %.4f\" % (float(val_acc),))\n print(\"Time taken: %.2fs\" % (time.time() - start_time))\n\n\n\n\n\n" ]
[ [ "tensorflow.matmul", "tensorflow.shape", "tensorflow.reshape", "tensorflow.random_normal_initializer", "tensorflow.random.normal", "tensorflow.nn.embedding_lookup", "numpy.random.randint" ], [ "tensorflow.keras.Input", "numpy.reshape", "tensorflow.keras.losses.SparseCategoricalCrossentropy", "tensorflow.data.Dataset.from_tensor_slices", "tensorflow.keras.layers.Dense", "tensorflow.keras.Model", "tensorflow.keras.datasets.mnist.load_data", "tensorflow.GradientTape", "tensorflow.keras.metrics.SparseCategoricalAccuracy", "tensorflow.keras.optimizers.SGD" ] ]
quickhdsdc/Point-Tracking-for-Displacement-Measurement-in-Railway-Applications
[ "119b0bbc5076aa94d2801c7bfb0701204f2ae066" ]
[ "models/PoseResNet.py" ]
[ "# ------------------------------------------------------------------------------\n# Copyright (c) Microsoft\n# Licensed under the MIT License.\n# Written by Bin Xiao (Bin.Xiao@microsoft.com)\n# ------------------------------------------------------------------------------\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport logging\n\nimport torch\nimport torch.nn as nn\nfrom collections import OrderedDict\n\n\nBN_MOMENTUM = 0.1\nlogger = logging.getLogger(__name__)\n\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,\n bias=False)\n self.bn3 = nn.BatchNorm2d(planes * self.expansion,\n momentum=BN_MOMENTUM)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\nclass PoseResNet(nn.Module):\n\n def __init__(self, block, layers):\n self.inplanes = 64\n extra = \"POSE_RESNET\"\n self.deconv_with_bias = False\n\n super(PoseResNet, self).__init__()\n self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,\n bias=False)\n self.bn1 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, 64, layers[0])\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2)\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2)\n\n # used for deconv layers\n self.deconv_layers = self._make_deconv_layer(3, [256, 256, 256], [4, 4, 4])\n\n self.final_layer = nn.Conv2d(\n in_channels=256,\n out_channels=3,\n kernel_size=1,\n stride=1,\n padding=0\n )\n\n def _make_layer(self, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes 
* block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(planes * block.expansion, momentum=BN_MOMENTUM),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes))\n\n return nn.Sequential(*layers)\n\n def _get_deconv_cfg(self, deconv_kernel, index):\n if deconv_kernel == 4:\n padding = 1\n output_padding = 0\n elif deconv_kernel == 3:\n padding = 1\n output_padding = 1\n elif deconv_kernel == 2:\n padding = 0\n output_padding = 0\n\n return deconv_kernel, padding, output_padding\n\n def _make_deconv_layer(self, num_layers, num_filters, num_kernels):\n assert num_layers == len(num_filters), \\\n 'ERROR: num_deconv_layers is different from len(num_deconv_filters)'\n assert num_layers == len(num_kernels), \\\n 'ERROR: num_deconv_layers is different from len(num_deconv_kernels)'\n\n layers = []\n for i in range(num_layers):\n kernel, padding, output_padding = \\\n self._get_deconv_cfg(num_kernels[i], i)\n\n planes = num_filters[i]\n layers.append(\n nn.ConvTranspose2d(\n in_channels=self.inplanes,\n out_channels=planes,\n kernel_size=kernel,\n stride=2,\n padding=padding,\n output_padding=output_padding,\n bias=self.deconv_with_bias))\n layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM))\n layers.append(nn.ReLU(inplace=True))\n self.inplanes = planes\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = self.deconv_layers(x)\n x = self.final_layer(x)\n\n return x\n\n def init_weights(self, pretrained=''):\n if os.path.isfile(pretrained):\n logger.info('=> init deconv weights from normal distribution')\n for name, m in self.deconv_layers.named_modules():\n if isinstance(m, nn.ConvTranspose2d):\n logger.info('=> init {}.weight as normal(0, 0.001)'.format(name))\n logger.info('=> init {}.bias as 0'.format(name))\n nn.init.normal_(m.weight, std=0.001)\n if self.deconv_with_bias:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm2d):\n logger.info('=> init {}.weight as 1'.format(name))\n logger.info('=> init {}.bias as 0'.format(name))\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n logger.info('=> init final conv weights from normal distribution')\n for name, m in self.final_layer.named_modules():\n if isinstance(m, nn.Conv2d):\n # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n logger.info('=> init {}.weight as normal(0, 0.001)'.format(name))\n logger.info('=> init {}.bias as 0'.format(name))\n nn.init.normal_(m.weight, std=0.001)\n nn.init.constant_(m.bias, 0)\n\n # pretrained_state_dict = torch.load(pretrained)\n logger.info('=> loading pretrained model {}'.format(pretrained))\n # self.load_state_dict(pretrained_state_dict, strict=False)\n checkpoint = torch.load(pretrained)\n if isinstance(checkpoint, OrderedDict):\n state_dict = checkpoint\n elif isinstance(checkpoint, dict) and 'state_dict' in checkpoint:\n state_dict_old = checkpoint['state_dict']\n state_dict = OrderedDict()\n # delete 'module.'
because it is saved from DataParallel module\n for key in state_dict_old.keys():\n if key.startswith('module.'):\n # state_dict[key[7:]] = state_dict[key]\n # state_dict.pop(key)\n state_dict[key[7:]] = state_dict_old[key]\n else:\n state_dict[key] = state_dict_old[key]\n else:\n raise RuntimeError(\n 'No state_dict found in checkpoint file {}'.format(pretrained))\n model_dict = self.state_dict()\n state_dict = {k: v for k, v in state_dict.items() if k in model_dict and k != \"final_layer.bias\" and k != \"final_layer.weight\"}\n model_dict.update(state_dict) \n self.load_state_dict(model_dict, strict=False)\n else:\n logger.error('=> imagenet pretrained model does not exist')\n logger.error('=> please download it first')\n raise ValueError('imagenet pretrained model does not exist')\n\n\n\n\n\n" ]
[ [ "torch.nn.Sequential", "torch.nn.ConvTranspose2d", "torch.load", "torch.nn.init.constant_", "torch.nn.Conv2d", "torch.nn.MaxPool2d", "torch.nn.init.normal_", "torch.nn.BatchNorm2d", "torch.nn.ReLU" ] ]
DSIP-UPatras/sEMG-based-gesture-recognition-mgeorgoula
[ "a5f8f234aebad671b38296730f46f0b637fadc64" ]
[ "code/part_b/dbc_capgmyo/vgg19_capgmyo_partb1.py" ]
[ "# -*- coding: utf-8 -*-\r\n\r\n\"\"\"Make prediction and compute confusion matrix for modified input data\"\"\"\r\n\"\"\"PART B1 : Zero electrode column of emg data\"\"\"\r\n\r\nimport numpy as np\r\nimport tensorflow as tf\r\n\r\nimport random\r\n\r\nimport os\r\nos.environ['PYTHONHASHSEED'] = '0'\r\n\r\n# The below is necessary for starting Numpy generated random numbers\r\n# in a well-defined initial state.\r\nnp.random.seed(1234)\r\n\r\nrandom.seed(12345)\r\n\r\n# \r\nsession_conf = tf.ConfigProto(\r\n intra_op_parallelism_threads=1,\r\n inter_op_parallelism_threads=1\r\n)\r\n\r\nfrom keras import backend as K\r\n\r\n#\r\ntf.set_random_seed(1234)\r\n\r\n\r\nsess = tf.Session(graph=tf.get_default_graph(), config=session_conf)\r\nsess.run(tf.global_variables_initializer())\r\n\r\nK.set_session(sess)\r\n##############################################################################\r\nimport sys\r\nimport matplotlib.pyplot as plt\r\nfrom keras import optimizers, initializers, regularizers, constraints\r\nfrom tensorflow.keras.callbacks import TensorBoard\r\nfrom keras.utils import plot_model\r\nfrom utils import *\r\nfrom generator_capgmyo_b import *\r\nimport preprocessing_capgmyo\r\nimport json\r\nfrom sklearn import metrics\r\nimport scipy.io\r\nimport matplotlib.pyplot as plt\r\nfrom keras.models import model_from_json\r\nfrom sklearn.metrics import confusion_matrix\r\n\r\nwith open('Capgmyo_vgg19_b1.json') as json_file:\r\n config_data = json.load(json_file)\r\n\r\nMODEL_WEIGHTS_SAVE_FILE = os.path.abspath(\r\n 'models_vgg19_capgmyo') + '/'+'_Capgmyo_vgg19_' + '_{}.h5'\r\n\r\nMODEL_SAVE_FILE = os.path.abspath(\r\n 'models_vgg19_capgmyo') + '/'+'_Capgmyo_vgg19_' + '_{}.json'\r\n\r\nPARAMS_MODEL = config_data['model']\r\nPARAMS_DATASET = config_data['dataset']\r\n\r\n \r\nPARAMS_TEST_GENERATOR = DEFAULT_GENERATOR_PARAMS.copy()\r\nparams_gen = PARAMS_DATASET.get('test_generator', {}).copy()\r\nfor key in params_gen.keys():\r\n PARAMS_TEST_GENERATOR[key] = params_gen[key]\r\n\r\n\r\n#input_directory = r'drive/Thesis_emg/Ninapro-DB1_Preprocessed'\r\ninput_directory = r'C:\\Users\\Marina\\Desktop\\CAPGMYO\\Capgmyo_dbc_preprocessed'\r\nPARAMS_TEST_GENERATOR['preprocess_function'] = [preprocessing_capgmyo.lpf]\r\nPARAMS_TEST_GENERATOR['preprocess_function_extra'] = [{'fs':1000}]\r\nPARAMS_TEST_GENERATOR['data_type'] = 'rms'\r\nPARAMS_TEST_GENERATOR['classes'] = [i for i in range(12)]\r\n\r\n\r\nPARAMS_TEST_GENERATOR.pop('input_directory', '')\r\n\r\n\r\ntest_generator = DataGeneratorB(input_directory=input_directory, **PARAMS_TEST_GENERATOR)\r\n\r\nX_test, Y_test, test_reps = test_generator.get_data()\r\ny_test = np.argmax(Y_test, axis=1)\r\n\r\n\r\n# load json and create model\r\nwith open(MODEL_SAVE_FILE,'r') as f:\r\n json = f.read()\r\nloaded_model = model_from_json(json)\r\n\r\nloaded_model.load_weights(MODEL_WEIGHTS_SAVE_FILE)\r\nprint(\"Loaded model from disk\")\r\n \r\n# evaluate loaded model on test data\r\nloaded_model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])\r\nscore = loaded_model.evaluate(X_test, Y_test, verbose=0)\r\n\r\nprint('Test loss:', score[0])\r\nprint('Test accuracy:', score[1])\r\n\r\n\r\nY_pred = loaded_model.predict(X_test)\r\n\r\ny_pred = np.argmax(Y_pred, axis=1)\r\n\r\n#Display confusion matrix\r\nprint(confusion_matrix(y_test,y_pred))\r\nplt.xlabel('Predicted Label')\r\nplt.ylabel('True Label')\r\nplt.imshow(confusion_matrix(y_test,y_pred))" ]
[ [ "tensorflow.get_default_graph", "numpy.random.seed", "sklearn.metrics.confusion_matrix", "tensorflow.ConfigProto", "tensorflow.global_variables_initializer", "numpy.argmax", "tensorflow.set_random_seed", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.ylabel" ] ]
cuauv/software
[ "5ad4d52d603f81a7f254f365d9b0fe636d03a260", "5ad4d52d603f81a7f254f365d9b0fe636d03a260" ]
[ "control/pid.py", "vision/modules/gate.py" ]
[ "import operator\nimport math\nimport time\n\nimport numpy as np\n\nfrom auv_math.quat import Quaternion\nfrom auv_python_helpers.angles import heading_sub_degrees\n\nfrom shm import kalman, dvl, desires, settings_control, \\\n control_internal_depth, control_internal_heading, \\\n control_internal_pitch, control_internal_velx, \\\n control_internal_vely, control_internal_roll, \\\n settings_depth, settings_heading, settings_pitch, \\\n settings_velx, settings_vely, settings_roll, settings_quat, \\\n control_locked\n\nfrom conf.vehicle import sensors\n\nclass PID:\n \"\"\"\n This is meant to be generic. The value 0 should never be used.\n \"\"\"\n def __init__(self, P, I, D, diff_func=operator.sub, speed=1):\n self.P = P\n self.I = I\n self.D = D\n self.diff_func = diff_func\n self.speed = speed\n self.rD = None\n self.reset()\n\n def reset(self):\n self.integral = None\n self.last_time = 0\n self.last_value = None\n self.locked = False\n self.out_P = None\n self.out_I = None\n self.out_D = None\n\n def tick(self, value, desired, rate_var=None):\n # TODO shoud we pass in dt as an argument? (DEPENDENCY INJECTION!)\n now = time.time()\n dt = (now - self.last_time) * self.speed\n self.last_time = now\n\n error = self.diff_func(desired, value)\n\n # Ignore any pauses in the controller.\n if dt < 5:\n if self.integral is None:\n self.integral = error * dt\n else:\n self.integral += error * dt\n\n # TODO When else should we fiddle with integral?\n # This generic PID loop erased many cased deemed unnecessary...\n if self.rD is not None and abs(error) > self.rD:\n self.integral = None\n self.locked = True\n else:\n self.locked = False\n\n self.out_P = self.P * error\n output = self.out_P\n\n if self.integral is not None:\n self.out_I = self.I * self.integral\n output += self.out_I\n else:\n self.out_I = None\n\n # Avoid derivative spike on startup.\n if self.last_value is not None:\n if rate_var is not None:\n deriv = -rate_var\n else:\n deriv = self.diff_func(-value, -self.last_value) / dt\n\n self.out_D = self.D * deriv\n output += self.out_D\n else:\n self.out_D = None\n\n self.last_value = value\n\n return output\n\n\nclass DynamicPID(PID):\n \"\"\"A PID loop which can update the PID values on each tick.\"\"\"\n\n def __init__(self, P:float = 1, I:float = 0, D:float = 0, diff_func=operator.sub, speed=1):\n super().__init__(P, I, D, diff_func, speed)\n\n def tick(self, value: float, desired: float, p: float = None, d: float = None, i: float = None):\n if p is not None:\n self.P = p\n\n if d is not None:\n self.D = d\n\n if i is not None:\n self.I = i\n\n return super().tick(value, desired)\n\n\nclass ShmPID:\n def __init__(self, gain_group, value, desire, diff_func, out_group, on, locked, rate_var=None, speed=1):\n self.gain_group = gain_group\n self.value = value\n self.desire = desire\n self.out_group = out_group\n self.on = on\n self.locked = locked\n self.rate_var = rate_var\n\n self.pid = PID(0, 0, 0, diff_func, speed)\n self.update_gains()\n\n def update_gains(self):\n self.pid.P = self.gain_group.kP.get()\n self.pid.I = self.gain_group.kI.get()\n self.pid.D = self.gain_group.kD.get()\n self.pid.rD = self.gain_group.rD.get()\n\n def reset(self):\n self.pid.reset()\n self.out_group.integral.set(0)\n self.out_group.out.set(0)\n\n def tick(self):\n self.update_gains()\n\n if self.on.get():\n rate = self.rate_var() if self.rate_var is not None else None\n out = self.pid.tick(self.value.get(), self.desire.get(),\n rate_var=rate)\n else:\n out = 0\n\n self.out_group.out.set(out)\n\n if 
self.pid.integral is None:\n integral = 0\n else:\n integral = self.pid.integral\n\n self.out_group.integral.set(integral)\n\n # Output helpful debugging data.\n out_P = 0 if self.pid.out_P is None else self.pid.out_P\n out_I = 0 if self.pid.out_I is None else self.pid.out_I\n out_D = 0 if self.pid.out_D is None else self.pid.out_D\n self.out_group.out_P.set(out_P)\n self.out_group.out_I.set(out_I)\n self.out_group.out_D.set(out_D)\n\n self.locked.set(self.pid.locked)\n\nclass PIDLoop:\n \"\"\" Class for updating PID values \"\"\"\n\n def __init__(self, speed=1.0):\n \"\"\"\n Initializes the vehicle PID controller.\n\n Arguments:\n speed -- a float multiplier to apply to the real time passed to obtain\n the amount of simulated time passed. For example, if speed is\n 2.0 and 0.1 seconds have passed since the last step, dt will\n be calculated as if 0.2 seconds passed.\n This does not affect the time returned by step().\n \"\"\"\n\n # Only use the z_vel variable if the vehicle actually has a way of\n # measuring z velocity directly\n if \"velz\" in sensors:\n depth_rate = lambda: kalman.velz.get()\n else:\n depth_rate = None\n\n velx_pid = ShmPID(settings_velx, kalman.velx, desires.speed,\n operator.sub, control_internal_velx,\n settings_control.velx_active,\n control_locked.velx, speed=speed)\n vely_pid = ShmPID(settings_vely, kalman.vely, desires.sway_speed,\n operator.sub, control_internal_vely,\n settings_control.vely_active, control_locked.vely,\n speed=speed)\n depth_pid = ShmPID(settings_depth, kalman.depth,\n control_internal_depth.desire, operator.sub,\n control_internal_depth,\n settings_control.depth_active, control_locked.depth,\n rate_var=depth_rate, speed=speed)\n\n heading_pid = ShmPID(settings_heading, kalman.heading, desires.heading,\n heading_sub_degrees, control_internal_heading,\n settings_control.heading_active,\n control_locked.heading,\n rate_var=lambda: self.heading_rate, speed=speed)\n pitch_pid = ShmPID(settings_pitch, kalman.pitch, desires.pitch,\n heading_sub_degrees, control_internal_pitch,\n settings_control.pitch_active, control_locked.pitch,\n rate_var=lambda: self.pitch_rate, speed=speed)\n roll_pid = ShmPID(settings_roll, kalman.roll, desires.roll,\n heading_sub_degrees, control_internal_roll,\n settings_control.roll_active, control_locked.roll,\n rate_var=lambda: self.roll_rate, speed=speed)\n\n self.pids = [velx_pid, vely_pid, depth_pid, heading_pid, pitch_pid, roll_pid]\n\n self.clean()\n self.last_q_error = 0\n self.last_quat_time = time.time()\n self.speed = speed\n\n def clean(self):\n \"\"\" Clean the controller state; init all variables \"\"\"\n self.last_time = time.time()\n [pid.reset() for pid in self.pids]\n control_internal_depth.desire.set(desires.depth.get())\n\n # Added by Christopher\n self.integral = 0\n\n def quat_pid(self):\n # TODO Figure out how to formalize this and use generic PID class.\n g = kalman.get()\n d = desires.get()\n\n a = Quaternion(q=[g.q0, g.q1, g.q2, g.q3])\n b = Quaternion(hpr=(d.heading, d.pitch, d.roll))\n\n current_time = time.time()\n dt = (current_time - self.last_quat_time) * self.speed\n self.last_quat_time = current_time\n\n q_diff = b * a.conjugate()\n if abs(abs(q_diff[0]) - 1) < 1e-15:\n self.last_q_error = np.array((0, 0, 0))\n return np.array((0, 0, 0))\n\n ax = q_diff.axis()\n ang = q_diff.angle()\n\n # Ensure we are taking the shortest path around.\n if ang > math.pi:\n ax = -ax\n ang = 2*math.pi - ang\n\n ang_accel = ang * ax * settings_quat.kP.get()\n\n diff = (ang * ax - self.last_q_error) / dt\n 
self.last_q_error = ang * ax\n ang_accel += settings_quat.kD.get() * diff\n\n # added by Christopher\n #self.integral = self.integral + ax * dt\n #ax += settings_quat.kI.get() * self.integral\n\n return ang_accel\n\n def update_angular_rates(self, tm):\n # Convert from body frame to spitz space (heading-adjusted world space).\n ang_rates_sub = np.array((kalman.roll_rate.get(), kalman.pitch_rate.get(), kalman.heading_rate.get()))\n ang_rates_spitz = tm.spitz_to_sub_quat.conjugate() * ang_rates_sub\n self.roll_rate = ang_rates_spitz[0]\n self.pitch_rate = ang_rates_spitz[1]\n self.heading_rate = ang_rates_spitz[2]\n\n def step(self, tm):\n self.update_angular_rates(tm)\n for pid in self.pids:\n pid.tick()\n\n now_time = time.time()\n dt = (now_time - self.last_time) * self.speed\n self.last_time = now_time\n\n ### Depth ramping\n # TODO: Experiment with ramping other controllers\n if control_internal_depth.desire.get() != desires.depth.get():\n diff = desires.depth.get() - control_internal_depth.desire.get()\n step = dt * settings_depth.ramp_speed.get()\n\n if step > abs(diff) or abs(diff) > 10: #TODO: Work on this too-large-change feature\n control_internal_depth.desire.set(desires.depth.get())\n else:\n d = step if diff > 0 else -step\n control_internal_depth.desire.set(control_internal_depth.desire.get() + d)\n", "#!/usr/bin/env python3\nimport os\nimport sys\nimport shm\nimport cv2\nimport numpy as np\nfrom collections import namedtuple\n\nfrom conf.vehicle import VEHICLE, is_mainsub\nfrom vision import options\nfrom vision.modules.base import ModuleBase\nfrom vision.framework.feature import outer_contours, contour_area, contour_centroid, min_enclosing_circle, min_enclosing_rect\nfrom vision.framework.transform import resize, simple_gaussian_blur, morph_remove_noise, morph_close_holes, dilate, erode, rect_kernel\nfrom vision.framework.helpers import to_umat, from_umat, to_odd\nfrom vision.framework.color import bgr_to_lab, gray_to_bgr, range_threshold\nfrom vision.framework.draw import draw_contours, draw_circle, draw_text\n\n\nCUAUV_LOCALE = os.environ['CUAUV_LOCALE']\n\nOPTS_ODYSSEUS = [\n options.IntOption('lab_l_ref', 255, 0, 255),\n options.IntOption('lab_a_ref', 163, 0, 255),\n options.IntOption('lab_b_ref', 180, 0, 255),\n options.IntOption('color_dist_thresh', 45, 0, 255),\n options.IntOption('blur_kernel', 3, 0, 255),\n options.IntOption('blur_std', 10, 0, 500),\n options.DoubleOption('resize_width_scale', 0.5, 0, 1),\n options.DoubleOption('resize_height_scale', 0.5, 0, 1),\n options.IntOption('dilate_kernel', 1, 0, 255),\n options.IntOption('erode_kernel', 1, 0, 255),\n options.IntOption('min_contour_area', 30, 0, 500),\n options.DoubleOption('min_contour_rect', 0.4, 0, 1),\n options.DoubleOption('min_contour_ratio', 4.5, 0, 10),\n options.DoubleOption('max_angle_from_vertical', 15, 0, 90),\n options.DoubleOption('min_length', 15, 0, 500),\n options.IntOption('auto_distance_percentile', 25, 0, 100),\n options.IntOption('nonblack_thresh', 900, 0, 10000),\n options.IntOption('water_a_thresh', 20, 0, 255),\n options.IntOption('water_b_thresh', 25, 0, 255),\n options.BoolOption('debug', True),\n]\n\nOPTS_AJAX = [\n options.IntOption('lab_l_ref', 255, 0, 255),\n options.IntOption('lab_a_ref', 175, 0, 255),\n options.IntOption('lab_b_ref', 169, 0, 255),\n options.IntOption('color_dist_thresh', 40, 0, 255),\n options.IntOption('blur_kernel', 3, 0, 255),\n options.IntOption('blur_std', 10, 0, 500),\n options.DoubleOption('resize_width_scale', 0.25, 0, 1),\n 
options.DoubleOption('resize_height_scale', 0.25, 0, 1),\n options.IntOption('dilate_kernel', 1, 0, 255),\n options.IntOption('erode_kernel', 1, 0, 255),\n options.IntOption('min_contour_area', 30, 0, 500),\n options.DoubleOption('min_contour_ratio', 4.5, 0, 10),\n options.DoubleOption('min_contour_rect', 0.4, 0, 1),\n options.DoubleOption('max_angle_from_vertical', 15, 0, 90),\n options.DoubleOption('min_length', 15, 0, 500),\n options.IntOption('auto_distance_percentile', 25, 0, 100),\n options.IntOption('nonblack_thresh', 600, 0, 10000),\n options.IntOption('water_a_thresh', 10, 0, 255),\n options.IntOption('water_b_thresh', 10, 0, 255),\n options.BoolOption('debug', True),\n]\n\n#OPTS_SIM = [\n# options.IntOption('lab_l_ref', 0, 0, 255),\n# options.IntOption('lab_a_ref', 170, 0, 255),\n# options.IntOption('lab_b_ref', 180, 0, 255),\n# options.IntOption('color_dist_thresh', 35, 0, 255),\n# options.IntOption('blur_kernel', 3, 0, 255),\n# options.IntOption('blur_std', 10, 0, 500),\n# options.DoubleOption('resize_width_scale', 0.5, 0, 1),\n# options.DoubleOption('resize_height_scale', 0.5, 0, 1),\n# options.IntOption('dilate_kernel', 1, 0, 255),\n# options.IntOption('erode_kernel', 1, 0, 255),\n# options.IntOption('min_contour_area', 30, 0, 500),\n# options.DoubleOption('min_contour_rect', 0.4, 0, 1),\n# options.DoubleOption('min_contour_ratio', 5, 0, 10),\n# options.DoubleOption('max_angle_from_vertical', 15, 0, 90),\n# options.DoubleOption('min_length', 15, 0, 500),\n# options.IntOption('auto_distance_percentile', 15, 0, 100),\n# options.IntOption('nonblack_thresh', 1000, 0, 10000),\n# options.IntOption('water_a_thresh', 20, 0, 255),\n# options.IntOption('water_b_thresh', 25, 0, 255),\n# options.BoolOption('debug', True),\n#]\n\nOPTS_SIM = OPTS_ODYSSEUS if VEHICLE == 'odysseus' else OPTS_AJAX\n\nREFERENCE_BRIGHTNESS = 190 if is_mainsub else 190\nCUTOFF_SCALAR = 10 if is_mainsub else 7\n\nContourFeats = namedtuple('ContourFeats', ['contour', 'area', 'x', 'y', 'rect', 'angle', 'length', 'ratio'])\n\n\ndef try_index(arr, idx):\n if idx < len(arr):\n return arr[idx]\n return None\n\n\ndef thresh_color_distance(split, color, distance, auto_distance_percentile=None, ignore_channels=[], weights=[1, 1, 1]):\n for idx in ignore_channels:\n weights[idx] = 0\n weights /= np.linalg.norm(weights)\n dists = np.zeros(split[0].shape, dtype=np.float32)\n for i in range(3):\n if i in ignore_channels:\n continue\n dists += weights[i] * (np.float32(split[i]) - color[i])**2\n if auto_distance_percentile:\n distance = min(np.percentile(dists, auto_distance_percentile), distance**2)\n else:\n distance = distance**2\n return range_threshold(dists, 0, distance), np.uint8(np.sqrt(dists))\n\n\ndef filter_duplicates_sorted_by_x(contour_feats):\n MIN_DIST_BETWEEN_PIPES = 30\n res = []\n last_x = -MIN_DIST_BETWEEN_PIPES\n last_len = 0\n for c in contour_feats:\n if c.x - last_x > MIN_DIST_BETWEEN_PIPES:\n last_x = c.x\n last_len = c.length\n res.append(c)\n elif last_len < c.length:\n last_x = c.x\n last_len = c.length\n if res:\n res.pop(-1)\n res.append(c)\n return res\n\n\nclass Gate(ModuleBase):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def post_contours(self, name, h, w, contour_feats):\n if not self.options['debug']:\n return\n tmp = np.zeros((h, w, 3))\n draw_contours(tmp, [c.contour for c in contour_feats], color=(255, 0, 0), thickness=-1)\n self.post(name, tmp)\n\n def process(self, *mats):\n results = shm.gate_vision.get()\n h, w, _ = mats[0].shape\n h = int(h * 
self.options['resize_height_scale'])\n w = int(w * self.options['resize_width_scale'])\n results.img_height = h\n results.img_width = w\n mat = resize(mats[0], w, h)\n #print(np.mean(mat))\n avg_brightness_ratio = np.mean(mat) / REFERENCE_BRIGHTNESS\n nonblack_thresh_dist = self.options['nonblack_thresh'] * avg_brightness_ratio\n\n lab, lab_split = bgr_to_lab(mat)\n median_a = np.median(lab_split[1])\n median_b = np.median(lab_split[2])\n median_filter_a = range_threshold(lab_split[1], median_a - self.options['water_a_thresh'], median_a + self.options['water_a_thresh'])\n median_filter_b = range_threshold(lab_split[2], median_b - self.options['water_b_thresh'], median_b + self.options['water_b_thresh'])\n if self.options['debug']:\n self.post('median filter a', median_filter_a)\n self.post('median filter b', median_filter_b)\n nonwater_mask, _ = gray_to_bgr(255 - (median_filter_a & median_filter_b))\n self.post('nonwater', nonwater_mask)\n # Tuned for a 320x256 image\n vehicle_depth = shm.kalman.depth.get()\n reflection_cutoff = min(h, int(max(0, 3 - vehicle_depth)**2 * CUTOFF_SCALAR))\n mat[:reflection_cutoff] *= 0\n tmp = mat.copy()\n draw_text(tmp, 'Depth: {:.2f}'.format(vehicle_depth), (30, 30), 0.5, color=(255, 255, 255))\n self.post('mat', tmp)\n #lab, lab_split = bgr_to_lab(mat)\n #nonblack_mask, _ = gray_to_bgr(np.uint8(255 * (lab_split[0] > self.options['nonblack_thresh'])))\n nonblack_mask, _ = gray_to_bgr(np.uint8(255 * (np.var(mat, axis=2) > nonblack_thresh_dist)))\n self.post('nonblack', nonblack_mask)\n mat &= nonblack_mask\n mat &= nonwater_mask\n mat = to_umat(mat)\n mat = simple_gaussian_blur(mat, to_odd(self.options['blur_kernel']),\n self.options['blur_std'])\n lab, lab_split = bgr_to_lab(mat)\n threshed, dists = thresh_color_distance([lab_split[0], lab_split[1], lab_split[2]],\n [self.options['lab_l_ref'], self.options['lab_a_ref'],\n self.options['lab_b_ref']],\n self.options['color_dist_thresh'], auto_distance_percentile=self.options['auto_distance_percentile'],\n ignore_channels=[0], weights=[2, 0, 15])\n if self.options['debug']:\n self.post('threshed', threshed)\n self.post('dists', dists)\n dilated = dilate(threshed, rect_kernel(self.options['dilate_kernel']))\n if self.options['debug']:\n self.post('dilated', dilated)\n eroded = erode(dilated, rect_kernel(self.options['erode_kernel']))\n if self.options['debug']:\n self.post('eroded', eroded)\n contours = outer_contours(eroded)\n areas = [*map(contour_area, contours)]\n centroids = [*map(contour_centroid, contours)]\n xs = [c[0] for c in centroids]\n ys = [c[1] for c in centroids]\n rects = [*map(min_enclosing_rect, contours)]\n lengths = [max(r[1]) for r in rects]\n ratios = [max(r[1]) / (1e-30 + min(r[1])) for r in rects]\n vehicle_roll = shm.kalman.roll.get()\n lines = [cv2.fitLine(c, cv2.DIST_L2, 0, 0.01, 0.01) for c in contours]\n angles = [np.degrees(np.arctan2(line[1], line[0]))[0] for line in lines]\n angles = [min(abs(90 - a - vehicle_roll), abs(-90 - a - vehicle_roll)) for a in angles]\n rectangularities = [a / (1e-30 + rect[1][0] * rect[1][1]) for (c, a, rect) in zip(contours, areas, rects)]\n contours = [ContourFeats(*feats) for feats in zip(contours, areas, xs, ys, rectangularities, angles, lengths, ratios)]\n contours = [*filter(lambda c: c.area > self.options['min_contour_area'], contours)]\n self.post_contours('area', h, w, contours)\n contours = [*filter(lambda c: c.angle < self.options['max_angle_from_vertical'], contours)]\n self.post_contours('angle', h, w, contours)\n contours = 
[*filter(lambda c: c.length > self.options['min_length'], contours)]\n self.post_contours('length', h, w, contours)\n #contours = [*filter(lambda c: c.rect > self.options['min_contour_rect'], contours)]\n #self.post_contours('rect', h, w, contours)\n contours = [*filter(lambda c: c.ratio > self.options['min_contour_ratio'], contours)]\n self.post_contours('ratio', h, w, contours)\n contours = sorted(contours, key=lambda c: c.area)[:6]\n contours_by_x = sorted(contours, key=lambda c: c.x)\n contours_by_x = filter_duplicates_sorted_by_x(contours_by_x)\n leftmost = try_index(contours_by_x, 0)\n middle = try_index(contours_by_x, 1)\n rightmost = try_index(contours_by_x, 2)\n tmp = np.zeros((h, w, 3))\n results.leftmost_visible = leftmost is not None\n results.middle_visible = middle is not None\n results.rightmost_visible = rightmost is not None\n draw_text(tmp, 'Roll: {:.2f}'.format(vehicle_roll), (30, 30), 0.5, color=(255, 255, 255))\n if leftmost is not None:\n draw_contours(tmp, [leftmost.contour], color=(255, 0, 0), thickness=-1)\n draw_circle(tmp, (leftmost.x, leftmost.y), 5, color=(255, 255, 255), thickness=-1)\n results.leftmost_x = leftmost.x\n results.leftmost_y = leftmost.y\n results.leftmost_len = leftmost.length\n if middle is not None:\n draw_contours(tmp, [middle.contour], color=(0, 255, 0), thickness=-1)\n draw_circle(tmp, (middle.x, middle.y), 5, color=(255, 255, 255), thickness=-1)\n results.middle_x = middle.x\n results.middle_y = middle.y\n results.middle_len = middle.length\n if rightmost is not None:\n draw_contours(tmp, [rightmost.contour], color=(0, 0, 255), thickness=-1)\n draw_circle(tmp, (rightmost.x, rightmost.y), 5, color=(255, 255, 255), thickness=-1)\n results.rightmost_x = rightmost.x\n results.rightmost_y = rightmost.y\n results.rightmost_len = rightmost.length\n shm.gate_vision.set(results)\n self.post('contours', tmp)\n\n\nif __name__ == '__main__':\n Gate('forward', OPTS_SIM if CUAUV_LOCALE == 'simulator' else OPTS_ODYSSEUS if VEHICLE == 'odysseus' else OPTS_AJAX)()\n" ]
[ [ "numpy.array" ], [ "numpy.sqrt", "numpy.median", "numpy.linalg.norm", "numpy.percentile", "numpy.arctan2", "numpy.mean", "numpy.float32", "numpy.var", "numpy.zeros" ] ]
caiosba/covid-19
[ "2a0f43f5004e7e39bd982eaa36185859cd9db88f" ]
[ "covid/data/mortality.py" ]
[ "import pandas as pd\nimport numpy as np\n\nfrom .cia_factbook import age_distribution\nfrom .data import DATA_PATH\n\n\ndef covid_mortality():\n \"\"\"\n Return a dataframe with COVID-19 mortality data from Neil M Ferguson, et. al.\n \"\"\"\n path = DATA_PATH / \"covid-mortality-imperial-college.csv\"\n return pd.read_csv(path, index_col=0) / 100\n\n\ndef covid_mean_mortality(region, year=2020):\n \"\"\"\n Uses demography and mortality rates from NM Ferguson et. al. to infer mean\n mortality ratios for a population. Values can be plugged directly on the\n p_h, p_c and p_f parameters of a RSEICHA simulation.\n\n Args:\n region:\n String with country or region name or a data frame with compatible\n demography.\n year:\n Reference year (1950-2020).\n\n Returns:\n p_h:\n Fraction of patients that require hospitalization.\n p_c:\n Fraction of hospitalized patients that require ICUs.\n p_f:\n Fraction of critical patients that die.\n \"\"\"\n if isinstance(region, str):\n df = age_distribution(region, year, coarse=True)\n elif isinstance(region, (pd.DataFrame, pd.Series, np.ndarray)):\n df = region\n else:\n tname = type(region).__name__\n raise TypeError(f\"invalid argument type for region: {tname}\")\n dm = covid_mortality()\n total = df.sum()\n\n h = dm[\"hospitalization\"]\n c = dm[\"icu\"]\n f = dm[\"fatality\"]\n\n p_h = (h * df).sum() / total\n p_c = (c * df).sum() / total\n p_f = (f / h / c * df).sum() / total\n return p_h, p_c, p_f\n" ]
[ [ "pandas.read_csv" ] ]
samuelduchesne/archetypal
[ "2a7af343cc8fc6ee6aa34d3d87fa56384f2b0633" ]
[ "archetypal/template/dhw.py" ]
[ "\"\"\"archetypal DomesticHotWaterSetting.\"\"\"\n\nimport collections\nfrom statistics import mean\n\nimport numpy as np\nfrom eppy import modeleditor\nfrom sigfig import round\nfrom validator_collection import validators\n\nfrom archetypal import settings\nfrom archetypal.template.schedule import UmiSchedule\nfrom archetypal.template.umi_base import UmiBase\nfrom archetypal.utils import log, reduce, timeit\n\n\nclass DomesticHotWaterSetting(UmiBase):\n \"\"\"Domestic Hot Water settings.\n\n .. image:: ../images/template/zoneinfo-dhw.png\n \"\"\"\n\n __slots__ = (\n \"_flow_rate_per_floor_area\",\n \"_is_on\",\n \"_water_schedule\",\n \"_water_supply_temperature\",\n \"_water_temperature_inlet\",\n \"_area\",\n )\n\n def __init__(\n self,\n Name,\n WaterSchedule=None,\n IsOn=True,\n FlowRatePerFloorArea=0.03,\n WaterSupplyTemperature=65,\n WaterTemperatureInlet=10,\n area=1,\n **kwargs,\n ):\n \"\"\"Initialize object with parameters.\n\n Args:\n area (float): The area the zone associated to this object.\n IsOn (bool): If True, dhw is on.\n WaterSchedule (UmiSchedule): Schedule that modulates the\n FlowRatePerFloorArea.\n FlowRatePerFloorArea (float): The flow rate per flow area [m³/(hr·m²)].\n WaterSupplyTemperature (float): The water supply temperature [degC].\n WaterTemperatureInlet (float): The water temperature intel from the water\n mains [degC].\n **kwargs: keywords passed to parent constructors.\n \"\"\"\n super(DomesticHotWaterSetting, self).__init__(Name, **kwargs)\n self.FlowRatePerFloorArea = FlowRatePerFloorArea\n self.IsOn = IsOn\n self.WaterSupplyTemperature = WaterSupplyTemperature\n self.WaterTemperatureInlet = WaterTemperatureInlet\n self.WaterSchedule = WaterSchedule\n self.area = area\n\n @property\n def FlowRatePerFloorArea(self):\n \"\"\"Get or set the flow rate per flow area [m³/(hr·m²)].\"\"\"\n return self._flow_rate_per_floor_area\n\n @FlowRatePerFloorArea.setter\n def FlowRatePerFloorArea(self, value):\n self._flow_rate_per_floor_area = validators.float(value, minimum=0)\n\n @property\n def IsOn(self):\n \"\"\"Get or set the availability of the domestic hot water [bool].\"\"\"\n return self._is_on\n\n @IsOn.setter\n def IsOn(self, value):\n assert isinstance(value, bool), (\n f\"Input error with value {value}. IsOn must \"\n f\"be a boolean, not a {type(value)}\"\n )\n self._is_on = value\n\n @property\n def WaterSchedule(self):\n \"\"\"Get or set the schedule which modulates the FlowRatePerFloorArea.\"\"\"\n return self._water_schedule\n\n @WaterSchedule.setter\n def WaterSchedule(self, value):\n if value is not None:\n assert isinstance(value, UmiSchedule), (\n f\"Input error with value {value}. 
WaterSchedule must \"\n f\"be an UmiSchedule, not a {type(value)}\"\n )\n self._water_schedule = value\n\n @property\n def WaterSupplyTemperature(self):\n \"\"\"Get or set the water supply temperature [degC].\"\"\"\n return self._water_supply_temperature\n\n @WaterSupplyTemperature.setter\n def WaterSupplyTemperature(self, value):\n self._water_supply_temperature = validators.float(value)\n\n @property\n def WaterTemperatureInlet(self):\n \"\"\"Get or set the water temperature intel from the water mains [degC].\"\"\"\n return self._water_temperature_inlet\n\n @WaterTemperatureInlet.setter\n def WaterTemperatureInlet(self, value):\n self._water_temperature_inlet = validators.float(value)\n\n @property\n def area(self):\n \"\"\"Get or set the area of the zone associated to this object [m²].\"\"\"\n return self._area\n\n @area.setter\n def area(self, value):\n self._area = validators.float(value, minimum=0)\n\n @classmethod\n def from_dict(cls, data, schedules, **kwargs):\n \"\"\"Create a DomesticHotWaterSetting from a dictionary.\n\n Args:\n data (dict): The python dictionary.\n schedules (dict): A dictionary of UmiSchedules with their id as keys.\n **kwargs: keywords passed the MaterialBase constructor.\n \"\"\"\n _id = data.pop(\"$id\")\n wat_sch = data.pop(\"WaterSchedule\", None)\n schedule = schedules[wat_sch[\"$ref\"]]\n return cls(id=_id, WaterSchedule=schedule, **data, **kwargs)\n\n def to_dict(self):\n \"\"\"Return DomesticHotWaterSetting dictionary representation.\"\"\"\n self.validate() # Validate object before trying to get json format\n\n data_dict = collections.OrderedDict()\n\n data_dict[\"$id\"] = str(self.id)\n data_dict[\"FlowRatePerFloorArea\"] = round(self.FlowRatePerFloorArea, sigfigs=4)\n data_dict[\"IsOn\"] = self.IsOn\n data_dict[\"WaterSchedule\"] = self.WaterSchedule.to_ref()\n data_dict[\"WaterSupplyTemperature\"] = round(\n self.WaterSupplyTemperature, sigfigs=4\n )\n data_dict[\"WaterTemperatureInlet\"] = round(\n self.WaterTemperatureInlet, sigfigs=4\n )\n data_dict[\"Category\"] = self.Category\n data_dict[\"Comments\"] = validators.string(self.Comments, allow_empty=True)\n data_dict[\"DataSource\"] = self.DataSource\n data_dict[\"Name\"] = self.Name\n\n return data_dict\n\n @classmethod\n @timeit\n def from_zone(cls, zone_epbunch, **kwargs):\n \"\"\"Create object from a zone EpBunch.\n\n WaterUse:Equipment objects referring to this zone will be parsed.\n\n Args:\n zone (EpBunch): The zone object.\n \"\"\"\n # If Zone is not part of Conditioned Area, it should not have a DHW object.\n if zone_epbunch.Part_of_Total_Floor_Area.lower() == \"no\":\n return None\n\n # First, find the WaterUse:Equipment assigned to this zone\n dhw_objs = zone_epbunch.getreferingobjs(\n iddgroups=[\"Water Systems\"], fields=[\"Zone_Name\"]\n )\n if not dhw_objs:\n # Sometimes, some of the WaterUse:Equipment objects are not assigned to\n # any zone. 
Therefore, to account for their water usage, we can try to\n # assign dangling WaterUse:Equipments by looking for the zone name in the\n # object name.\n dhw_objs.extend(\n [\n dhw\n for dhw in zone_epbunch.theidf.idfobjects[\"WATERUSE:EQUIPMENT\"]\n if zone_epbunch.Name.lower() in dhw.Name.lower()\n ]\n )\n\n if dhw_objs:\n # This zone has at least one WaterUse:Equipment object\n zone_area = modeleditor.zonearea(zone_epbunch.theidf, zone_epbunch.Name)\n total_flow_rate = cls._do_flow_rate(dhw_objs, zone_area)\n water_schedule = cls._do_water_schedule(dhw_objs)\n inlet_temp = cls._do_inlet_temp(dhw_objs)\n supply_temp = cls._do_hot_temp(dhw_objs)\n\n name = zone_epbunch.Name + \"_DHW\"\n z_dhw = cls(\n Name=name,\n FlowRatePerFloorArea=total_flow_rate,\n IsOn=bool(total_flow_rate > 0),\n WaterSchedule=water_schedule,\n WaterSupplyTemperature=supply_temp,\n WaterTemperatureInlet=inlet_temp,\n Category=zone_epbunch.theidf.name,\n area=zone_area,\n **kwargs,\n )\n return z_dhw\n else:\n log(f\"No 'Water Systems' found in zone '{zone_epbunch.Name}'\")\n return None\n\n @classmethod\n def _do_hot_temp(cls, dhw_objs):\n \"\"\"Resolve hot water temperature.\n\n Args:\n dhw_objs:\n \"\"\"\n hot_schds = []\n for obj in dhw_objs:\n # Reference to the schedule object specifying the target water\n # temperature [C]. If blank, the target temperature defaults to\n # the hot water supply temperature.\n schedule_name = (\n obj.Target_Temperature_Schedule_Name\n if obj.Target_Temperature_Schedule_Name != \"\"\n else obj.Hot_Water_Supply_Temperature_Schedule_Name\n )\n epbunch = obj.theidf.schedules_dict[schedule_name.upper()]\n hot_schd = UmiSchedule.from_epbunch(epbunch)\n hot_schds.append(hot_schd)\n\n return np.array([sched.all_values.mean() for sched in hot_schds]).mean()\n\n @classmethod\n def _do_inlet_temp(cls, dhw_objs):\n \"\"\"Calculate inlet water temperature.\"\"\"\n WaterTemperatureInlet = []\n for obj in dhw_objs:\n if obj.Cold_Water_Supply_Temperature_Schedule_Name != \"\":\n # If a cold water supply schedule is provided, create the\n # schedule\n epbunch = obj.theidf.schedules_dict[\n obj.Cold_Water_Supply_Temperature_Schedule_Name.upper()\n ]\n cold_schd_names = UmiSchedule.from_epbunch(epbunch)\n WaterTemperatureInlet.append(cold_schd_names.mean)\n else:\n # If blank, water temperatures are calculated by the\n # Site:WaterMainsTemperature object.\n water_mains_temps = obj.theidf.idfobjects[\n \"Site:WaterMainsTemperature\".upper()\n ]\n if water_mains_temps:\n # If a \"Site:WaterMainsTemperature\" object exists,\n # compute the water temperature depending on the calculation method:\n water_mains_temp = water_mains_temps[0]\n if water_mains_temp.Calculation_Method.lower() == \"schedule\":\n # From Schedule method\n mains_scd = UmiSchedule.from_epbunch(\n obj.theidf.schedules_dict[\n water_mains_temp.Schedule_Name.upper()\n ]\n )\n WaterTemperatureInlet.append(mains_scd.mean())\n elif water_mains_temp.Calculation_Method.lower() == \"correlation\":\n # From Correlation method\n mean_outair_temp = (\n water_mains_temp.Annual_Average_Outdoor_Air_Temperature\n )\n max_dif = (\n water_mains_temp.Maximum_Difference_In_Monthly_Average_Outdoor_Air_Temperatures\n )\n\n WaterTemperatureInlet.append(\n water_main_correlation(mean_outair_temp, max_dif).mean()\n )\n else:\n # Otherwise, there is no Site:WaterMainsTemperature object in\n # the input file, and a default constant value of 10 C is\n # assumed.\n WaterTemperatureInlet.append(float(10))\n return mean(WaterTemperatureInlet) if WaterTemperatureInlet else 10\n\n @classmethod\n def
_do_water_schedule(cls, dhw_objs):\n \"\"\"Return the WaterSchedule for a list of WaterUse:Equipment objects.\n\n If more than one object is passed, a combined schedule is returned\n\n Args:\n dhw_objs (list of EpBunch): List of WaterUse:Equipment objects.\n\n Returns:\n UmiSchedule: The WaterSchedule\n \"\"\"\n water_schds = [\n UmiSchedule.from_epbunch(\n obj.theidf.schedules_dict[obj.Flow_Rate_Fraction_Schedule_Name.upper()],\n quantity=obj.Peak_Flow_Rate,\n )\n for obj in dhw_objs\n ]\n\n return reduce(\n UmiSchedule.combine,\n water_schds,\n weights=None,\n quantity=True,\n )\n\n @classmethod\n def _do_flow_rate(cls, dhw_objs, area):\n \"\"\"Calculate total flow rate from list of WaterUse:Equipment objects.\n\n The zone's net_conditioned_building_area property is used to normalize the\n flow rate.\n\n Args:\n dhw_objs (Idf_MSequence):\n \"\"\"\n total_flow_rate = 0\n for obj in dhw_objs:\n total_flow_rate += obj.Peak_Flow_Rate # m3/s\n total_flow_rate /= area # m3/s/m2\n total_flow_rate *= 3600.0 # m3/h/m2\n return total_flow_rate\n\n def combine(self, other, **kwargs):\n \"\"\"Combine two DomesticHotWaterSetting objects together.\n\n Notes:\n When combining 2 DomesticHotWater Settings objects, the WaterSchedule\n must be averaged via the final quantity which is the peak flow rate\n [m3/hr/m2] * area [m2].\n\n .. code-block:: python\n\n (\n np.average(\n [zone_1.WaterSchedule.all_values, zone_2.WaterSchedule.all_values],\n axis=0,\n weights=[\n zone_1.FlowRatePerFloorArea * zone_1.area,\n zone_2.FlowRatePerFloorArea * zone_2.area,\n ],\n )\n * (combined.FlowRatePerFloorArea * 100)\n ).sum()\n\n Args:\n other (DomesticHotWaterSetting): The other object.\n **kwargs: keywords passed to the constructor.\n\n Returns:\n (DomesticHotWaterSetting): a new combined object.\n \"\"\"\n # Check if other is None.
Simply return self\n if not other:\n return self\n\n if not self:\n return other\n\n # Check if other is the same type as self\n if not isinstance(other, self.__class__):\n msg = \"Cannot combine %s with %s\" % (\n self.__class__.__name__,\n other.__class__.__name__,\n )\n raise NotImplementedError(msg)\n\n # Check if other is not the same as self\n if self == other:\n return self\n\n meta = self._get_predecessors_meta(other)\n new_obj = DomesticHotWaterSetting(\n WaterSchedule=UmiSchedule.combine(\n self.WaterSchedule,\n other.WaterSchedule,\n weights=[\n self.FlowRatePerFloorArea * self.area,\n other.FlowRatePerFloorArea * other.area,\n ],\n ),\n IsOn=any((self.IsOn, other.IsOn)),\n FlowRatePerFloorArea=self.float_mean(\n other, \"FlowRatePerFloorArea\", [self.area, other.area]\n ),\n WaterSupplyTemperature=self.float_mean(\n other, \"WaterSupplyTemperature\", [self.area, other.area]\n ),\n WaterTemperatureInlet=self.float_mean(\n other, \"WaterTemperatureInlet\", [self.area, other.area]\n ),\n area=self.area + other.area,\n **meta,\n )\n new_obj.predecessors.update(self.predecessors + other.predecessors)\n return new_obj\n\n def validate(self):\n \"\"\"Validate object and fill in missing values.\"\"\"\n return self\n\n @classmethod\n def whole_building(cls, idf):\n \"\"\"Create one DomesticHotWaterSetting for whole building model.\n\n Args:\n idf (IDF): The idf model.\n\n Returns:\n DomesticHotWaterSetting: The DomesticHotWaterSetting object.\n \"\"\"\n # Unconditioned area could be zero, therefore taking max of both\n area = max(idf.net_conditioned_building_area, idf.unconditioned_building_area)\n\n z_dhw_list = []\n dhw_objs = idf.idfobjects[\"WaterUse:Equipment\".upper()]\n if not dhw_objs:\n # defaults with 0 flow rate.\n total_flow_rate = 0\n water_schedule = UmiSchedule.constant_schedule()\n supply_temp = 60\n inlet_temp = 10\n\n name = idf.name + \"_DHW\"\n z_dhw = DomesticHotWaterSetting(\n WaterSchedule=water_schedule,\n IsOn=bool(total_flow_rate > 0),\n FlowRatePerFloorArea=total_flow_rate,\n WaterSupplyTemperature=supply_temp,\n WaterTemperatureInlet=inlet_temp,\n area=area,\n Name=name,\n Category=idf.name,\n )\n z_dhw_list.append(z_dhw)\n else:\n total_flow_rate = DomesticHotWaterSetting._do_flow_rate(dhw_objs, area)\n water_schedule = DomesticHotWaterSetting._do_water_schedule(dhw_objs)\n water_schedule.quantity = total_flow_rate\n inlet_temp = DomesticHotWaterSetting._do_inlet_temp(dhw_objs)\n supply_temp = DomesticHotWaterSetting._do_hot_temp(dhw_objs)\n z_dhw = DomesticHotWaterSetting(\n WaterSchedule=water_schedule,\n IsOn=bool(total_flow_rate > 0),\n FlowRatePerFloorArea=total_flow_rate,\n WaterSupplyTemperature=supply_temp,\n WaterTemperatureInlet=inlet_temp,\n area=area,\n Name=\"Whole Building WaterUse:Equipment\",\n Category=idf.name,\n )\n z_dhw_list.append(z_dhw)\n\n return reduce(DomesticHotWaterSetting.combine, z_dhw_list)\n\n def mapping(self, validate=True):\n \"\"\"Get a dict based on the object properties, useful for dict repr.\n\n Args:\n validate (bool): If True, try to validate object before returning the\n mapping.\n \"\"\"\n if validate:\n self.validate()\n\n return dict(\n FlowRatePerFloorArea=self.FlowRatePerFloorArea,\n IsOn=self.IsOn,\n WaterSchedule=self.WaterSchedule,\n WaterSupplyTemperature=self.WaterSupplyTemperature,\n WaterTemperatureInlet=self.WaterTemperatureInlet,\n Category=self.Category,\n Comments=self.Comments,\n DataSource=self.DataSource,\n Name=self.Name,\n )\n\n def duplicate(self):\n \"\"\"Get copy of self.\"\"\"\n return 
self.__copy__()\n\n def __add__(self, other):\n \"\"\"Overload + to implement self.combine.\n\n Args:\n other (DomesticHotWaterSetting):\n \"\"\"\n return self.combine(\n other,\n )\n\n def __hash__(self):\n \"\"\"Return the hash value of self.\"\"\"\n return hash(\n (self.__class__.__name__, getattr(self, \"Name\", None), self.DataSource)\n )\n\n def __key__(self):\n \"\"\"Get a tuple of attributes. Useful for hashing and comparing.\"\"\"\n return (\n self.IsOn,\n self.FlowRatePerFloorArea,\n self.WaterSupplyTemperature,\n self.WaterTemperatureInlet,\n self.WaterSchedule,\n )\n\n def __eq__(self, other):\n \"\"\"Assert self is equivalent to other.\"\"\"\n if not isinstance(other, DomesticHotWaterSetting):\n return NotImplemented\n else:\n return self.__key__() == other.__key__()\n\n def __str__(self):\n \"\"\"Return string representation.\"\"\"\n return (\n f\"{str(self.id)}: {str(self.Name)} \"\n f\"PeakFlow {self.FlowRatePerFloorArea:.5f} m3/hr/m2\"\n )\n\n def __copy__(self):\n \"\"\"Create a copy of self.\"\"\"\n return self.__class__(**self.mapping(validate=False))\n\n\ndef water_main_correlation(t_out_avg, max_diff):\n \"\"\"Based on the correlation developed by Craig Christensen and Jay Burch.\n\n Returns a 365-day temperature profile.\n\n Info:\n https://bigladdersoftware.com/epx/docs/8-9/engineering-reference\n /water-systems.html#water-mains-temperatures\n\n Args:\n t_out_avg (float): average annual outdoor air temperature (°C).\n max_diff (float): maximum difference in monthly average outdoor air\n temperatures (°C).\n\n Returns:\n (pd.Series): water mains temperature profile.\n \"\"\"\n import numpy as np\n import pandas as pd\n\n Q_ = settings.unit_registry.Quantity\n t_out_avg_F = Q_(t_out_avg, \"degC\").to(\"degF\")\n max_diff_F = Q_(max_diff, \"delta_degC\").to(\"delta_degF\")\n ratio = 0.4 + 0.01 * (t_out_avg_F.m - 44)\n lag = 35 - 1.0 * (t_out_avg_F.m - 44)\n days = np.arange(1, 366) # days 1..365; arange(1, 365) would drop day 365\n\n def function(t_out_avg, day, max_diff):\n return (t_out_avg + 6) + ratio * (max_diff / 2) * np.sin(\n np.deg2rad(0.986 * (day - 15 - lag) - 90)\n )\n\n mains = [Q_(function(t_out_avg_F.m, day, max_diff_F.m), \"degF\") for day in days]\n series = pd.Series([temp.to(\"degC\").m for temp in mains])\n return series\n" ]
[ [ "numpy.arange", "numpy.deg2rad" ] ]
ccrafael/tf-faster-rcnn
[ "4f446a4c1ebefcf6d92b5e01d2b6396bcbbf1a8d" ]
[ "lib/model/train_val.py" ]
[ "# --------------------------------------------------------\n# Tensorflow Faster R-CNN\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Xinlei Chen and Zheqi He\n# --------------------------------------------------------\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom model.config import cfg\nimport roi_data_layer.roidb as rdl_roidb\nfrom roi_data_layer.layer import RoIDataLayer\nfrom utils.timer import Timer\n\ntry:\n import cPickle as pickle\nexcept ImportError:\n import pickle\nimport numpy as np\nimport os\nimport sys\nimport glob\nimport time\n\nimport tensorflow as tf\nfrom tensorflow.python import pywrap_tensorflow\n\n\nclass SolverWrapper(object):\n \"\"\"\n A wrapper class for the training process\n \"\"\"\n\n def __init__(self, sess, network, imdb, roidb, valroidb, output_dir, tbdir, pretrained_model=None):\n self.net = network\n self.imdb = imdb\n self.roidb = roidb\n self.valroidb = valroidb\n self.output_dir = output_dir\n self.tbdir = tbdir\n # Simply put '_val' at the end to save the summaries from the validation set\n self.tbvaldir = tbdir + '_val'\n if not os.path.exists(self.tbvaldir):\n os.makedirs(self.tbvaldir)\n self.pretrained_model = pretrained_model\n\n def snapshot(self, sess, iter):\n net = self.net\n\n if not os.path.exists(self.output_dir):\n os.makedirs(self.output_dir)\n\n # Store the model snapshot\n filename = cfg.TRAIN.SNAPSHOT_PREFIX + '_iter_{:d}'.format(iter) + '.ckpt'\n filename = os.path.join(self.output_dir, filename)\n self.saver.save(sess, filename)\n print('Wrote snapshot to: {:s}'.format(filename))\n\n # Also store some meta information, random state, etc.\n nfilename = cfg.TRAIN.SNAPSHOT_PREFIX + '_iter_{:d}'.format(iter) + '.pkl'\n nfilename = os.path.join(self.output_dir, nfilename)\n # current state of numpy random\n st0 = np.random.get_state()\n # current position in the database\n cur = self.data_layer._cur\n # current shuffled indexes of the database\n perm = self.data_layer._perm\n # current position in the validation database\n cur_val = self.data_layer_val._cur\n # current shuffled indexes of the validation database\n perm_val = self.data_layer_val._perm\n\n # Dump the meta info\n with open(nfilename, 'wb') as fid:\n pickle.dump(st0, fid, pickle.HIGHEST_PROTOCOL)\n pickle.dump(cur, fid, pickle.HIGHEST_PROTOCOL)\n pickle.dump(perm, fid, pickle.HIGHEST_PROTOCOL)\n pickle.dump(cur_val, fid, pickle.HIGHEST_PROTOCOL)\n pickle.dump(perm_val, fid, pickle.HIGHEST_PROTOCOL)\n pickle.dump(iter, fid, pickle.HIGHEST_PROTOCOL)\n\n return filename, nfilename\n\n def from_snapshot(self, sess, sfile, nfile):\n print('Restoring model snapshots from {:s}'.format(sfile))\n self.saver.restore(sess, sfile)\n print('Restored.')\n # Needs to restore the other hyper-parameters/states for training, (TODO xinlei) I have\n # tried my best to find the random states so that it can be recovered exactly\n # However the Tensorflow state is currently not available\n with open(nfile, 'rb') as fid:\n st0 = pickle.load(fid)\n cur = pickle.load(fid)\n perm = pickle.load(fid)\n cur_val = pickle.load(fid)\n perm_val = pickle.load(fid)\n last_snapshot_iter = pickle.load(fid)\n\n np.random.set_state(st0)\n self.data_layer._cur = cur\n self.data_layer._perm = perm\n self.data_layer_val._cur = cur_val\n self.data_layer_val._perm = perm_val\n\n return last_snapshot_iter\n\n def get_variables_in_checkpoint_file(self, file_name):\n try:\n reader = 
pywrap_tensorflow.NewCheckpointReader(file_name)\n var_to_shape_map = reader.get_variable_to_shape_map()\n return var_to_shape_map\n except Exception as e: # pylint: disable=broad-except\n print(str(e))\n if \"corrupted compressed block contents\" in str(e):\n print(\"It's likely that your checkpoint file has been compressed \"\n \"with SNAPPY.\")\n\n def construct_graph(self, sess):\n with sess.graph.as_default():\n # Set the random seed for tensorflow\n tf.set_random_seed(cfg.RNG_SEED)\n # Build the main computation graph\n layers = self.net.create_architecture('TRAIN', self.imdb.num_classes, tag='default',\n anchor_scales=cfg.ANCHOR_SCALES,\n anchor_ratios=cfg.ANCHOR_RATIOS)\n # Define the loss\n loss = layers['total_loss']\n # Set learning rate and momentum\n lr = tf.Variable(cfg.TRAIN.LEARNING_RATE, trainable=False)\n self.optimizer = tf.train.MomentumOptimizer(lr, cfg.TRAIN.MOMENTUM)\n\n # Compute the gradients with regard to the loss\n gvs = self.optimizer.compute_gradients(loss)\n # Double the gradient of the bias if set\n if cfg.TRAIN.DOUBLE_BIAS:\n final_gvs = []\n with tf.variable_scope('Gradient_Mult') as scope:\n for grad, var in gvs:\n scale = 1.\n if cfg.TRAIN.DOUBLE_BIAS and '/biases:' in var.name:\n scale *= 2.\n if not np.allclose(scale, 1.0):\n grad = tf.multiply(grad, scale)\n final_gvs.append((grad, var))\n train_op = self.optimizer.apply_gradients(final_gvs)\n else:\n train_op = self.optimizer.apply_gradients(gvs)\n\n # We will handle the snapshots ourselves\n self.saver = tf.train.Saver(max_to_keep=100000)\n # Write the train and validation information to tensorboard\n self.writer = tf.summary.FileWriter(self.tbdir, sess.graph)\n self.valwriter = tf.summary.FileWriter(self.tbvaldir)\n\n return lr, train_op\n\n def find_previous(self):\n sfiles = os.path.join(self.output_dir, cfg.TRAIN.SNAPSHOT_PREFIX + '_iter_*.ckpt.meta')\n sfiles = glob.glob(sfiles)\n sfiles.sort(key=os.path.getmtime)\n # Get the snapshot name in TensorFlow\n redfiles = []\n for stepsize in cfg.TRAIN.STEPSIZE:\n redfiles.append(os.path.join(self.output_dir,\n cfg.TRAIN.SNAPSHOT_PREFIX + '_iter_{:d}.ckpt.meta'.format(stepsize + 1)))\n sfiles = [ss.replace('.meta', '') for ss in sfiles if ss not in redfiles]\n\n nfiles = os.path.join(self.output_dir, cfg.TRAIN.SNAPSHOT_PREFIX + '_iter_*.pkl')\n nfiles = glob.glob(nfiles)\n nfiles.sort(key=os.path.getmtime)\n redfiles = [redfile.replace('.ckpt.meta', '.pkl') for redfile in redfiles]\n nfiles = [nn for nn in nfiles if nn not in redfiles]\n\n lsf = len(sfiles)\n assert len(nfiles) == lsf\n\n return lsf, nfiles, sfiles\n\n def initialize(self, sess):\n # Initial file lists are empty\n np_paths = []\n ss_paths = []\n # Fresh train directly from ImageNet weights\n print('Loading initial model weights from {:s}'.format(self.pretrained_model))\n variables = tf.global_variables()\n # Initialize all variables first\n sess.run(tf.variables_initializer(variables, name='init'))\n var_keep_dic = self.get_variables_in_checkpoint_file(self.pretrained_model)\n # Get the variables to restore, ignoring the variables to fix\n variables_to_restore = self.net.get_variables_to_restore(variables, var_keep_dic)\n\n restorer = tf.train.Saver(variables_to_restore)\n restorer.restore(sess, self.pretrained_model)\n print('Loaded.')\n # Need to fix the variables before loading, so that the RGB weights are changed to BGR\n # For VGG16 it also changes the convolutional weights fc6 and fc7 to\n # fully connected weights\n self.net.fix_variables(sess, self.pretrained_model)\n 
print('Fixed.')\n last_snapshot_iter = 0\n rate = cfg.TRAIN.LEARNING_RATE\n stepsizes = list(cfg.TRAIN.STEPSIZE)\n\n return rate, last_snapshot_iter, stepsizes, np_paths, ss_paths\n\n def restore(self, sess, sfile, nfile):\n # Get the most recent snapshot and restore\n np_paths = [nfile]\n ss_paths = [sfile]\n # Restore model from snapshots\n last_snapshot_iter = self.from_snapshot(sess, sfile, nfile)\n # Set the learning rate\n rate = cfg.TRAIN.LEARNING_RATE\n stepsizes = []\n for stepsize in cfg.TRAIN.STEPSIZE:\n if last_snapshot_iter > stepsize:\n rate *= cfg.TRAIN.GAMMA\n else:\n stepsizes.append(stepsize)\n\n return rate, last_snapshot_iter, stepsizes, np_paths, ss_paths\n\n def remove_snapshot(self, np_paths, ss_paths):\n to_remove = len(np_paths) - cfg.TRAIN.SNAPSHOT_KEPT\n for c in range(to_remove):\n nfile = np_paths[0]\n os.remove(str(nfile))\n np_paths.remove(nfile)\n\n to_remove = len(ss_paths) - cfg.TRAIN.SNAPSHOT_KEPT\n for c in range(to_remove):\n sfile = ss_paths[0]\n # To make the code compatible to earlier versions of Tensorflow,\n # where the naming tradition for checkpoints are different\n if os.path.exists(str(sfile)):\n os.remove(str(sfile))\n else:\n os.remove(str(sfile + '.data-00000-of-00001'))\n os.remove(str(sfile + '.index'))\n sfile_meta = sfile + '.meta'\n os.remove(str(sfile_meta))\n ss_paths.remove(sfile)\n\n def train_model(self, sess, max_iters):\n # Build data layers for both training and validation set\n self.data_layer = RoIDataLayer(self.roidb, self.imdb.num_classes)\n self.data_layer_val = RoIDataLayer(self.valroidb, self.imdb.num_classes, random=True)\n\n # Construct the computation graph\n lr, train_op = self.construct_graph(sess)\n\n # Find previous snapshots if there is any to restore from\n lsf, nfiles, sfiles = self.find_previous()\n\n # Initialize the variables or restore them from the last snapshot\n if lsf == 0:\n rate, last_snapshot_iter, stepsizes, np_paths, ss_paths = self.initialize(sess)\n else:\n rate, last_snapshot_iter, stepsizes, np_paths, ss_paths = self.restore(sess,\n str(sfiles[-1]),\n str(nfiles[-1]))\n timer = Timer()\n iter = last_snapshot_iter + 1\n last_summary_time = time.time()\n # Make sure the lists are not empty\n stepsizes.append(max_iters)\n stepsizes.reverse()\n next_stepsize = stepsizes.pop()\n while iter < max_iters + 1:\n # Learning rate\n if iter == next_stepsize + 1:\n # Add snapshot here before reducing the learning rate\n self.snapshot(sess, iter)\n rate *= cfg.TRAIN.GAMMA\n sess.run(tf.assign(lr, rate))\n next_stepsize = stepsizes.pop()\n\n timer.tic()\n # Get training data, one batch at a time\n blobs = self.data_layer.forward()\n\n now = time.time()\n if iter == 1 or now - last_summary_time > cfg.TRAIN.SUMMARY_INTERVAL:\n # Compute the graph with summary\n rpn_loss_cls, rpn_loss_box, loss_cls, loss_box, total_loss, summary = \\\n self.net.train_step_with_summary(sess, blobs, train_op)\n self.writer.add_summary(summary, float(iter))\n # Also check the summary on the validation set\n blobs_val = self.data_layer_val.forward()\n summary_val = self.net.get_summary(sess, blobs_val)\n self.valwriter.add_summary(summary_val, float(iter))\n last_summary_time = now\n else:\n # Compute the graph without summary\n rpn_loss_cls, rpn_loss_box, loss_cls, loss_box, total_loss = \\\n self.net.train_step(sess, blobs, train_op)\n timer.toc()\n\n # Display training information\n if iter % (cfg.TRAIN.DISPLAY) == 0:\n print('iter: %d / %d, total loss: %.6f\\n >>> rpn_loss_cls: %.6f\\n '\n '>>> rpn_loss_box: %.6f\\n >>> loss_cls: 
%.6f\\n >>> loss_box: %.6f\\n >>> lr: %f' % \\\n (iter, max_iters, total_loss, rpn_loss_cls, rpn_loss_box, loss_cls, loss_box, lr.eval()))\n print('speed: {:.3f}s / iter'.format(timer.average_time))\n\n # Snapshotting\n if iter % cfg.TRAIN.SNAPSHOT_ITERS == 0:\n last_snapshot_iter = iter\n ss_path, np_path = self.snapshot(sess, iter)\n np_paths.append(np_path)\n ss_paths.append(ss_path)\n\n # Remove the old snapshots if there are too many\n if len(np_paths) > cfg.TRAIN.SNAPSHOT_KEPT:\n self.remove_snapshot(np_paths, ss_paths)\n\n iter += 1\n\n if last_snapshot_iter != iter - 1:\n self.snapshot(sess, iter - 1)\n\n self.writer.close()\n self.valwriter.close()\n\n\ndef get_training_roidb(imdb):\n \"\"\"Returns a roidb (Region of Interest database) for use in training.\"\"\"\n if cfg.TRAIN.USE_FLIPPED:\n print('Appending horizontally-flipped training examples...')\n imdb.append_flipped_images()\n print('done')\n\n print('Preparing training data...')\n rdl_roidb.prepare_roidb(imdb)\n print('done')\n\n return imdb.roidb\n\n\ndef filter_roidb(roidb):\n \"\"\"Remove roidb entries that have no usable RoIs.\"\"\"\n\n def is_valid(entry):\n # Valid images have:\n # (1) At least one foreground RoI OR\n # (2) At least one background RoI\n overlaps = entry['max_overlaps']\n # find boxes with sufficient overlap\n fg_inds = np.where(overlaps >= cfg.TRAIN.FG_THRESH)[0]\n # Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)\n bg_inds = np.where((overlaps < cfg.TRAIN.BG_THRESH_HI) &\n (overlaps >= cfg.TRAIN.BG_THRESH_LO))[0]\n # image is only valid if such boxes exist\n valid = len(fg_inds) > 0 or len(bg_inds) > 0\n return valid\n\n num = len(roidb)\n filtered_roidb = [entry for entry in roidb if is_valid(entry)]\n num_after = len(filtered_roidb)\n print('Filtered {} roidb entries: {} -> {}'.format(num - num_after,\n num, num_after))\n return filtered_roidb\n\n\ndef train_net(network, imdb, roidb, valroidb, output_dir, tb_dir,\n pretrained_model=None,\n max_iters=40000):\n \"\"\"Train a Faster R-CNN network.\"\"\"\n roidb = filter_roidb(roidb)\n valroidb = filter_roidb(valroidb)\n\n tfconfig = tf.ConfigProto(allow_soft_placement=True)\n tfconfig.gpu_options.allow_growth = True\n\n with tf.Session(config=tfconfig) as sess:\n sw = SolverWrapper(sess, network, imdb, roidb, valroidb, output_dir, tb_dir,\n pretrained_model=pretrained_model)\n print('Solving...')\n sw.train_model(sess, max_iters)\n print('done solving')\n" ]
[ [ "numpy.random.get_state", "tensorflow.multiply", "tensorflow.summary.FileWriter", "numpy.allclose", "tensorflow.Variable", "tensorflow.global_variables", "tensorflow.variables_initializer", "tensorflow.assign", "tensorflow.python.pywrap_tensorflow.NewCheckpointReader", "tensorflow.set_random_seed", "tensorflow.ConfigProto", "numpy.random.set_state", "tensorflow.train.MomentumOptimizer", "tensorflow.variable_scope", "tensorflow.Session", "tensorflow.train.Saver", "numpy.where" ] ]
opticspy/lightpipes-python
[ "dbd66e46ca8263a6e9bf7690e5f2b2551f93f4cb" ]
[ "LightPipes/core.py" ]
[ "# -*- coding: utf-8 -*-\n\nimport numpy as _np\nfrom scipy.special import hermite, genlaguerre\nfrom scipy.interpolate import RectBivariateSpline\nfrom .misc import backward_compatible\n\nUSE_CV2 = False\n\nif USE_CV2:\n import cv2\n\nUSE_SKIMAGE = False\nif USE_SKIMAGE:\n from skimage.restoration import unwrap_phase as _unwrap_phase\nelse:\n #used in PhaseUnwrap\n # own implementation currently slower, but seems a little more stable\n # with jumpy phases and of course removes dependency on the extra package\n from .unwrap import unwrap_phase as _unwrap_phase\n\nfrom .units import deg\nfrom .field import Field\nfrom .subs import Inv_Squares\n\n\ndef BeamMix(Fin1, Fin2):\n \"\"\"\n *Addition of the fields Fin1 and Fin2.*\n \n :param Fin1: First field.\n :type Fin1: Field\n :param Fin2: Second field\n :param Fin2: Field\n :return: output field (N x N square array of complex numbers).\n :rtype: `LightPipes.field.Field`\n :Example:\n\n >>> F = BeamMix(F1 , F2)\n\n .. seealso::\n \n * :ref:`Manual: Splitting and mixing beams. <Splitting and mixing beams.>`\n\n * :ref:`Examples: Young's experiment. <Young's experiment.>`\n \"\"\"\n if Fin1.field.shape != Fin2.field.shape:\n raise ValueError('Field sizes do not match')\n Fout = Field.copy(Fin1)\n Fout.field += Fin2.field\n Fout._IsGauss=False\n return Fout\n\ndef Centroid(Fin):\n \"\"\"\n *Returns the centroid of the intensity distribution.*\n \n :param Fin: input field.\n :type Fin: Field\n :return: the coordinates and the closests array indices of the centroid: Xc, Yc, NXc, NYc\n :rtype: Tuple[float, float, int, int]\n :Example:\n \n .. code-block::\n \n from LightPipes import *\n wavelength = 500*nm\n size = 25*mm\n N = 500\n F = Begin(size, wavelength, N)\n F = CircAperture(F, 2*mm, x_shift = 5*mm, y_shift = 3*mm)\n F = Fresnel(F, 10*m)\n Xc,Yc, NXc, NYc = Centroid(F)\n print(\"Xc = {:4.2f} mm, Yc = {:4.2f} mm\".format(Xc/mm, Yc/mm))\n \n :Answer:\n \n .. code-block::\n \n Xc = 4.96 mm, Yc = 2.97 mm\n NXc = 349, NYc = 309\n\n .. seealso::\n \n * :ref:`Manual: Diagnostics: Centroid.<Centroid.>`\n \"\"\"\n Y,X=Fin.mgrid_cartesian\n I=Intensity(Fin)\n Xc=_np.average(X,weights = I)\n Yc=_np.average(Y,weights = I)\n # Find the array indices close to Xc and Yc:\n NXc =(_np.abs(Fin.xvalues - Xc)).argmin()\n NYc =(_np.abs(Fin.yvalues - Yc)).argmin()\n return Xc, Yc, NXc, NYc\n\ndef D4sigma(Fin):\n \"\"\"\n *Returns the width (* :math:`D4\\\\sigma` *) of the intensity distribution.*\n \n :param Fin: input field.\n :type Fin: Field\n :return: widths in X and Y direction.\n :rtype: Tuple[float, float]\n :Example:\n \n .. code-block::\n \n from LightPipes import *\n wavelength = 500*nm\n size = 25*mm\n N = 500\n F = Begin(size, wavelength, N)\n F = CircAperture(F, 2*mm, x_shift = 5*mm, y_shift = 3*mm)\n F = Fresnel(F, 10*m)\n sx, sy = Centroid(F)\n print(\"sx = {:4.2f} mm, sy = {:4.2f} mm\".format(sx/mm, sy/mm))\n \n :Answer:\n \n .. code-block::\n \n sx = 6.19 mm, sy = 6.30 mm\n \n .. 
seealso::\n \n * :ref:`Manual => Diagnostics => Beam width => D4sigma<d4-sigma>`\n \"\"\"\n \n Y,X=Fin.mgrid_cartesian\n I=Intensity(Fin)\n Xc,Yc, NXc, NYc = Centroid(Fin)\n return 4*_np.sqrt(_np.average((X-Xc)*(X-Xc), weights = I)), 4*_np.sqrt(_np.average((Y-Yc)*(Y-Yc), weights = I))\n\n@backward_compatible\ndef CircAperture(Fin, R, x_shift = 0.0, y_shift = 0.0):\n \"\"\"\n *Inserts a circular aperture in the field.*\n \n :param R: radius of the aperture\n :type R: int, float\n :param x_shift: shift in x direction (default = 0.0)\n :param y_shift: shift in y direction (default = 0.0)\n :type x_shift: int, float\n :type y_shift: int, float\n :param Fin: input field\n :type Fin: Field\n :return: output field (N x N square array of complex numbers).\n :rtype: `LightPipes.field.Field`\n :Example:\n \n >>> F = CircAperture(F, 3*mm) # A 3 mm radius circular aperture in the center of the grid.\n >>> # alternative notations:\n >>> F = CircAperture(F, 3*mm, 0, -3*mm) # Shifted -3 mm in the y-direction.\n >>> F = CircAperture(F, R = 3*mm, y_shift = -3*mm) # Idem\n >>> F = CircAperture(3*mm, 0.0, -3*mm, F) # Idem, old order of arguments for backward compatibility.\n \n .. seealso::\n \n * :ref:`Manual: Apertures and screens<Apertures and screens.>`\n \n * :ref:`Examples: Diffraction from a circular aperture.<Diffraction from a circular aperture.>`\n \"\"\"\n #from\n #https://stackoverflow.com/questions/44865023/\n # circular-masking-an-image-in-python-using-numpy-arrays\n Fout = Field.copy(Fin)\n \n Y, X = Fout.mgrid_cartesian\n Y = Y - y_shift\n X = X - x_shift\n \n dist_sq = X**2 + Y**2 #squared, no need for sqrt\n \n Fout.field[dist_sq > R**2] = 0.0\n Fout._IsGauss=False\n return Fout\n\n@backward_compatible\ndef CircScreen(Fin, R, x_shift=0.0, y_shift=0.0):\n \"\"\"\n *Inserts a circular screen in the field.*\n \n :param Fin: input field\n :type Fin: Field \n :param R: radius of the screen\n :type R: int, float\n :param x_shift: shift in x direction (default = 0.0)\n :param y_shift: shift in y direction (default = 0.0)\n :type x_shift: int, float\n :type y_shift: int, float\n :return: output field (N x N square array of complex numbers).\n :rtype: `LightPipes.field.Field`\n :Example:\n \n >>> F = CircScreen(F, 3*mm) # A 3 mm radius circular screen in the center of the grid.\n >>> # alternative notations:\n >>> F = CircScreen(F, 3*mm, 0, -3*mm) # Shifted -3 mm in the y-direction.\n >>> F = CircScreen(F, R = 3*mm, y_shift = -3*mm) # Idem\n >>> F = CircScreen(3*mm, 0.0, -3*mm, F) # Idem, old order of arguments for backward compatibility.\n \n .. 
seealso::\n \n * :ref:`Manual: Apertures and screens<Apertures and screens.>`\n \n * :ref:`Examples: Spot of Poisson <Spot of Poisson.>`\n \"\"\"\n #from\n #https://stackoverflow.com/questions/44865023/\n # circular-masking-an-image-in-python-using-numpy-arrays\n Fout = Field.copy(Fin)\n \n Y, X = Fout.mgrid_cartesian\n Y = Y - y_shift\n X = X - x_shift\n dist_sq = X**2 + Y**2 #squared, no need for sqrt\n \n Fout.field[dist_sq <= R**2] = 0.0\n Fout._IsGauss=False\n return Fout\n\n\n\n@backward_compatible\ndef GaussAperture(Fin, w, x_shift = 0.0, y_shift = 0.0, T = 1.0, ):\n \"\"\"\n *Inserts an aperture with a Gaussian shape in the field.*\n \n :math:`F_{out}(x,y)= \\\\sqrt{T}e^{ -\\\\frac{ x^{2}+y^{2} }{2w^{2}} } F_{in}(x,y)`\n\n :param Fin: input field\n :type Fin: Field\n :param w: 1/e intensity width\n :type w: int, float\n :param x_shift: shift in x direction (default = 0.0)\n :param y_shift: shift in y direction (default = 0.0)\n :type x_shift: int, float\n :type y_shift: int, float\n :param T: center intensity transmission (default = 1.0)\n :type T: int, float\n :return: output field (N x N square array of complex numbers).\n :rtype: `LightPipes.field.Field`\n :Example:\n\n >>> F = GaussAperture(Fin, w) # centered, T=1.0, width = w\n >>> F = GaussAperture(Fin, w, T = 0.5) # idem, transmission = 0.5\n >>> F = GaussAperture(Fin, w, T = 0.5, y_shift = -3 *mm) # idem, shifted in y direction\n >>> F = GaussAperture(Fin, w, 0.0, -3.0*mm, 0.5) # idem\n\n .. seealso::\n \n * :ref:`Manual: Apertures and screens.<Apertures and screens.>`\n \"\"\" \n\n Fout = Field.copy(Fin)\n \n Y, X = Fout.mgrid_cartesian\n Y = Y - y_shift\n X = X - x_shift\n\n w2=w*w*2\n SqrtT=_np.sqrt(T)\n Fout.field*=SqrtT*_np.exp(-(X*X+Y*Y)/w2)\n Fout._IsGauss=False\n return Fout\n\ndef SuperGaussAperture(Fin, w, n = 2.0, x_shift = 0.0, y_shift = 0.0, T = 1.0 ):\n \"\"\"\n *Inserts an aperture with a super-Gaussian shape in the field.*\n \n :math:`F_{out}(x,y)= \\\\sqrt{T}e^{ -\\\\left [ \\\\frac{ x^{2}+y^{2} }{2w^{2}} \\\\right ]^n } F_{in}(x,y)`\n\n :param Fin: input field\n :type Fin: Field\n :param w: 1/e intensity width\n :type w: int, float\n :param n: power of the super Gauss (default = 2.0)\n :type n: int, float\n :param x_shift: shift in x direction (default = 0.0)\n :param y_shift: shift in y direction (default = 0.0)\n :type x_shift: int, float\n :type y_shift: int, float\n :param T: center intensity transmission (default = 1.0)\n :type T: int, float\n :return: output field (N x N square array of complex numbers).\n :rtype: `LightPipes.field.Field`\n :Example:\n\n >>> F = SuperGaussAperture(Fin, w) # centered, T=1.0, width = w, power = 2.0\n >>> F = SuperGaussAperture(Fin, w, n = 21) # idem, power = 21\n >>> F = SuperGaussAperture(Fin, w, n = 21, y_shift = -3 *mm) # idem, shifted in y direction\n >>> F = SuperGaussAperture(Fin, w, 21, 0.0, -3.0*mm, 0.5) # idem\n\n .. 
seealso::\n \n * :ref:`Manual: Apertures and screens.<Apertures and screens.>`\n \"\"\" \n\n Fout = Field.copy(Fin)\n \n Y, X = Fout.mgrid_cartesian\n Y = Y - y_shift\n X = X - x_shift\n\n w2=w*w*2\n SqrtT=_np.sqrt(T)\n Fout.field*=SqrtT*_np.exp(-((X*X+Y*Y)/w2)**n)\n Fout._IsGauss=False\n return Fout\n\n@backward_compatible\ndef GaussScreen(Fin, w, x_shift = 0.0, y_shift = 0.0, T = 0.0 ):\n \"\"\" \n *Inserts a screen with a Gaussian shape in the field.*\n\n :math:`F_{out}(x,y)= \\\\sqrt{1-(1-T)e^{ -\\\\frac{ x^{2}+y^{2} }{w^{2}} }} F_{in}(x,y)`\n\n :param Fin: input field\n :type Fin: Field\n :param w: 1/e intensity width\n :type w: int, float\n :param x_shift: shift in x direction (default = 0.0)\n :param y_shift: shift in y direction (default = 0.0)\n :type x_shift: int, float\n :type y_shift: int, float\n :param T: center intensity transmission (default = 0.0)\n :type T: int, float\n :return: output field (N x N square array of complex numbers).\n :rtype: `LightPipes.field.Field`\n :Example:\n\n >>> F = GaussScreen(Fin, w) # centered, T=0.0, width = w\n >>> F = GaussScreen(Fin, w, T = 0.5) # idem, center transmission = 0.5\n >>> F = GaussScreen(Fin, w, T = 0.5, y_shift = -3 *mm) # idem, shifted in y direction\n >>> F = GaussScreen(Fin, w, 0.0, -3.0*mm, 0.5) # idem\n\n .. seealso::\n \n * :ref:`Manual: Apertures and screens.<Apertures and screens.>`\n \"\"\" \n Fout = Field.copy(Fin)\n \n Y, X = Fout.mgrid_cartesian\n Y = Y - y_shift\n X = X - x_shift\n\n w2=w*w\n Fout.field*=_np.sqrt(1-(1-T)*_np.exp(-(X*X+Y*Y)/w2))\n Fout._IsGauss=False\n return Fout\n \ndef GaussHermite(Fin, w0, m = 0, n = 0, A = 1.0):\n \"\"\"\n *Substitutes a Hermite-Gauss mode (beam waist) in the field.*\n\n :math:`F_{m,n}(x,y,z=0) = A H_m\\\\left(\\\\dfrac{\\\\sqrt{2}x}{w_0}\\\\right)H_n\\\\left(\\\\dfrac{\\\\sqrt{2}y}{w_0}\\\\right)e^{-\\\\frac{x^2+y^2}{w_0^2}}`\n\n :param Fin: input field\n :type Fin: Field\n :param w0: Gaussian spot size parameter in the beam waist (1/e amplitude point)\n :type w0: int, float\n :param m: mode index (default = 0)\n :param n: mode index (default = 0)\n :type m: int, float\n :type n: int, float\n :param A: amplitude (default = 1.0)\n :type A: int, float\n :return: output field (N x N square array of complex numbers).\n :rtype: `LightPipes.field.Field`\n :Example:\n\n >>> F = GaussHermite(F, 3*mm) # Fundamental Gauss mode, HG0,0 with a beam radius of 3 mm\n >>> F = GaussHermite(F, 3*mm, m=3) # Idem, HG3,0\n >>> F = GaussHermite(F, 3*mm, m=3, n=1, A=2.0) # Idem, HG3,1, amplitude 2.0\n >>> F = GaussHermite(F, 3*mm, 3, 1, 2.0) # Idem\n \n .. seealso::\n \n * :ref:`Examples: Hermite Gauss modes.<Hermite Gauss modes.>`\n \n Reference::\n \n A. Siegman, \"Lasers\", p. 
642\n \"\"\"\n # ************* Backward compatibility section ****************\n #The general backward_compatible decorator does not work for this command,\n #because of the positional argument w0.\n _using_oldstyle = False\n if not isinstance(Fin, Field):\n #first arg is not a field, either backward compat syntax or\n # complete usage error -> find out if Field is last, else error\n if isinstance(A, Field):\n #found field in last arg\n _using_oldstyle = True #just in case code wants to know this later\n # in function\n Fin, w0, m, n, A = A, n, Fin, w0, m\n #caution: python can swap the values only if written on single\n # line, if split up a temporary assignment is necessary\n # (since a=b, b=a would not work, only temp=a, a=b, b=temp)\n #-> now all the variables contain what is expected in new style\n else:\n raise ValueError('GaussHermite: Field is neither first nor '\n + 'last parameter (backward compatibility check)'\n + ', please check syntax/usage.')\n # ************* end of Backward compatibility section *********\n Fout = Field.copy(Fin)\n \n Y, X = Fout.mgrid_cartesian\n #Y = Y - y_shift\n #X = X - x_shift\n\n sqrt2w0=_np.sqrt(2.0)/w0\n w02=w0*w0\n\n Fout.field = A * hermite(m)(sqrt2w0*X)*hermite(n)(sqrt2w0*Y)*_np.exp(-(X*X+Y*Y)/w02)\n Fout._IsGauss=True\n return Fout\n\ndef GaussLaguerre(Fin, w0, p = 0, l = 0, A = 1.0 ):\n \"\"\"\n *Substitutes a Laguerre-Gauss mode (beam waist) in the field.*\n\n :math:`F_{p,l}(x,y,z=0) = A \\\\left(\\\\frac{\\\\rho}{2}\\\\right)^{\\\\frac{|l|}{2} }L^p_l\\\\left(\\\\rho\\\\right)e^{-\\\\frac{\\\\rho}{2}}\\\\cos(l\\\\theta)`,\n \n with: :math:`\\\\rho=\\\\frac{2(x^2+y^2)}{w_0^2}`\n\n :param Fin: input field\n :type Fin: Field\n :param w0: Gaussian spot size parameter in the beam waist (1/e amplitude point)\n :type w0: int, float\n :param p: mode index (default = 0.0)\n :param l: mode index (default = 0.0)\n :type p: int, float\n :type l: int, float\n :param A: amplitude (default = 1.0)\n :type A: int, float\n :return: output field (N x N square array of complex numbers).\n :rtype: `LightPipes.field.Field`\n :Example:\n\n >>> F = GaussLaguerre(F, 3*mm) # Fundamental Gauss mode, LG0,0 with a beam radius of 3 mm\n >>> F = GaussLaguerre(F, 3*mm, m=3) # Idem, LG3,0\n >>> F = GaussLaguerre(F, 3*mm, m=3, n=1, A=2.0) # Idem, LG3,1, amplitude 2.0\n >>> F = GaussLaguerre(F, 3*mm, 3, 1, 2.0) # Idem\n \n .. seealso::\n \n * :ref:`Examples: Laguerre Gauss modes.<Laguerre Gauss modes.>`\n \n Reference::\n \n A. Siegman, \"Lasers\", p. 
642\n \"\"\"\n # ************* Backward compatibility section ****************\n #The general backward_compatible decorator does not work for this command,\n #because of the positional argument w0.\n #Old style: GaussLaguerre(p, l, A, w0,Fin)\n #New style: GaussLaguerre(Fin, w0, p=0, l=0, A=1.0)\n _using_oldstyle = False\n if not isinstance(Fin, Field):\n #first arg is not a field, either backward compat syntax or\n # complete usage error -> find out if Field is last, else error\n if isinstance(A, Field):\n #found field in last arg\n _using_oldstyle = True #just in case code wants to know this later\n # in function\n Fin, w0, p, l, A = A, l, Fin, w0, p\n #caution: python can swap the values only if written on single\n # line, if split up a temporary assignment is necessary\n # (since a=b, b=a would not work, only temp=a, a=b, b=temp)\n #-> now all the variables contain what is expected in new style\n else:\n raise ValueError('GaussLaguerre: Field is neither first nor '\n + 'last parameter (backward compatibility check)'\n + ', please check syntax/usage.')\n # ************* end of Backward compatibility section *********\n Fout = Field.copy(Fin)\n R, Phi = Fout.mgrid_polar\n w02=w0*w0\n la=abs(l)\n rho = 2*R*R/w02\n Fout.field = A * rho**(la/2) * genlaguerre(p,la)(rho) * _np.exp(-rho/2) * _np.cos(l*Phi)\n Fout._IsGauss=False\n return Fout\n\n\n\n@backward_compatible\ndef IntAttenuator(Fin, att = 0.5 ):\n \"\"\"\n *Attenuates the intensity of the field.*\n \n :math:`F_{out}(x,y)=\\\\sqrt{att}F_{in}(x,y)`\n\n :param Fin: input field\n :type Fin: Field\n :param att: intensity attenuation factor (default = 0.5)\n :type att: int, float\n :return: output field (N x N square array of complex numbers).\n :rtype: `LightPipes.field.Field`\n :Example:\n\n >>> F = IntAttenuator(F) # attenuates the intensity of the field with a factor 0.5\n >>> F = IntAttenuator(F, att = 0.2) # Idem, with a factor 0.2\n >>> F = IntAttenuator(F, 0.2) # Idem\n \n .. seealso::\n \n * :ref:`Manual: Splitting and mixing beams.<Splitting and mixing beams.>`\n \n * :ref:`Examples: Michelson interferometer.<Michelson interferometer.>`\n \"\"\"\n Efactor = _np.sqrt(att) #att. given as intensity\n Fout = Field.copy(Fin)\n Fout.field *= Efactor\n return Fout\n\n@backward_compatible\ndef Intensity(Fin, flag = 0):\n \"\"\"\n *Calculates the intensity of the field.*\n \n :math:`I(x,y)=F_{in}(x,y).F_{in}(x,y)^*`\n \n :param Fin: input field\n :type Fin: Field\n :param flag: 0: no normalisation, 1: normalisation to 1, 2: normalized to 255 (for bitmaps) (default = 0)\n :type flag: int, float\n :return: output intensity distribution (N x N square array of real numbers).\n :rtype: `numpy.ndarray`\n :Example:\n \n >>> I = Intensity(F) # intensity of the field, no normalisation\n >>> I = Intensity(F, flag=1) # Idem, normalized to 1\n >>> I = Intensity(F, 2) # Idem, normalized to 255\n \n .. 
seealso::\n \n * :ref:`Manual: Graphing and visualisation.<Graphing and visualisation.>`\n \"\"\"\n I = _np.abs(Fin.field)**2\n if flag > 0:\n Imax = I.max()\n if Imax == 0.0:\n raise ValueError('Cannot normalize because of 0 beam power.')\n I = I/Imax\n if flag == 2:\n I = I*255\n return I\n\n@backward_compatible\ndef Interpol(Fin, new_size, new_N, x_shift = 0.0, y_shift = 0.0, angle = 0.0, magnif = 1.0 ):\n \"\"\"\n *Interpolates the field to a new grid size and grid dimension.*\n \n :param Fin: input field\n :type Fin: Field\n :param new_size: new grid size\n :type new_size: int, float\n :param new_N: new grid dimension\n :type new_N: int, float\n :param x_shift: shift of the field in x direction (default = 0.0)\n :type x_shift: int, float\n :param y_shift: shift of the field in y direction (default = 0.0)\n :type y_shift: int, float\n :param angle: rotation of the field in degrees (default = 0.0)\n :type angle: int, float\n :param magnif: magnification of the field amplitude (default = 1.0)\n :type magnif: int, float\n :return: output field (N x N square array of complex numbers).\n :rtype: `LightPipes.field.Field`\n :Example:\n\n >>> F = Interpol(F, 50*mm, 200) # interpolates the field to a grid size of 50 mm and a grid dimension of 200\n >>> F = Interpol(F, 50*mm, 200, y_shift = 2*mm) # Idem, shifted 2 mm in the y direction\n >>> F = Interpol(F, 50*mm, 200, y_shift = 2*mm, magnif = 2.0) # Idem, magnifies the field by a factor of 2.0\n >>> F = Interpol(F, 50*mm, 200, 0.0, 2*mm, 0.0, 2.0) # Idem\n \n .. seealso::\n \n * :ref:`Manual: Interpolation.<Interpolation.>`\n \n \"\"\"\n\n Fout = Field.begin(new_size, Fin.lam, new_N, Fin._dtype)\n Fout.field[:,:] = 0.0\n \n legacy = True\n if legacy:\n Pi = 3.141592654 #compare Cpp results numerically\n else:\n Pi = _np.pi #more accurate, but slightly different results\n angle *= Pi/180.\n cc=_np.cos(angle)\n ss=_np.sin(angle)\n \n if legacy:\n #dx defined differently\n size_old = Fin.siz\n old_number = Fin.N\n dx_old = size_old/(old_number-1)\n on21 = int(old_number/2)\n Xold = dx_old * _np.arange(-on21, old_number-on21)\n Yold = dx_old * _np.arange(-on21, old_number-on21)\n else:\n Xold = Fin.xvalues\n Yold = Fin.yvalues\n \n if legacy:\n dx_new = new_size/(new_N-1) #TODO legacy, once again without -1 seems correct\n nn21 = int(new_N/2)\n X0 = dx_new * _np.arange(-nn21, new_N-nn21)\n Y0 = dx_new * _np.arange(-nn21, new_N-nn21)\n X0, Y0 = _np.meshgrid(X0, Y0)\n else:\n dx_new = Fout.dx\n Y0, X0 = Fout.mgrid_cartesian #note swapped order!\n X0 -= x_shift\n Y0 -= y_shift\n Xnew = (X0*cc + Y0*ss)/magnif\n Ynew = (X0*(-ss) + Y0* cc)/magnif\n \n xmin, xmax = Xold[0], Xold[-1]\n ymin, ymax = Yold[0], Yold[-1]\n #filter strictly inside (not <=) since edge pixels seem wrong in interp\n filtmask = ((Xnew > xmin) & (Xnew < xmax) &\n (Ynew > ymin) & (Ynew < ymax))\n # same goes for Cpp lightpipes, interpolating a 20x20 grid to a 20x20 grid\n # of same size will have 0s along the edges and only 18x18 useful pixels\n \n #instead of calling interp for all pixels, only call for those new pixels\n # whose coordinates (transformed to old) are inside old grid box\n Xmask = Xnew[filtmask] #flat list of X-values, not meshgrid anymore\n Ymask = Ynew[filtmask]\n \n use_scipy_interp = False\n if use_scipy_interp:\n ks = 1 #spline order: linear or higher\n interp_real = RectBivariateSpline(Xold, Yold, Fin.field.real,\n kx=ks, ky=ks)\n interp_imag = RectBivariateSpline(Xold, Yold, Fin.field.imag,\n kx=ks, ky=ks)\n \n out_real = interp_real(Xmask, Ymask, grid=False)\n out_imag = interp_imag(Xmask, 
Ymask, grid=False)\n out_comp = out_real + 1j* out_imag\n Fout.field[filtmask] = out_comp\n else:\n out_z = Inv_Squares(Xmask, Ymask, Fin.field, dx_old)\n Fout.field[filtmask] = out_z\n Fout.field /= magnif\n Fout._IsGauss=False\n return Fout\n\n@backward_compatible\ndef MultIntensity( Fin, Intens):\n \"\"\"\n *Multiplies the field with a given intensity distribution.*\n \n :param Fin: input field\n :type Fin: Field\n :param Intens: N x N square array of real numbers or scalar\n :type Intens: numpy.ndarray, float, int\n :return: output field (N x N square array of complex numbers).\n :rtype: `LightPipes.field.Field`\n :Example:\n \n >>> import numpy as np\n >>> Int=np.empty([N,N])\n >>> for i in range(1,N):\n >>> for j in range(1,N):\n >>> Int[i][j]=math.fabs(math.sin(i/10.0)*math.cos(j/5.0))\n >>> F = MultIntensity(F, Int)\n\n .. seealso::\n \n * :ref:`Manual: User defined phase and intensity filters.<User defined phase and intensity filters.>`\n \"\"\"\n if not _np.isscalar(Intens):\n if Intens.shape != Fin.field.shape:\n raise ValueError('Intensity pattern shape does not match field size')\n Fout = Field.copy(Fin)\n Efield = _np.sqrt(Intens)\n Fout.field *= Efield\n Fout._IsGauss=False\n return Fout\n\n@backward_compatible\ndef MultPhase( Fin, Phi):\n \"\"\"\n *Multiplies the field with a given phase distribution.*\n \n :param Fin: input field\n :type Fin: Field\n :param Phi: N x N square array of real numbers or scalar\n :type Phi: numpy.ndarray, int, float\n :return: output field (N x N square array of complex numbers).\n :rtype: `LightPipes.field.Field`\n :Example:\n \n >>> # multiply with a phase distribution:\n >>> #\n >>> import numpy as np\n >>> Phi=np.empty([N,N])\n >>> for i in range(1,N):\n >>> for j in range(1,N):\n >>> Phi[i][j]=math.fabs(math.sin(i/10.0)*math.cos(j/5.0))\n >>> F = MultPhase(F, Phi)\n >>> #\n >>> # multiply with a scalar:\n >>> F = MultPhase(F, 0.12345*rad) # multiplies the field with a constant phase factor of 0.12345 rad\n\n .. seealso::\n \n * :ref:`Manual: User defined phase and intensity filters.<User defined phase and intensity filters.>`\n \"\"\"\n if not _np.isscalar(Phi):\n if Phi.shape != Fin.field.shape:\n raise ValueError('Phase pattern shape does not match field size')\n Fout = Field.copy(Fin)\n Fout.field *= _np.exp(1j*Phi)\n Fout._IsGauss=False\n return Fout\n\n\ndef Normal(Fin):\n \"\"\"\n *Normalizes the field using beam power.*\n \n :math:`F_{out}(x,y)= \\\\frac{F_{in}(x,y)}{\\\\sqrt{P}}`\n\n with: :math:`P=\\\\int \\\\int F_{in}(x,y)^2 dx dy`\n \n :param Fin: input field\n :type Fin: Field\n :return: output field (N x N square array of complex numbers).\n :rtype: `LightPipes.field.Field`\n :Example:\n \n >>> F = Normal(F)\n \n .. seealso::\n \n * :ref:`Manual: Diagnostics: Field normalization.<Field normalization.>`\n \"\"\"\n Fabs = _np.abs(Fin.field)**2\n Fabs *= Fin.dx**2\n Ptot = Fabs.sum()\n if Ptot == 0.0:\n raise ValueError('Error in Normal(Fin): Zero beam power!')\n Fout = Field.copy(Fin)\n Fout.field *= _np.sqrt(1/Ptot)\n return Fout\n\n\ndef Phase(Fin, unwrap = False, units='rad', blank_eps=0):\n \"\"\"\n *Calculates the phase of the field.*\n \n :param Fin: input field\n :type Fin: Field\n :param unwrap: Call PhaseUnwrap on the extracted Phase (default = False)\n :type unwrap: bool\n :param units: 'opd': returned in [meters] of optical path length\n 'lam': returned in multiples of lambda\n 'rad': returned in multiples of 2pi phase jumps (default)\n :type units: string\n :param blank_eps: [fraction] of max. 
Intensity at which to blank the phase\n and replace the value with numpy.nan (e.g. 1e-3==0.1%)\n Set to 0 or None to disable\n :type blank_eps: float, None\n :return: output phase distribution (N x N square array of real numbers).\n :rtype: `numpy.ndarray`\n :Example:\n \n >>> Phi = Phase(F) # returns phase distribution\n >>> Phi = Phase(F, unwrap = True) # Idem, phase unwrapped\n >>> Phi = Phase(F, units = 'lam') # phase in multiples of wavelength\n \n .. seealso::\n \n * :ref:`Manual: Graphing and visualisation.<Graphing and visualisation.>`\n \"\"\"\n _2pi = 2*_np.pi\n Phi = _np.angle(Fin.field)\n if unwrap:\n Phi = PhaseUnwrap(Phi)\n \n if units=='opd':\n Phi = Phi/_2pi*Fin.lam #a PtV of 2pi will yield e.g. 1*lam=1e-6=1um\n elif units=='lam':\n Phi = Phi/_2pi #a PtV of 2pi=6.28 will yield 1 (as in 1 lambda)\n elif units=='rad':\n pass #a PtV of 2pi will yield 6.28 as requested\n else:\n raise ValueError('Unknown value for option units={}'.format(units))\n \n if blank_eps:\n I = Intensity(Fin)\n Phi[I<blank_eps*I.max()] = _np.nan\n \n return Phi\n\ndef PhaseSpiral(Fin, m = 1):\n \"\"\"\n *Multiplies Fin with a spiral phase distribution.*\n \n :param Fin: input field\n :type Fin: Field\n :param m: Order of the spiral (default = 1)\n :type m: int, float\n :return: output field (N x N square array of complex numbers).\n :rtype: `LightPipes.field.Field`\n :Example:\n \n >>> order = 2\n >>> F=PhaseSpiral(F,m=order) # multiplies the field with a spiral phase distribution of order 2\n \n \"\"\"\n Fout = Field.copy(Fin) \n R, Phi = Fout.mgrid_polar \n Fout.field *= _np.exp(1j * m * Phi)\n Fout._IsGauss=False\n return Fout\n\ndef PhaseUnwrap(Phi):\n \"\"\"\n *Unwraps the phase (removes 2 pi phase jumps).*\n \n :param Phi: input phase distribution\n :type Phi: numpy.ndarray\n :return: output phase distribution (N x N square array of real numbers).\n :rtype: `numpy.ndarray`\n :Example:\n \n >>> Phi = PhaseUnwrap(Phi) # unwraps the phase distribution Phi\n \"\"\"\n PhiU = _unwrap_phase(Phi)\n return PhiU\n\n\ndef Power(Fin):\n \"\"\"\n *Calculates the total power.*\n \n .. math:: P=\\\\int \\\\int(|F_{in}(x,y)|)^2dxdy\n \n :param Fin: input field\n :type Fin: Field\n :return: output power\n :rtype: float\n :Example:\n \n >>> P = Power(F) # returns the power of the field F\n \n \"\"\"\n #TODO why does Normal() also sum dx**2 (==integral) while this does not??\n I = _np.abs(Fin.field)**2\n return I.sum()\n\n@backward_compatible\ndef RandomIntensity(Fin, seed = 123, noise = 1.0, ):\n \"\"\"\n *Adds random intensity to the field*\n \n :param Fin: input field\n :type Fin: Field\n :param seed: seed number for the random noise generator (default = 123)\n :type seed: int, float\n :param noise: level of the noise (default = 1.0)\n :type noise: int, float\n :return: output field (N x N square array of complex numbers).\n :rtype: `LightPipes.field.Field`\n :Example:\n \n >>> F = RandomIntensity(F) # adds noise to the field\n >>> F = RandomIntensity(F, seed = 49) # Idem, with seed 49\n >>> F = RandomIntensity(F, noise = 0.1) # adds noise to the field with amplitude 0.1\n\n .. 
seealso::\n \n * :ref:`Manual: Random filters.<Random filters.>`\n \"\"\"\n #TODO implementation error in original LP: field error, not I error!\n # need to sqrt for that\n Fout = Field.copy(Fin)\n _np.random.seed(int(seed))\n N = Fout.N\n ranint = _np.random.rand(N, N)*noise\n Fout.field += ranint\n Fout._IsGauss=False\n return Fout\n\n@backward_compatible\ndef RandomPhase(Fin, seed =456, maxPhase = _np.pi ):\n \"\"\"\n *Adds random phase to the field*\n \n :param Fin: input field\n :type Fin: Field\n :param seed: seed number for the random noise generator (default = 456)\n :type seed: int, float\n :param maxPhase: max value of the phase (default = 3.1415 (pi))\n :type maxPhase: int, float\n :return: output field (N x N square array of complex numbers).\n :rtype: `LightPipes.field.Field`\n :Example:\n \n >>> F = RandomPhase(F) # adds noise to the phase of the field\n >>> F = RandomPhase(F, seed = 49) # Idem, with seed 49\n >>> F = RandomPhase(F, maxPhase = 0.1) # adds phase-noise to the field with maximum value 0.1\n\n .. seealso::\n \n * :ref:`Manual: Random filters.<Random filters.>`\n \"\"\"\n #2020023 - ldo - tested similar result as Cpp version, although not \n # 1:1 since seed is different in numpy\n Fout = Field.copy(Fin)\n _np.random.seed(int(seed))\n N = Fout.N\n ranphase = (_np.random.rand(N, N)-0.5)*maxPhase\n Fout.field *= _np.exp(1j * ranphase)\n Fout._IsGauss=False\n return Fout\n\n@backward_compatible\ndef RectAperture(Fin, sx, sy, x_shift = 0.0, y_shift = 0.0, angle = 0.0 ):\n \"\"\"\n *Inserts a rectangular aperture in the field.*\n \n :param Fin: input field\n :type Fin: Field \n :param sx: width of the aperture\n :type sx: int, float\n :param sy: height of the aperture\n :type sy: int, float\n :param x_shift: shift in x direction (default = 0.0)\n :param y_shift: shift in y direction (default = 0.0)\n :type x_shift: int, float\n :type y_shift: int, float\n :param angle: rotation angle in degrees (default = 0.0)\n :type angle: int, float\n :return: output field (N x N square array of complex numbers).\n :rtype: `LightPipes.field.Field`\n :Example:\n \n >>> F = RectAperture(F, 3*mm, 4*mm) # A 3 x 4 mm rectangular aperture in the center of the grid.\n >>> F = RectAperture(F, 3*mm, 4*mm, 0, -3*mm) # Idem, shifted -3 mm in the y-direction.\n >>> F = RectAperture(F, 3*mm, 4*mm, y_shift = -3*mm) # Idem\n \n .. 
seealso::\n \n * :ref:`Manual: Apertures and screens<Apertures and screens.>`\n \"\"\"\n Fout = Field.copy(Fin)\n yy, xx = Fout.mgrid_cartesian\n yy = yy - y_shift\n xx = xx - x_shift\n if angle!=0.0:\n ang_rad = -1*angle*deg #-1 copied from Cpp convention\n cc = _np.cos(ang_rad)\n ss = _np.sin(ang_rad)\n xxr = cc * xx + ss * yy\n yyr = -ss * xx + cc * yy\n yy, xx = yyr, xxr\n matchx = _np.abs(xx) > sx/2\n matchy = _np.abs(yy) > sy/2\n Fout.field[matchx | matchy] = 0.0\n Fout._IsGauss=False\n return Fout\n\n@backward_compatible\ndef RectScreen(Fin, sx, sy, x_shift = 0.0, y_shift = 0.0, angle = 0.0 ):\n \"\"\"\n *Inserts a rectangular screen in the field.*\n \n :param Fin: input field\n :type Fin: Field \n :param sx: width of the screen\n :type sx: int, float\n :param sy: height of the screen\n :type sy: int, float\n :param x_shift: shift in x direction (default = 0.0)\n :param y_shift: shift in y direction (default = 0.0)\n :type x_shift: int, float\n :type y_shift: int, float\n :param angle: rotation angle in degrees (default = 0.0)\n :type angle: int, float\n :return: output field (N x N square array of complex numbers).\n :rtype: `LightPipes.field.Field`\n :Example:\n \n >>> F = RectScreen(F, 3*mm, 4*mm) # A 3 x 4 mm rectangular screen in the center of the grid.\n >>> F = RectScreen(F, 3*mm, 4*mm, 0, -3*mm) # Idem, shifted -3 mm in the y-direction.\n >>> F = RectScreen(F, 3*mm, 4*mm, y_shift = -3*mm) # Idem\n \n .. seealso::\n \n * :ref:`Manual: Apertures and screens<Apertures and screens.>`\n \"\"\"\n Fout = Field.copy(Fin)\n yy, xx = Fout.mgrid_cartesian\n yy = yy - y_shift\n xx = xx - x_shift\n if angle!=0.0:\n ang_rad = -1*angle*deg #-1 copied from Cpp convention\n cc = _np.cos(ang_rad)\n ss = _np.sin(ang_rad)\n xxr = cc * xx + ss * yy\n yyr = -ss * xx + cc * yy\n yy, xx = yyr, xxr\n matchx = _np.abs(xx) <= sx/2\n matchy = _np.abs(yy) <= sy/2\n Fout.field[matchx & matchy] = 0.0\n Fout._IsGauss=False\n return Fout\n\n\ndef Strehl(Fin):\n \"\"\"\n *Calculates the Strehl value of the field*\n \n :param Fin: input field\n :type Fin: Field \n :return: Strehl value of the field\n :rtype: float\n :Example:\n \n >>> S = Strehl(F) # returns the Strehl value of the field\n \n .. seealso::\n \n * :ref:`Manual: Diagnostics: Strehl ratio.<Strehl ratio.>`\n \"\"\"\n normsq = _np.abs(Fin.field).sum()**2\n if normsq == 0.0:\n raise ValueError('Error in Strehl: Zero beam power')\n strehl = _np.real(Fin.field).sum()**2 + _np.imag(Fin.field).sum()**2\n strehl = strehl/normsq\n return strehl\n\n@backward_compatible\ndef SubIntensity(Fin, Intens ):\n \"\"\"\n *Substitutes a given intensity distribution in the field.*\n \n :param Fin: input field\n :type Fin: Field\n :param Intens: N x N square array of real numbers or scalar\n :type Intens: numpy.ndarray, int, float \n :return: output field (N x N square array of complex numbers).\n :rtype: `LightPipes.field.Field`\n :Example:\n \n .. 
seealso::\n \n * :ref:`Manual: User defined phase and intensity filters.<User defined phase and intensity filters.>`\n \"\"\"\n Fout = Field.copy(Fin)\n # Accept a scalar Intens as documented, mirroring the check in SubPhase below:\n if not _np.isscalar(Intens):\n Intens = _np.asarray(Intens)\n if Intens.shape != Fout.field.shape:\n raise ValueError('Intensity map has wrong shape')\n phi = _np.angle(Fout.field)\n Efield = _np.sqrt(Intens)\n Fout.field = Efield * _np.exp(1j * phi)\n Fout._IsGauss=False\n return Fout\n\n@backward_compatible\ndef SubPhase( Fin, Phi):\n \"\"\"\n *Substitutes a given phase distribution in the field.*\n \n :param Fin: input field\n :type Fin: Field\n :param Phi: N x N square array of real numbers or scalar\n :type Phi: numpy.ndarray, int, float\n :return: output field (N x N square array of complex numbers).\n :rtype: `LightPipes.field.Field`\n :Example:\n \n .. seealso::\n \n * :ref:`Manual: User defined phase and intensity filters.<User defined phase and intensity filters.>`\n \"\"\"\n Fout = Field.copy(Fin)\n if not _np.isscalar(Phi):\n Phi = _np.asarray(Phi)\n if Phi.shape != Fin.field.shape:\n raise ValueError('Phase map has wrong shape')\n oldabs = _np.abs(Fout.field)\n Fout.field = oldabs * _np.exp(1j * Phi)\n Fout._IsGauss=False\n return Fout\n\n\n" ]
[ [ "numpy.imag", "numpy.sqrt", "numpy.abs", "numpy.meshgrid", "numpy.asarray", "scipy.interpolate.RectBivariateSpline", "numpy.arange", "scipy.special.genlaguerre", "numpy.cos", "numpy.sin", "scipy.special.hermite", "numpy.real", "numpy.random.rand", "numpy.isscalar", "numpy.average", "numpy.angle", "numpy.exp" ] ]