repo_name (string, lengths 6 to 130) | hexsha (list) | file_path (list) | code (list) | apis (list) |
---|---|---|---|---|
Nishantjannu/dedo
|
[
"cb6bcbc9e3049d7de9385842b8d35f02c946fb94"
] |
[
"dedo/internal/datasets.py"
] |
[
"import sys\nimport time\nimport torch\nfrom torch.utils.data import IterableDataset\nimport numpy as np\n\n\ndef worker_init_fn(worker_id):\n '''Helper function to launch each worker's env with a different seed'''\n\n gym = __import__('gym')\n worker_info = torch.utils.data.get_worker_info()\n ds = worker_info.dataset\n\n # np.random.seed(worker_info.seed)\n np.random.seed(np.random.randint(1212))\n\n args = ds.args\n args.seed = np.random.randint(1212)\n ds.env = gym.make(args.env, args=args)\n ds.env.reset()\n\n print('worker rand', worker_id, np.random.randint(1212))\n\n\nclass DeformEnvDataset(IterableDataset):\n def __init__(self, args):\n super().__init__()\n self.args = args\n print('random_number', np.random.randint(10000))\n worker_info = torch.utils.data.get_worker_info()\n\n def __iter__(self):\n worker_info = torch.utils.data.get_worker_info()\n print('worker_info iter', worker_info)\n args = self.args\n args.seed = worker_info.seed\n return self\n\n def __next__(self):\n print('random_number_iter', np.random.randint(10000))\n worker_info = torch.utils.data.get_worker_info()\n print('worker_info next', worker_info)\n next_obs, rwd, done, info = self.env.step(np.array([0, 0, 0, 0, 0, 0], dtype=np.float16))\n if done:\n raise StopIteration\n return next_obs\n"
] |
[
[
"numpy.array",
"numpy.random.randint",
"torch.utils.data.get_worker_info"
]
] |
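The dedo/internal/datasets.py entry above wraps a gym environment in a torch IterableDataset and uses torch.utils.data.get_worker_info() to give each DataLoader worker its own seed. Below is a minimal, self-contained sketch of that worker-seeding pattern; the gym environment is replaced by a plain random stream, and RandomStream/steps_per_worker are made-up names, so the snippet runs without dedo or gym installed.

```python
# Sketch of the per-worker seeding pattern used by DeformEnvDataset above.
# Each DataLoader worker gets its own RNG seed via torch.utils.data.get_worker_info(),
# so parallel workers produce different random streams.
import numpy as np
import torch
from torch.utils.data import DataLoader, IterableDataset


class RandomStream(IterableDataset):
    """Made-up stand-in for DeformEnvDataset: yields a few random observations per worker."""

    def __init__(self, steps_per_worker: int = 3):
        super().__init__()
        self.steps_per_worker = steps_per_worker
        self.rng = np.random.default_rng(0)  # re-seeded per worker in worker_init_fn

    def __iter__(self):
        # Note: with IterableDataset, every worker replays this stream.
        for _ in range(self.steps_per_worker):
            yield self.rng.standard_normal(4).astype(np.float32)


def worker_init_fn(worker_id: int) -> None:
    # Mirror of the helper above: fetch this worker's dataset copy and
    # give it an RNG derived from the worker's unique torch seed.
    info = torch.utils.data.get_worker_info()
    info.dataset.rng = np.random.default_rng(info.seed)


if __name__ == "__main__":
    loader = DataLoader(RandomStream(), num_workers=2, worker_init_fn=worker_init_fn)
    for batch in loader:
        print(batch.shape)  # torch.Size([1, 4]) per item with the default batch_size
```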
pjrigali/Poker-Now-Analysis
|
[
"be33d04e246f0b80beaebf394bc1eed3eb56d0c6"
] |
[
"poker/poker_class.py"
] |
[
"from typing import List, Optional, Union\nfrom dataclasses import dataclass\nimport pandas as pd\nimport datetime\nfrom os import walk\nfrom poker.base import flatten, unique_values, round_to, native_max\nfrom poker.game_class import Game\n\n\ndef _poker_convert_shape(data: List[str]) -> list:\n \"\"\"Converts card icons into shapes\"\"\"\n return [row.replace(\"â£\", \" Clubs\").replace(\"â¦\", \" Diamonds\").replace(\"â¥\", \" Hearts\").replace(\"â\", \" Spades\") for row in data]\n\n\ndef _poker_convert_timestamp(data: List[str]) -> list:\n \"\"\"Converts strs to timestamps\"\"\"\n return [datetime.datetime.strptime(i.replace('T', ' ').split('.')[0], '%Y-%m-%d %H:%M:%S') for i in data]\n\n\ndef _poker_collect_data(repo_location: str) -> dict:\n \"\"\"Open file, clean data and return a dict\"\"\"\n files = next(walk(repo_location))[2]\n file_dic = {}\n for file in files:\n df = pd.read_csv(repo_location + file, encoding='latin1')\n time_lst = _poker_convert_timestamp(data=df['at'].tolist())\n entry_lst = _poker_convert_shape(data=df['entry'].tolist())\n time_lst.reverse()\n entry_lst.reverse()\n hands, hand_lst = [], []\n for ind, item in enumerate(entry_lst):\n if ' starting hand ' in item:\n if ' hand #1 ' in item:\n hands.append(hand_lst)\n hand_lst = [ind]\n hands.append(hand_lst)\n else:\n hand_lst.append(ind)\n hand_dic = []\n for hand in hands:\n temp_entry_lst, temp_time_lst = [], []\n for ind in hand:\n temp_entry_lst.append(entry_lst[ind]), temp_time_lst.append(time_lst[ind])\n hand_dic.append({'lines': temp_entry_lst, 'times': temp_time_lst})\n file_dic[file.split(\".\")[0]] = hand_dic\n return file_dic\n\n\ndef _poker_build_player_dic(data: dict, matches: list) -> dict:\n \"\"\"Updates Player Class\"\"\"\n player_dic = {}\n for match in matches:\n for player_index in data.keys():\n for key in match.players_data[player_index].player_money_info.keys():\n temp_df = match.players_data[player_index].player_money_info[key]\n if player_index in player_dic.keys():\n if key not in player_dic[player_index]['Games']:\n val = player_dic[player_index]\n val['Player Names'] = list(set(val['Player Names'] + list(temp_df['Player Names'][0])))\n val['Player Ids'] = list(set(val['Player Ids'] + [player_index]))\n val['Buy in Total'] += int(temp_df['Buy in Total'])\n val['Loss Count'] += int(temp_df['Loss Count'][0])\n val['Leave Table Amount'] += temp_df['Leave Table Amount'][0]\n val['Game Count'] += 1\n val['Games'].append(key)\n else:\n player_dic[player_index] = {'Player Names': list(temp_df['Player Names'][0]),\n 'Player Ids': [player_index],\n 'Buy in Total': int(temp_df['Buy in Total'][0]),\n 'Loss Count': int(temp_df['Loss Count'][0]),\n 'Leave Table Amount': temp_df['Leave Table Amount'][0],\n 'Game Count': 1,\n 'Games': [key]}\n return player_dic\n\n\ndef _poker_group_money(data: dict, grouped: Union[list, None], multi: Union[int, None]) -> pd.DataFrame:\n \"\"\"Groups players by id and tally's earnings\"\"\"\n data = pd.DataFrame.from_dict(data, orient='index')\n if grouped is not None:\n final_lst = []\n for ind_group in grouped:\n temp_df = data.loc[ind_group]\n temp_dic = {}\n for col in temp_df.columns:\n if col in ['Player Names', 'Player Ids', 'Games']:\n vals = []\n for item in list(temp_df[col]):\n if type(item) == list:\n vals.append(item)\n elif type(item) == str:\n vals.append([item])\n temp_dic[col] = unique_values(data=flatten(data=vals))\n else:\n temp_dic[col] = sum(temp_df[col].tolist())\n final_lst.append(temp_dic)\n\n grouped_lst = flatten(data=grouped)\n for ind 
in list(data.index):\n if ind not in grouped_lst:\n temp_dic = {}\n for col in data.columns:\n val = data.loc[ind][col]\n if col in ['Player Names', 'Player Ids', 'Games']:\n if type(val) == list:\n temp_dic[col] = val\n elif type(val) == str:\n temp_dic[col] = [val]\n else:\n temp_dic[col] = int(val)\n final_lst.append(temp_dic)\n final_df = pd.DataFrame(final_lst).set_index('Player Ids', drop=False)\n else:\n final_df = data\n\n final_df['Profit'] = final_df['Leave Table Amount'] - final_df['Buy in Total']\n if multi:\n final_df['Buy in Total'] = (final_df['Buy in Total'] / 100).astype(int)\n final_df['Leave Table Amount'] = (final_df['Leave Table Amount'] / 100).astype(int)\n final_df['Profit'] = (final_df['Profit'] / 100).astype(int)\n return final_df.sort_values('Profit', ascending=False).reset_index(drop=True)\n\n\ndef _poker_get_dist(matches: list) -> List[pd.DataFrame]:\n \"\"\"Calculate distributions\"\"\"\n hand_ind = unique_values(data=flatten(data=[list(match.winning_hand_distribution.keys()) for match in matches]))\n hand_dic = {item: 0 for item in hand_ind}\n card_dic = {item: {} for item in ['Flop Count', 'Turn Count', 'River Count', 'Win Count', 'My Cards Count']}\n for match in matches:\n for key, val in match.winning_hand_distribution.items():\n hand_dic[key] += val\n for item in card_dic.keys():\n if item in match.card_distribution.keys():\n for key, val in match.card_distribution[item].items():\n if key in card_dic[item].keys():\n card_dic[item][key] += val\n else:\n card_dic[item][key] = val\n\n card_distribution = pd.DataFrame.from_dict(card_dic).dropna()\n for col in card_distribution.columns:\n s = sum(card_distribution[col].tolist())\n arr = round_to(data=[val / s if val != 0 else 0 for val in card_distribution[col]], val=1000, remainder=True)\n card_distribution[col.replace(\"Count\", \"Percent\")] = arr\n\n winning_hand_dist = pd.DataFrame.from_dict(hand_dic,\n orient='index',\n columns=['Count']).sort_values('Count', ascending=False)\n winning_hand_dist['Percent'] = (winning_hand_dist / winning_hand_dist.sum()).round(3)\n return [card_distribution, winning_hand_dist]\n\n\ndef _poker_build_players(data: dict, money_df: pd.DataFrame) -> None:\n \"\"\"Update Player Class\"\"\"\n for key1, val1 in data.items():\n for i, j in enumerate(money_df['Player Ids']):\n if key1 in j:\n val1.player_index = j\n val1.player_name = list(money_df['Player Names'])[i]\n\n for key2, val2 in val1.moves_dic.items():\n val1.win_percent = [key2, round(len((val2[val2['Win'] == True])) / len(val2), 3)]\n val1.win_count = [key2, len(val2[val2['Win'] == True])]\n val1.largest_win = [key2, native_max(data=val2[val2['Win'] == True]['Win Stack'])]\n temp_df = val2[val2['Class'] == 'Player Stacks']\n temp_player_index_lst = temp_df['Player Index'].tolist()\n temp_player_stack_lst = temp_df['Player Current Chips'].tolist()\n index_lst = []\n for i in temp_player_index_lst:\n for j in i:\n if key1 == j:\n index_lst.append(i.index(key1))\n temp = 0\n for i, j in enumerate(temp_player_stack_lst):\n val = j[index_lst[i]]\n if val > 1:\n previous = temp_player_stack_lst[i - 1][index_lst[i - 1]]\n if val - previous < temp:\n temp = val - previous\n val1.largest_loss = [key2, temp]\n val1.hand_count = [key2, max(val2['Round'].tolist())]\n val1.all_in = [key2, list(val2[val2['All In'] == True]['Bet Amount'])]\n\n\ndef _poker_combine_dic(data: dict, grouped: list) -> dict:\n \"\"\"Setter function\"\"\"\n completed_lst = []\n completed_dic = {}\n for key1, val in data.items():\n for gr in grouped:\n if 
key1 in gr and key1 not in completed_lst:\n completed_lst += gr\n for key2 in gr:\n if key2 != key1:\n for key3 in data[key2].win_percent.keys():\n data[key1].win_percent = [key3, data[key2].win_percent[key3]]\n data[key1].win_count = [key3, data[key2].win_count[key3]]\n data[key1].largest_win = [key3, data[key2].largest_win[key3]]\n data[key1].largest_loss = [key3, data[key2].largest_loss[key3]]\n data[key1].hand_count = [key3, data[key2].hand_count[key3]]\n data[key1].all_in = [key3, data[key2].all_in[key3]]\n data[key1].player_money_info = [key3, data[key2].player_money_info[key3]]\n data[key1].hand_dic = [key3, data[key2].hand_dic[key3]]\n data[key1].card_dic = [key3, data[key2].card_dic[key3]]\n data[key1].line_dic = [key3, data[key2].line_dic[key3]]\n data[key1].moves_dic = [key3, data[key2].moves_dic[key3]]\n completed_dic[key1] = data[key1]\n if key1 not in flatten(data=grouped):\n completed_dic[key1] = data[key1]\n return completed_dic\n\n\ndef _poker_add_merged_moves(player_dic: dict):\n \"\"\"Flattens all Player.moves_dic into one\"\"\"\n for key, val in player_dic.items():\n if player_dic[key].merged_moves is None:\n player_dic[key].merged_moves = {}\n all_dic, win_dic, loss_dic = {}, {}, {}\n for key1, val1 in val.moves_dic.items():\n for key2, val2 in val1.to_dict(orient='list').items():\n if key2 in all_dic.keys():\n all_dic[key2] += val2\n else:\n all_dic[key2] = val2\n player_dic[key].merged_moves['All'] = all_dic\n for key1, val1 in val.moves_dic.items():\n for key2, val2 in val1[val1['Win'] == True].to_dict(orient='list').items():\n if key2 in win_dic.keys():\n win_dic[key2] += val2\n else:\n win_dic[key2] = val2\n player_dic[key].merged_moves['Win'] = win_dic\n for key1, val1 in val.moves_dic.items():\n for key2, val2 in val1[val1['Win'] == False].to_dict(orient='list').items():\n if key2 in loss_dic.keys():\n loss_dic[key2] += val2\n else:\n loss_dic[key2] = val2\n player_dic[key].merged_moves['Loss'] = loss_dic\n\n\n@dataclass\nclass Poker:\n \"\"\"\n\n Calculate stats for all games and players.\n\n :param repo_location: Location of data folder.\n :type repo_location: str\n :param grouped: List of lists, filled with unique player Ids that are related to the same person. 
*Optional*\n :type grouped: str\n :param money_multi: Multiple to divide the money amounts to translate them to dollars *Optional*\n :type money_multi: int\n :example:\n >>> from poker.poker_class import Poker\n >>> repo = 'location of your previous game'\n >>> grouped = [['YEtsj6CMK4', 'M_ODMJ-3Je', 'DZy-22KNBS'],\n >>> ['48QVRRsiae', 'u8_FUbXpAz']]\n >>> poker = Poker(repo_location=repo, grouped=grouped)\n :note: Grouped will need to be figured out by the player.\n The grouped stats are only taken into account within this class\n\n \"\"\"\n def __init__(self, repo_location: str, grouped: Optional[list] = None, money_multi: Optional[int] = 100):\n self._repo_location = repo_location\n\n self._grouped = None\n if grouped:\n self._grouped = grouped\n\n x = _poker_collect_data(repo_location=repo_location)\n self._files = list(x.keys())\n players_data = {}\n self._matches = {file: Game(hand_lst=x[file], file_id=file, players_data=players_data) for file in self._files}\n player_dic = _poker_build_player_dic(data=players_data, matches=list(self._matches.values()))\n self._player_money_df = _poker_group_money(data=player_dic, grouped=self._grouped, multi=money_multi)\n self._card_distribution, self._winning_hand_dist = _poker_get_dist(matches=list(self._matches.values()))\n _poker_build_players(data=players_data, money_df=self._player_money_df)\n self._players = _poker_combine_dic(data=players_data, grouped=self._grouped)\n _poker_add_merged_moves(player_dic=self._players)\n\n def __repr__(self):\n return \"Poker\"\n\n @property\n def files(self) -> List[str]:\n \"\"\"Returns list of data files\"\"\"\n return self._files\n\n @property\n def matches(self) -> dict:\n \"\"\"Returns list of games\"\"\"\n return self._matches\n\n @property\n def players_money_overview(self) -> pd.DataFrame:\n \"\"\"Returns summary info for each player across games\"\"\"\n return self._player_money_df\n\n @property\n def card_distribution(self) -> pd.DataFrame:\n \"\"\"Returns count and percent for each card that showed up across games\"\"\"\n return self._card_distribution\n\n @property\n def winning_hand_distribution(self) -> pd.DataFrame:\n \"\"\"Returns count and percent of each type of winning hand across games\"\"\"\n return self._winning_hand_dist\n\n @property\n def players_history(self) -> dict:\n \"\"\"Collects player stats for all matches and groups based on grouper input\"\"\"\n return self._players\n"
] |
[
[
"pandas.DataFrame.from_dict",
"pandas.DataFrame",
"pandas.read_csv"
]
] |
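The poker_class.py entry above aggregates per-player money stats with pandas: _poker_group_money builds a DataFrame from a player dict, derives Profit, and scales cent amounts by money_multi. A small sketch of that aggregation step, with made-up player ids and amounts:

```python
# Sketch of the money-grouping step performed by _poker_group_money above.
# Player stats keyed by player id become a DataFrame, profit is derived, and
# cent amounts are divided by money_multi to report dollars.
import pandas as pd

player_dic = {
    "YEtsj6CMK4": {"Player Names": ["alice"], "Buy in Total": 2000,
                   "Leave Table Amount": 3500, "Game Count": 2},
    "48QVRRsiae": {"Player Names": ["bob"], "Buy in Total": 2000,
                   "Leave Table Amount": 1200, "Game Count": 2},
}

money_multi = 100  # as in Poker(money_multi=100): divide cents by 100

df = pd.DataFrame.from_dict(player_dic, orient="index")
df["Profit"] = df["Leave Table Amount"] - df["Buy in Total"]
for col in ("Buy in Total", "Leave Table Amount", "Profit"):
    df[col] = (df[col] / money_multi).astype(int)

print(df.sort_values("Profit", ascending=False))
```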
guyhwilson/ssm
|
[
"4349d6b1fad93f49508e5ca1f8326d24d3cfb097"
] |
[
"ssm/emissions.py"
] |
[
"from warnings import warn\n\nimport autograd.numpy as np\nimport autograd.numpy.random as npr\nfrom autograd.scipy.special import gammaln\nfrom autograd import hessian\n\nfrom ssm.util import ensure_args_are_lists, \\\n logistic, logit, softplus, inv_softplus\nfrom ssm.preprocessing import interpolate_data, pca_with_imputation\nfrom ssm.optimizers import adam, bfgs, rmsprop, sgd, lbfgs\nfrom ssm.stats import independent_studentst_logpdf, bernoulli_logpdf\n\n\n# Observation models for SLDS\nclass Emissions(object):\n def __init__(self, N, K, D, M=0, single_subspace=True):\n self.N, self.K, self.D, self.M, self.single_subspace = \\\n N, K, D, M, single_subspace\n\n @property\n def params(self):\n raise NotImplementedError\n\n @params.setter\n def params(self, value):\n raise NotImplementedError\n\n def permute(self, perm):\n pass\n\n @ensure_args_are_lists\n def initialize(self, datas, inputs=None, masks=None, tags=None, num_em_iters=25):\n pass\n\n def initialize_from_arhmm(self, arhmm, pca):\n pass\n\n def log_prior(self):\n return 0\n\n def log_likelihoods(self, data, input, mask, tag, x):\n raise NotImplementedError\n\n def forward(self, x, input=None, tag=None):\n raise NotImplementedError\n\n def invert(self, data, input=None, mask=None, tag=None):\n raise NotImplementedError\n\n def sample(self, z, x, input=None, tag=None):\n raise NotImplementedError\n\n def smooth(self, expected_states, variational_mean, data, input=None, mask=None, tag=None):\n \"\"\"\n Compute the mean observation under the posterior distribution\n of latent discrete states.\n \"\"\"\n raise NotImplementedError\n\n def neg_hessian_log_emissions_prob(self, data, input, mask, tag, x, Ez):\n if self.single_subspace is False:\n raise Exception(\"Multiple subspaces are not supported for this Emissions class.\")\n warn(\"Analytical Hessian is not implemented for this Emissions class. \\\n Optimization via Laplace-EM may be slow. 
Consider using an \\\n alternative posterior and inference method.\")\n # Return (T, D, D) array of blocks for the diagonal of the Hessian\n obj = lambda xt, datat, inputt, maskt: \\\n self.log_likelihoods(datat[None,:], inputt[None,:], maskt[None,:], tag, xt[None,:])[0, 0]\n hess = hessian(obj)\n terms = np.array([np.squeeze(hess(xt, datat, inputt, maskt))\n for xt, datat, inputt, maskt in zip(x, data, input, mask)])\n return -1 * terms\n\n def m_step(self, discrete_expectations, continuous_expectations,\n datas, inputs, masks, tags,\n optimizer=\"bfgs\", maxiter=100, **kwargs):\n \"\"\"\n If M-step in Laplace-EM cannot be done in closed form for the emissions, default to SGD.\n \"\"\"\n optimizer = dict(adam=adam, bfgs=bfgs, lbfgs=lbfgs, rmsprop=rmsprop, sgd=sgd)[optimizer]\n\n # expected log likelihood\n T = sum([data.shape[0] for data in datas])\n def _objective(params, itr):\n self.params = params\n obj = 0\n obj += self.log_prior()\n for data, input, mask, tag, x, (Ez, _, _) in \\\n zip(datas, inputs, masks, tags, continuous_expectations, discrete_expectations):\n obj += np.sum(Ez * self.log_likelihoods(data, input, mask, tag, x))\n return -obj / T\n\n # Optimize emissions log-likelihood\n self.params = optimizer(_objective, self.params,\n num_iters=maxiter,\n suppress_warnings=True,\n **kwargs)\n\n\n# Many emissions models start with a linear layer\nclass _LinearEmissions(Emissions):\n \"\"\"\n A simple linear mapping from continuous states x to data y.\n\n E[y | x] = Cx + d + Fu\n\n where C is an emission matrix, d is a bias, F an input matrix,\n and u is an input.\n \"\"\"\n def __init__(self, N, K, D, M=0, single_subspace=True):\n super(_LinearEmissions, self).__init__(N, K, D, M=M, single_subspace=single_subspace)\n\n # Initialize linear layer. 
Set _Cs to be private so that it can be\n # changed in subclasses.\n self._Cs = npr.randn(1, N, D) if single_subspace else npr.randn(K, N, D)\n self.Fs = npr.randn(1, N, M) if single_subspace else npr.randn(K, N, M)\n self.ds = npr.randn(1, N) if single_subspace else npr.randn(K, N)\n\n @property\n def Cs(self):\n return self._Cs\n\n @Cs.setter\n def Cs(self, value):\n K, N, D = self.K, self.N, self.D\n assert value.shape == (1, N, D) if self.single_subspace else (K, N, D)\n self._Cs = value\n\n @property\n def params(self):\n return self.Cs, self.Fs, self.ds\n\n @params.setter\n def params(self, value):\n self.Cs, self.Fs, self.ds = value\n\n def permute(self, perm):\n if not self.single_subspace:\n self.Cs = self.Cs[perm]\n self.Fs = self.Fs[perm]\n self.ds = self.ds[perm]\n\n def _invert(self, data, input=None, mask=None, tag=None):\n \"\"\"\n Approximate invert the linear emission model with the pseudoinverse\n\n y = Cx + d + noise; C orthogonal.\n xhat = (C^T C)^{-1} C^T (y-d)\n \"\"\"\n assert self.single_subspace, \"Can only invert with a single emission model\"\n\n C, F, d = self.Cs[0], self.Fs[0], self.ds[0]\n C_pseudoinv = np.linalg.solve(C.T.dot(C), C.T).T\n\n # Account for the bias\n bias = input.dot(F.T) + d\n\n if not np.all(mask):\n data = interpolate_data(data, mask)\n # We would like to find the PCA coordinates in the face of missing data\n # To do so, alternate between running PCA and imputing the missing entries\n for itr in range(25):\n mu = (data - bias).dot(C_pseudoinv)\n data[:, ~mask[0]] = (mu.dot(C.T) + bias)[:, ~mask[0]]\n\n # Project data to get the mean\n return (data - bias).dot(C_pseudoinv)\n\n def forward(self, x, input, tag):\n return np.matmul(self.Cs[None, ...], x[:, None, :, None])[:, :, :, 0] \\\n + np.matmul(self.Fs[None, ...], input[:, None, :, None])[:, :, :, 0] \\\n + self.ds\n\n @ensure_args_are_lists\n def _initialize_with_pca(self, datas, inputs=None, masks=None, tags=None, num_iters=20):\n Keff = 1 if self.single_subspace else self.K\n\n # First solve a linear regression for data given input\n if self.M > 0:\n from sklearn.linear_model import LinearRegression\n lr = LinearRegression(fit_intercept=False)\n lr.fit(np.vstack(inputs), np.vstack(datas))\n self.Fs = np.tile(lr.coef_[None, :, :], (Keff, 1, 1))\n\n # Compute residual after accounting for input\n resids = [data - np.dot(input, self.Fs[0].T) for data, input in zip(datas, inputs)]\n\n # Run PCA to get a linear embedding of the data\n pca, xs, ll = pca_with_imputation(self.D, resids, masks, num_iters=num_iters)\n\n self.Cs = np.tile(pca.components_.T[None, :, :], (Keff, 1, 1))\n self.ds = np.tile(pca.mean_[None, :], (Keff, 1))\n\n return pca\n\n\nclass _OrthogonalLinearEmissions(_LinearEmissions):\n \"\"\"\n A linear emissions matrix constrained such that the emissions matrix\n is orthogonal. Use the rational Cayley transform to parameterize\n the set of orthogonal emission matrices. 
See\n https://pubs.acs.org/doi/pdf/10.1021/acs.jpca.5b02015\n for a derivation of the rational Cayley transform.\n \"\"\"\n def __init__(self, N, K, D, M=0, single_subspace=True):\n super(_OrthogonalLinearEmissions, self).__init__(N, K, D, M=M, single_subspace=single_subspace)\n\n # Initialize linear layer\n assert N > D\n self._Ms = npr.randn(1, D, D) if single_subspace else npr.randn(K, D, D)\n self._As = npr.randn(1, N-D, D) if single_subspace else npr.randn(K, N-D, D)\n self.Fs = npr.randn(1, N, M) if single_subspace else npr.randn(K, N, M)\n self.ds = npr.randn(1, N) if single_subspace else npr.randn(K, N)\n\n # Set the emission matrix to be a random orthogonal matrix\n C0 = npr.randn(1, N, D) if single_subspace else npr.randn(K, N, D)\n for k in range(C0.shape[0]):\n C0[k] = np.linalg.svd(C0[k], full_matrices=False)[0]\n self.Cs = C0\n\n @property\n def Cs(self):\n D = self.D\n T = lambda X: np.swapaxes(X, -1, -2)\n\n Bs = 0.5 * (self._Ms - T(self._Ms)) # Bs is skew symmetric\n Fs = np.matmul(T(self._As), self._As) - Bs\n trm1 = np.concatenate((np.eye(D) - Fs, 2 * self._As), axis=1)\n trm2 = np.eye(D) + Fs\n Cs = T(np.linalg.solve(T(trm2), T(trm1)))\n assert np.allclose(\n np.matmul(T(Cs), Cs),\n np.tile(np.eye(D)[None, :, :], (Cs.shape[0], 1, 1))\n )\n return Cs\n\n @Cs.setter\n def Cs(self, value):\n N, D = self.N, self.D\n T = lambda X: np.swapaxes(X, -1, -2)\n\n # Make sure value is the right shape and orthogonal\n Keff = 1 if self.single_subspace else self.K\n assert value.shape == (Keff, N, D)\n assert np.allclose(\n np.matmul(T(value), value),\n np.tile(np.eye(D)[None, :, :], (Keff, 1, 1))\n )\n\n Q1s, Q2s = value[:, :D, :], value[:, D:, :]\n Fs = T(np.linalg.solve(T(np.eye(D) + Q1s), T(np.eye(D) - Q1s)))\n # Bs = 0.5 * (T(Fs) - Fs) = 0.5 * (self._Ms - T(self._Ms)) -> _Ms = T(Fs)\n self._Ms = T(Fs)\n self._As = 0.5 * np.matmul(Q2s, np.eye(D) + Fs)\n assert np.allclose(self.Cs, value)\n\n @property\n def params(self):\n return self._As, self._Ms, self.Fs, self.ds\n\n @params.setter\n def params(self, value):\n self._As, self._Ms, self.Fs, self.ds = value\n\n def permute(self, perm):\n if not self.single_subspace:\n self._As = self._As[perm]\n self._Ms = self._Ms[perm]\n self.Fs = self.Fs[perm]\n self.ds = self.ds[perm]\n\n\n# Sometimes we just want a bit of additive noise on the observations\nclass _IdentityEmissions(Emissions):\n def __init__(self, N, K, D, M=0, single_subspace=True):\n super(_IdentityEmissions, self).__init__(N, K, D, M=M, single_subspace=single_subspace)\n assert N == D\n\n @property\n def params(self):\n return ()\n\n @params.setter\n def params(self, value):\n pass\n\n def forward(self, x, input, tag):\n return x[:, None, :]\n\n def _invert(self, data, input=None, mask=None, tag=None):\n \"\"\"\n Inverse is just the data\n \"\"\"\n return np.copy(data)\n\n\n# Allow general nonlinear emission models with neural networks\nclass _NeuralNetworkEmissions(Emissions):\n def __init__(self, N, K, D, M=0, hidden_layer_sizes=(50,), single_subspace=True):\n assert single_subspace, \"_NeuralNetworkEmissions only supports `single_subspace=True`\"\n super(_NeuralNetworkEmissions, self).__init__(N, K, D, M=M, single_subspace=True)\n\n # Initialize the neural network weights\n assert N > D\n layer_sizes = (D + M,) + hidden_layer_sizes + (N,)\n self.weights = [npr.randn(m, n) for m, n in zip(layer_sizes[:-1], layer_sizes[1:])]\n self.biases = [npr.randn(n) for n in layer_sizes[1:]]\n\n @property\n def params(self):\n return self.weights, self.biases\n\n @params.setter\n def 
params(self, value):\n self.weights, self.biases = value\n\n def permute(self, perm):\n pass\n\n def forward(self, x, input, tag):\n inputs = np.column_stack((x, input))\n for W, b in zip(self.weights, self.biases):\n outputs = np.dot(inputs, W) + b\n inputs = np.tanh(outputs)\n return outputs[:, None, :]\n\n def _invert(self, data, input=None, mask=None, tag=None):\n \"\"\"\n Inverse is... who knows!\n \"\"\"\n return npr.randn(data.shape[0], self.D)\n\n\n# Observation models for SLDS\nclass _GaussianEmissionsMixin(object):\n def __init__(self, N, K, D, M=0, single_subspace=True, **kwargs):\n super(_GaussianEmissionsMixin, self).__init__(N, K, D, M=M, single_subspace=single_subspace, **kwargs)\n self.inv_etas = -1 + npr.randn(1, N) if single_subspace else npr.randn(K, N)\n\n @property\n def params(self):\n return super(_GaussianEmissionsMixin, self).params + (self.inv_etas,)\n\n @params.setter\n def params(self, value):\n self.inv_etas = value[-1]\n super(_GaussianEmissionsMixin, self.__class__).params.fset(self, value[:-1])\n\n def permute(self, perm):\n super(_GaussianEmissionsMixin, self).permute(perm)\n if not self.single_subspace:\n self.inv_etas = self.inv_etas[perm]\n\n def log_likelihoods(self, data, input, mask, tag, x):\n mus = self.forward(x, input, tag)\n etas = np.exp(self.inv_etas)\n lls = -0.5 * np.log(2 * np.pi * etas) - 0.5 * (data[:, None, :] - mus)**2 / etas\n return np.sum(lls * mask[:, None, :], axis=2)\n\n def invert(self, data, input=None, mask=None, tag=None):\n return self._invert(data, input=input, mask=mask, tag=tag)\n\n def sample(self, z, x, input=None, tag=None):\n T = z.shape[0]\n z = np.zeros_like(z, dtype=int) if self.single_subspace else z\n mus = self.forward(x, input, tag)\n etas = np.exp(self.inv_etas)\n return mus[np.arange(T), z, :] + np.sqrt(etas[z]) * npr.randn(T, self.N)\n\n def smooth(self, expected_states, variational_mean, data, input=None, mask=None, tag=None):\n mus = self.forward(variational_mean, input, tag)\n return mus[:, 0, :] if self.single_subspace else np.sum(mus * expected_states[:,:,None], axis=1)\n\n\nclass GaussianEmissions(_GaussianEmissionsMixin, _LinearEmissions):\n\n @ensure_args_are_lists\n def initialize(self, datas, inputs=None, masks=None, tags=None):\n # datas = [interpolate_data(data, mask) for data, mask in zip(datas, masks)]\n # pca = self._initialize_with_pca(datas, inputs=inputs, masks=masks, tags=tags)\n # self.inv_etas[:,...] 
= np.log(pca.noise_variance_)\n pass\n\n def neg_hessian_log_emissions_prob(self, data, input, mask, tag, x, Ez):\n # Return (T, D, D) array of blocks for the diagonal of the Hessian\n T, D = data.shape\n if self.single_subspace:\n block = -1.0 * self.Cs[0].T@np.diag( 1.0 / np.exp(self.inv_etas[0]) )@self.Cs[0]\n hess = np.tile(block[None,:,:], (T, 1, 1))\n else:\n blocks = np.array([-1.0 * C.T@np.diag(1.0/np.exp(inv_eta))@C\n for C, inv_eta in zip(self.Cs, self.inv_etas)])\n hess = np.sum(Ez[:,:,None,None] * blocks, axis=1)\n return -1 * hess\n\n def m_step(self, discrete_expectations, continuous_expectations,\n datas, inputs, masks, tags,\n optimizer=\"bfgs\", maxiter=100, **kwargs):\n\n if self.single_subspace and np.all(masks):\n # Return exact m-step updates for C, F, d, and inv_etas\n # stack across all datas\n x = np.vstack(continuous_expectations)\n u = np.vstack(inputs)\n y = np.vstack(datas)\n T, D = np.shape(x)\n xb = np.hstack((np.ones((T,1)),x,u)) # design matrix\n params = np.linalg.lstsq(xb.T@xb, xb.T@y, rcond=None)[0].T\n self.ds = params[:,0].reshape((1,self.N))\n self.Cs = params[:,1:D+1].reshape((1,self.N,self.D))\n if self.M > 0:\n self.Fs = params[:,D+1:].reshape((1,self.N,self.M))\n mu = np.dot(xb, params.T)\n Sigma = (y-mu).T@(y-mu) / T\n self.inv_etas = np.log(np.diag(Sigma)).reshape((1,self.N))\n else:\n Emissions.m_step(self, discrete_expectations, continuous_expectations,\n datas, inputs, masks, tags,\n optimizer=optimizer, maxiter=maxiter, **kwargs)\n\n\nclass GaussianOrthogonalEmissions(_GaussianEmissionsMixin, _OrthogonalLinearEmissions):\n\n @ensure_args_are_lists\n def initialize(self, datas, inputs=None, masks=None, tags=None):\n datas = [interpolate_data(data, mask) for data, mask in zip(datas, masks)]\n pca = self._initialize_with_pca(datas, inputs=inputs, masks=masks, tags=tags)\n self.inv_etas[:,...] 
= np.log(pca.noise_variance_)\n\n def neg_hessian_log_emissions_prob(self, data, input, mask, tag, x, Ez):\n # Return (T, D, D) array of blocks for the diagonal of the Hessian\n T, D = data.shape\n if self.single_subspace:\n block = -1.0 * self.Cs[0].T@np.diag( 1.0 / np.exp(self.inv_etas[0]) )@self.Cs[0]\n hess = np.tile(block[None,:,:], (T, 1, 1))\n else:\n blocks = np.array([-1.0 * C.T@np.diag(1.0/np.exp(inv_eta))@C\n for C, inv_eta in zip(self.Cs, self.inv_etas)])\n hess = np.sum(Ez[:,:,None,None] * blocks, axis=1)\n return -1 * hess\n\n\nclass GaussianIdentityEmissions(_GaussianEmissionsMixin, _IdentityEmissions):\n\n def neg_hessian_log_emissions_prob(self, data, input, mask, tag, x, Ez):\n # Return (T, D, D) array of blocks for the diagonal of the Hessian\n T, D = data.shape\n if self.single_subspace:\n block = -1.0 * np.diag(1.0 / np.exp(self.inv_etas[0]))\n hess = np.tile(block[None, :, :], (T, 1, 1))\n else:\n raise NotImplementedError\n\n return -1 * hess\n\n\nclass GaussianNeuralNetworkEmissions(_GaussianEmissionsMixin, _NeuralNetworkEmissions):\n pass\n\n\nclass _StudentsTEmissionsMixin(object):\n def __init__(self, N, K, D, M=0, single_subspace=True, **kwargs):\n super(_StudentsTEmissionsMixin, self).__init__(N, K, D, M, single_subspace=single_subspace, **kwargs)\n self.inv_etas = -4 + npr.randn(1, N) if single_subspace else npr.randn(K, N)\n self.inv_nus = np.log(4) * np.ones((1, N)) if single_subspace else np.log(4) * np.ones(K, N)\n\n @property\n def params(self):\n return super(_StudentsTEmissionsMixin, self).params + (self.inv_etas, self.inv_nus)\n\n @params.setter\n def params(self, value):\n super(_StudentsTEmissionsMixin, self.__class__).params.fset(self, value[:-2])\n\n def permute(self, perm):\n super(_StudentsTEmissionsMixin, self).permute(perm)\n if not self.single_subspace:\n self.inv_etas = self.inv_etas[perm]\n self.inv_nus = self.inv_nus[perm]\n\n def log_likelihoods(self, data, input, mask, tag, x):\n etas, nus = np.exp(self.inv_etas), np.exp(self.inv_nus)\n mus = self.forward(x, input, tag)\n return independent_studentst_logpdf(data[:, None, :],\n mus, etas, nus, mask=mask[:, None, :])\n\n def invert(self, data, input=None, mask=None, tag=None):\n return self._invert(data, input=input, mask=mask, tag=tag)\n\n def sample(self, z, x, input=None, tag=None):\n T = z.shape[0]\n z = np.zeros_like(z, dtype=int) if self.single_subspace else z\n mus = self.forward(x, input, tag)\n nus = np.exp(self.inv_nus)\n etas = np.exp(self.inv_etas)\n taus = npr.gamma(nus[z] / 2.0, 2.0 / nus[z])\n return mus[np.arange(T), z, :] + np.sqrt(etas[z] / taus) * npr.randn(T, self.N)\n\n def smooth(self, expected_states, variational_mean, data, input=None, mask=None, tag=None):\n mus = self.forward(variational_mean, input, tag)\n return mus[:,0,:] if self.single_subspace else np.sum(mus * expected_states[:,:,None], axis=1)\n\n\nclass StudentsTEmissions(_StudentsTEmissionsMixin, _LinearEmissions):\n\n @ensure_args_are_lists\n def initialize(self, datas, inputs=None, masks=None, tags=None):\n datas = [interpolate_data(data, mask) for data, mask in zip(datas, masks)]\n pca = self._initialize_with_pca(datas, inputs=inputs, masks=masks, tags=tags)\n self.inv_etas[:,...] 
= np.log(pca.noise_variance_)\n\n\nclass StudentsTOrthogonalEmissions(_StudentsTEmissionsMixin, _OrthogonalLinearEmissions):\n\n @ensure_args_are_lists\n def initialize(self, datas, inputs=None, masks=None, tags=None):\n datas = [interpolate_data(data, mask) for data, mask in zip(datas, masks)]\n pca = self._initialize_with_pca(datas, inputs=inputs, masks=masks, tags=tags)\n self.inv_etas[:,...] = np.log(pca.noise_variance_)\n\n\nclass StudentsTIdentityEmissions(_StudentsTEmissionsMixin, _IdentityEmissions):\n pass\n\n\nclass StudentsTNeuralNetworkEmissions(_StudentsTEmissionsMixin, _NeuralNetworkEmissions):\n pass\n\n\nclass _BernoulliEmissionsMixin(object):\n def __init__(self, N, K, D, M=0, single_subspace=True, link=\"logit\", **kwargs):\n super(_BernoulliEmissionsMixin, self).__init__(N, K, D, M, single_subspace=single_subspace, **kwargs)\n\n self.link_name = link\n mean_functions = dict(\n logit=logistic\n )\n self.mean = mean_functions[link]\n\n link_functions = dict(\n logit=logit\n )\n self.link = link_functions[link]\n\n def log_likelihoods(self, data, input, mask, tag, x):\n assert data.dtype == bool or (data.dtype == int and data.min() >= 0 and data.max() <= 1)\n assert self.link_name == \"logit\", \"Log likelihood is only implemented for logit link.\"\n logit_ps = self.forward(x, input, tag)\n mask = np.ones_like(data, dtype=bool) if mask is None else mask\n return bernoulli_logpdf(data[:, None, :], logit_ps, mask=mask[:, None, :])\n\n def invert(self, data, input=None, mask=None, tag=None):\n yhat = self.link(np.clip(data, .1, .9))\n return self._invert(yhat, input=input, mask=mask, tag=tag)\n\n def sample(self, z, x, input=None, tag=None):\n T = z.shape[0]\n z = np.zeros_like(z, dtype=int) if self.single_subspace else z\n ps = self.mean(self.forward(x, input, tag))\n return (npr.rand(T, self.N) < ps[np.arange(T), z,:]).astype(int)\n\n def smooth(self, expected_states, variational_mean, data, input=None, mask=None, tag=None):\n ps = self.mean(self.forward(variational_mean, input, tag))\n return ps[:,0,:] if self.single_subspace else np.sum(ps * expected_states[:,:,None], axis=1)\n\n\nclass BernoulliEmissions(_BernoulliEmissionsMixin, _LinearEmissions):\n @ensure_args_are_lists\n def initialize(self, datas, inputs=None, masks=None, tags=None):\n datas = [interpolate_data(data, mask) for data, mask in zip(datas, masks)]\n yhats = [self.link(np.clip(d, .1, .9)) for d in datas]\n self._initialize_with_pca(yhats, inputs=inputs, masks=masks, tags=tags)\n\n def neg_hessian_log_emissions_prob(self, data, input, mask, tag, x, Ez):\n \"\"\"\n d/dx (y - p) * C\n = -dpsi/dx (dp/d\\psi) C\n = -C p (1-p) C\n \"\"\"\n if self.single_subspace is False:\n raise Exception(\"Multiple subspaces are not supported for this Emissions class.\")\n assert self.link_name == \"logit\"\n psi = self.forward(x, input, tag)[:, 0, :]\n p = self.mean(psi)\n dp_dpsi = p * (1 - p)\n hess = np.einsum('tn, ni, nj ->tij', -dp_dpsi, self.Cs[0], self.Cs[0])\n return -1 * hess\n\n\nclass BernoulliOrthogonalEmissions(_BernoulliEmissionsMixin, _OrthogonalLinearEmissions):\n @ensure_args_are_lists\n def initialize(self, datas, inputs=None, masks=None, tags=None):\n datas = [interpolate_data(data, mask) for data, mask in zip(datas, masks)]\n yhats = [self.link(np.clip(d, .1, .9)) for d in datas]\n self._initialize_with_pca(yhats, inputs=inputs, masks=masks, tags=tags)\n\n def neg_hessian_log_emissions_prob(self, data, input, mask, tag, x, Ez):\n \"\"\"\n d/dx (y - p) * C\n = -dpsi/dx (dp/d\\psi) C\n = -C p (1-p) C\n 
\"\"\"\n if self.single_subspace is False:\n raise Exception(\"Multiple subspaces are not supported for this Emissions class.\")\n assert self.link_name == \"logit\"\n psi = self.forward(x, input, tag)[:, 0, :]\n p = self.mean(psi)\n dp_dpsi = p * (1 - p)\n hess = np.einsum('tn, ni, nj ->tij', -dp_dpsi, self.Cs[0], self.Cs[0])\n return -1 * hess\n\n\nclass BernoulliIdentityEmissions(_BernoulliEmissionsMixin, _IdentityEmissions):\n pass\n\n\nclass BernoulliNeuralNetworkEmissions(_BernoulliEmissionsMixin, _NeuralNetworkEmissions):\n pass\n\n\nclass _PoissonEmissionsMixin(object):\n def __init__(self, N, K, D, M=0, single_subspace=True, link=\"softplus\", bin_size=1.0, **kwargs):\n\n super(_PoissonEmissionsMixin, self).__init__(N, K, D, M, single_subspace=single_subspace, **kwargs)\n\n self.link_name = link\n self.bin_size = bin_size\n mean_functions = dict(\n log=self._log_mean,\n softplus=self._softplus_mean\n )\n self.mean = mean_functions[link]\n link_functions = dict(\n log=self._log_link,\n softplus=self._softplus_link\n )\n self.link = link_functions[link]\n\n # Set the bias to be small if using log link\n if link == \"log\":\n self.ds = -3 + .5 * npr.randn(1, N) if single_subspace else npr.randn(K, N)\n\n def _log_mean(self, x):\n return np.exp(x) * self.bin_size\n\n def _softplus_mean(self, x):\n return softplus(x) * self.bin_size\n\n def _log_link(self, rate):\n return np.log(rate) - np.log(self.bin_size)\n\n def _softplus_link(self, rate):\n return inv_softplus(rate / self.bin_size)\n\n def log_likelihoods(self, data, input, mask, tag, x):\n assert data.dtype == int\n lambdas = self.mean(self.forward(x, input, tag))\n mask = np.ones_like(data, dtype=bool) if mask is None else mask\n lls = -gammaln(data[:,None,:] + 1) -lambdas + data[:,None,:] * np.log(lambdas)\n return np.sum(lls * mask[:, None, :], axis=2)\n\n def invert(self, data, input=None, mask=None, tag=None):\n yhat = self.link(np.clip(data, .1, np.inf))\n return self._invert(yhat, input=input, mask=mask, tag=tag)\n\n def sample(self, z, x, input=None, tag=None):\n T = z.shape[0]\n z = np.zeros_like(z, dtype=int) if self.single_subspace else z\n lambdas = self.mean(self.forward(x, input, tag))\n y = npr.poisson(lambdas[np.arange(T), z, :])\n return y\n\n def smooth(self, expected_states, variational_mean, data, input=None, mask=None, tag=None):\n lambdas = self.mean(self.forward(variational_mean, input, tag))\n return lambdas[:,0,:] if self.single_subspace else np.sum(lambdas * expected_states[:,:,None], axis=1)\n\nclass PoissonEmissions(_PoissonEmissionsMixin, _LinearEmissions):\n def __init__(self, N, K, D, M=0, single_subspace=True, **kwargs):\n super(PoissonEmissions, self).__init__(N, K, D, M=M, single_subspace=single_subspace, **kwargs)\n # Scale down the measurement and control matrices so that\n # rate params don't explode when exponentiated.\n if self.link_name == \"log\":\n self.Cs /= np.exp(np.linalg.norm(self.Cs, axis=2)[:,:,None])\n self.Fs /= np.exp(np.linalg.norm(self.Fs, axis=2)[:,:,None])\n\n @ensure_args_are_lists\n def initialize(self, datas, inputs=None, masks=None, tags=None):\n datas = [interpolate_data(data, mask) for data, mask in zip(datas, masks)]\n yhats = [self.link(np.clip(d, .1, np.inf)) for d in datas]\n self._initialize_with_pca(yhats, inputs=inputs, masks=masks, tags=tags)\n\n def neg_hessian_log_emissions_prob(self, data, input, mask, tag, x, Ez):\n \"\"\"\n d/dx log p(y | x) = d/dx [y * (Cx + Fu + d) - exp(Cx + Fu + d)\n = y * C - lmbda * C\n = (y - lmbda) * C\n\n d/dx (y - lmbda)^T C = 
d/dx -exp(Cx + Fu + d)^T C\n = -C^T exp(Cx + Fu + d)^T C\n \"\"\"\n if self.single_subspace is False:\n raise Exception(\"Multiple subspaces are not supported for this Emissions class.\")\n\n if self.link_name == \"log\":\n lambdas = self.mean(self.forward(x, input, tag))\n hess = np.einsum('tn, ni, nj ->tij', -lambdas[:, 0, :], self.Cs[0], self.Cs[0])\n return -1 * hess\n\n elif self.link_name == \"softplus\":\n lambdas = self.mean(self.forward(x, input, tag))[:, 0, :] / self.bin_size\n expterms = np.exp(-np.dot(x,self.Cs[0].T)-np.dot(input,self.Fs[0].T)-self.ds[0])\n diags = (data / lambdas * (expterms - 1.0 / lambdas) - expterms * self.bin_size) / (1.0+expterms)**2\n hess = np.einsum('tn, ni, nj ->tij', diags, self.Cs[0], self.Cs[0])\n return -1 * hess\n\n else:\n raise Exception(\"No Hessian calculation for link: {}\".format(self.link_name))\n\n\nclass PoissonOrthogonalEmissions(_PoissonEmissionsMixin, _OrthogonalLinearEmissions):\n @ensure_args_are_lists\n def initialize(self, datas, inputs=None, masks=None, tags=None):\n datas = [interpolate_data(data, mask) for data, mask in zip(datas, masks)]\n yhats = [self.link(np.clip(d, .1, np.inf)) for d in datas]\n self._initialize_with_pca(yhats, inputs=inputs, masks=masks, tags=tags)\n\n def neg_hessian_log_emissions_prob(self, data, input, mask, tag, x, Ez):\n \"\"\"\n d/dx log p(y | x) = d/dx [y * (Cx + Fu + d) - exp(Cx + Fu + d)\n = y * C - lmbda * C\n = (y - lmbda) * C\n\n d/dx (y - lmbda)^T C = d/dx -exp(Cx + Fu + d)^T C\n = -C^T exp(Cx + Fu + d)^T C\n \"\"\"\n if self.single_subspace is False:\n raise Exception(\"Multiple subspaces are not supported for this Emissions class.\")\n\n if self.link_name == \"log\":\n lambdas = self.mean(self.forward(x, input, tag))\n hess = np.einsum('tn, ni, nj ->tij', -lambdas[:, 0, :], self.Cs[0], self.Cs[0])\n return -1 * hess\n\n elif self.link_name == \"softplus\":\n lambdas = self.mean(self.forward(x, input, tag))[:, 0, :] / self.bin_size\n expterms = np.exp(-np.dot(x,self.Cs[0].T)-np.dot(input,self.Fs[0].T)-self.ds[0])\n diags = (data / lambdas * (expterms - 1.0 / lambdas) - expterms * self.bin_size) / (1.0+expterms)**2\n hess = np.einsum('tn, ni, nj ->tij', diags, self.Cs[0], self.Cs[0])\n return -1 * hess\n\n else:\n raise Exception(\"No Hessian calculation for link: {}\".format(self.link_name))\n\nclass PoissonIdentityEmissions(_PoissonEmissionsMixin, _IdentityEmissions):\n pass\n\n\nclass PoissonNeuralNetworkEmissions(_PoissonEmissionsMixin, _NeuralNetworkEmissions):\n pass\n\n\nclass _AutoRegressiveEmissionsMixin(object):\n \"\"\"\n Include past observations as a covariate in the SLDS emissions.\n The AR model is restricted to be diagonal.\n \"\"\"\n def __init__(self, N, K, D, M=0, single_subspace=True, **kwargs):\n super(_AutoRegressiveEmissionsMixin, self).__init__(N, K, D, M=M, single_subspace=single_subspace, **kwargs)\n\n # Initialize AR component of the model\n self.As = npr.randn(1, N) if single_subspace else npr.randn(K, N)\n self.inv_etas = -4 + npr.randn(1, N) if single_subspace else npr.randn(K, N)\n\n # Shrink the eigenvalues of the A matrices to avoid instability.\n # Since the As are diagonal, this is just a clip.\n self.As = np.clip(self.As, -1.0 + 1e-8, 1 - 1e-8)\n\n @property\n def params(self):\n return super(_AutoRegressiveEmissionsMixin, self).params + (self.As, self.inv_etas)\n\n @params.setter\n def params(self, value):\n self.As, self.inv_etas = value[-2:]\n super(_AutoRegressiveEmissionsMixin, self.__class__).params.fset(self, value[:-2])\n\n def permute(self, 
perm):\n super(_AutoRegressiveEmissionsMixin, self).permute(perm)\n if not self.single_subspace:\n self.As = self.inv_nus[perm]\n self.inv_etas = self.inv_etas[perm]\n\n def log_likelihoods(self, data, input, mask, tag, x):\n mus = self.forward(x, input, tag)\n pad = np.zeros((1, 1, self.N)) if self.single_subspace else np.zeros((1, self.K, self.N))\n mus = mus + np.concatenate((pad, self.As[None, :, :] * data[:-1, None, :]))\n\n etas = np.exp(self.inv_etas)\n lls = -0.5 * np.log(2 * np.pi * etas) - 0.5 * (data[:, None, :] - mus)**2 / etas\n return np.sum(lls * mask[:, None, :], axis=2)\n\n def invert(self, data, input=None, mask=None, tag=None):\n assert self.single_subspace, \"Can only invert with a single emission model\"\n pad = np.zeros((1, self.N))\n resid = data - np.concatenate((pad, self.As * data[:-1]))\n return self._invert(resid, input=input, mask=mask, tag=tag)\n\n def sample(self, z, x, input=None, tag=None):\n T, N = z.shape[0], self.N\n z = np.zeros_like(z, dtype=int) if self.single_subspace else z\n mus = self.forward(x, input, tag)\n etas = np.exp(self.inv_etas)\n\n y = np.zeros((T, N))\n y[0] = mus[0, z[0], :] + np.sqrt(etas[z[0]]) * npr.randn(N)\n for t in range(1, T):\n y[t] = mus[t, z[t], :] + self.As[z[t]] * y[t-1] + np.sqrt(etas[z[0]]) * npr.randn(N)\n return y\n\n def smooth(self, expected_states, variational_mean, data, input=None, mask=None, tag=None):\n mus = self.forward(variational_mean, input, tag)\n mus[1:] += self.As[None, :, :] * data[:-1, None, :]\n return mus[:,0,:] if self.single_subspace else np.sum(mus * expected_states, axis=1)\n\n\nclass AutoRegressiveEmissions(_AutoRegressiveEmissionsMixin, _LinearEmissions):\n @ensure_args_are_lists\n def initialize(self, datas, inputs=None, masks=None, tags=None, num_em_iters=25):\n # Initialize the subspace with PCA\n from sklearn.decomposition import PCA\n datas = [interpolate_data(data, mask) for data, mask in zip(datas, masks)]\n\n # Solve a linear regression for the AR coefficients.\n from sklearn.linear_model import LinearRegression\n for n in range(self.N):\n lr = LinearRegression()\n lr.fit(np.concatenate([d[:-1, n] for d in datas])[:,None],\n np.concatenate([d[1:, n] for d in datas]))\n self.As[:,n] = lr.coef_[0]\n\n # Compute the residuals of the AR model\n pad = np.zeros((1,self.N))\n mus = [np.concatenate((pad, self.As[0] * d[:-1])) for d in datas]\n residuals = [data - mu for data, mu in zip(datas, mus)]\n\n # Run PCA on the residuals to initialize C and d\n pca = self._initialize_with_pca(residuals, inputs=inputs, masks=masks, tags=tags)\n self.inv_etas[:,...] = np.log(pca.noise_variance_)\n\n\nclass AutoRegressiveOrthogonalEmissions(_AutoRegressiveEmissionsMixin, _OrthogonalLinearEmissions):\n pass\n\n\nclass AutoRegressiveIdentityEmissions(_AutoRegressiveEmissionsMixin, _IdentityEmissions):\n pass\n\n\nclass AutoRegressiveNeuralNetworkEmissions(_AutoRegressiveEmissionsMixin, _NeuralNetworkEmissions):\n pass\n"
] |
[
[
"sklearn.linear_model.LinearRegression"
]
] |
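The only external API recorded for ssm/emissions.py is sklearn's LinearRegression, used in _LinearEmissions._initialize_with_pca to regress observations on inputs before running PCA on the residuals. A minimal sketch of that initialization pattern, with made-up dimensions and random data standing in for real observations:

```python
# Sketch of the PCA initialization used by _LinearEmissions above:
# fit an intercept-free LinearRegression of data on inputs, then run PCA on
# the residuals to obtain the emission matrix C and bias d.
import numpy as np
from sklearn.decomposition import PCA
from sklearn.linear_model import LinearRegression

rng = np.random.default_rng(0)
T, N, D, M = 200, 10, 3, 2            # time steps, observed dim, latent dim, input dim
inputs = rng.standard_normal((T, M))
datas = rng.standard_normal((T, N))

# Input -> observation regression, matching lr = LinearRegression(fit_intercept=False)
lr = LinearRegression(fit_intercept=False)
lr.fit(inputs, datas)
F = lr.coef_                          # shape (N, M)

# PCA on the residuals gives C (components) and d (mean), as in the original
resids = datas - inputs @ F.T
pca = PCA(n_components=D).fit(resids)
C = pca.components_.T                 # shape (N, D)
d = pca.mean_                         # shape (N,)
print(C.shape, d.shape, F.shape)
```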
mihaTrajbaric/application-optimisation
|
[
"45767c8b10c18645f71d96e275165c68c2479117"
] |
[
"use-cases/SnowUC-SkylineExtraction/peaklens-gpuopt_training.py"
] |
[
"# %%\n\"\"\"\n# Notebook to train PeakLens original model \n\"\"\"\n\n# %%\nimport sys\nimport os\nimport tensorflow as tf\nimport numpy as np\nimport glob\nimport multiprocessing\nimport time \n\n# %%\nprint(tf.__version__)\n\n# %%\n\"\"\"\n## Training parameters \n\"\"\"\n\n# %%\nMODEL_NAME = \"PeakLens_original\"\n#DATASET_PATH = \"/mnt/nfs/home/hpccrnm/soda/data/skyline/skyline-extraction-patches-dataset\"\nDATASET_PATH = \"/workspace/hpccrnm/skyline/skyline-extraction-patches-dataset\"\n\n# only grow the memory usage as it is needed by the process \ngpus = tf.config.experimental.list_physical_devices('GPU')\nif gpus:\n try:\n for gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n print(gpu)\n except RuntimeError as e:\n print(e)\n exit()\n\n# %%\nBATCH_SIZE = 256\nEPOCHS = 100\nLEARNING_RATE = 0.0001\nEARLY_STOPPING = 10\n\n# %%\n\"\"\"\n## Methods to load the dataset\n\"\"\"\n\n# %%\ndef load_image_and_label(path, label):\n image = tf.io.read_file(path)\n image_decoded = tf.image.decode_jpeg(image)\n image_decoded = tf.image.random_flip_left_right(image_decoded) \n return image_decoded, label\n\ndef get_dataset_split(split):\n positive_paths = glob.glob(\"{}/{}/patches/positive/*.jpg\".format(DATASET_PATH, split))\n negative_paths = glob.glob(\"{}/{}/patches/negative/*.jpg\".format(DATASET_PATH, split))\n \n positive_labels = [1] * len(positive_paths)\n negative_labels = [0] * len(negative_paths)\n \n paths = positive_paths + negative_paths\n labels = positive_labels + negative_labels\n\n tf_paths = tf.constant(paths)\n tf_labels = tf.constant(labels)\n\n dataset = tf.data.Dataset.from_tensor_slices((tf_paths, tf_labels))\n dataset = dataset.map(load_image_and_label, num_parallel_calls=multiprocessing.cpu_count())\\\n .shuffle(len(paths)).batch(BATCH_SIZE).prefetch(2)\n dataset = dataset.apply(tf.data.experimental.prefetch_to_device('/gpu:0'))\n return dataset, len(paths)//BATCH_SIZE\n\n# %%\n\"\"\"\n## Load dataset splits\n\"\"\"\n\n# %%\ntraining_dataset, training_steps = get_dataset_split(\"training\")\nprint(\"Training split loaded ({:,} images).\".format(training_steps*BATCH_SIZE))\n\nvalidation_dataset, validation_steps = get_dataset_split(\"validation\")\nprint(\"Validation split loaded ({:,} images).\".format(validation_steps*BATCH_SIZE))\n\ntest_dataset, test_steps = get_dataset_split(\"testing\")\nprint(\"Test split loaded ({:,} images).\".format(test_steps*BATCH_SIZE))\n\n# %%\n\"\"\"\n## Model definition \n\"\"\"\n\n# %%\nfrom tensorflow.keras.models import Sequential\n\nclass DeepNN(Sequential):\n \n def __init__(self, input_shape, lr_param):\n super().__init__()\n \n conv1 = tf.keras.layers.Conv2D(filters=20,kernel_size=[6, 6], input_shape=input_shape)\n self.add(conv1)\n \n pool1 = tf.keras.layers.MaxPooling2D( pool_size=[2, 2], strides=2)\n self.add(pool1)\n \n conv2 = tf.keras.layers.Conv2D(filters=50,kernel_size=[5, 5])\n self.add(conv2)\n\n pool2 = tf.keras.layers.MaxPooling2D( pool_size=[2, 2], strides=2)\n self.add(pool2)\n \n conv3 = tf.keras.layers.Conv2D(filters=500, kernel_size=[4, 4])\n self.add(conv3)\n \n relu = tf.keras.layers.ReLU()\n self.add(relu)\n \n conv4 = tf.keras.layers.Conv2D(filters=2, kernel_size=[1, 1])\n self.add(conv4)\n \n output = tf.keras.layers.Reshape([-1, 1 * 1 * 2])\n self.add(output)\n \n softmax = tf.keras.layers.Softmax()\n self.add(softmax)\n \n optimizer = tf.keras.optimizers.Adam(learning_rate=lr_param)\n loss = tf.keras.losses.SparseCategoricalCrossentropy()\n self.compile(optimizer=optimizer,\n loss=loss,\n 
metrics=['accuracy'])\n \nmodel = DeepNN((29, 29, 3), LEARNING_RATE)\nmodel.summary()\n\n# %%\n\"\"\"\n## Define Callbacks for training\n\"\"\"\n\n# %%\nimport shutil\nfrom tensorflow.keras.callbacks import EarlyStopping, TensorBoard, ModelCheckpoint\n\n# Early Stopping\nearly_stopping_callback = tf.keras.callbacks.EarlyStopping(\n monitor=\"val_loss\",\n min_delta=0,\n patience=EARLY_STOPPING,\n verbose=0,\n mode=\"auto\",\n baseline=None,\n restore_best_weights=False)\n\n# TensorBoard\nlog_dir='./graphs/{}/train'.format(MODEL_NAME)\ntensorboard_callback = TensorBoard(log_dir=log_dir, histogram_freq=1)\n\n# Checkpoint saver\ncheckpoint_path = \"./checkpoint/original/weights.{epoch:02d}-{val_loss:.2f}.hdf5\"\ncheckpoint_callback = ModelCheckpoint(checkpoint_path, monitor='val_loss', verbose=1,\n save_best_only=True, save_weights_only=False, mode='auto', save_freq='epoch')\n\n# %%\n\"\"\"\n# Training & Validation\n\"\"\"\n\n# %%\n\"\"\"\nIt is possible to visualize the training with tensorboard by executing the following command with the corresponding logdir path\n\"\"\"\n\n# %%\nprint(\"tensorboard --logdir={}\".format(log_dir))\n\n# %%\nstart = time.time()\nmodel.fit(training_dataset, \n epochs=EPOCHS, \n callbacks=[early_stopping_callback, tensorboard_callback, checkpoint_callback],\n verbose=1,\n validation_data=validation_dataset)\n\nelapsed = time.time() - start\nprint('Elapsed %.3f seconds.' % elapsed)\n\n# %%\n\"\"\"\n# Test\n\"\"\"\n\n# %%\nimport glob\nmodels = glob.glob(\"./checkpoint/original/*.hdf5\")\nmodels.sort()\nmodels\n\n# %%\n# Choose one from the previous\nmodel_path = './checkpoint/original/weights.01-0.18.hdf5'\n\n# %%\nfrom keras.models import load_model\n\n# Load the previously saved weights\ntesting_model = DeepNN((29, 29, 3), LEARNING_RATE)\ntesting_model.load_weights(model_path)\nloss, acc = testing_model.evaluate(test_dataset, verbose=2)\nprint(\"Restored model, accuracy: {:5.2f}%\".format(100*acc))\n\n# %%\n\"\"\"\n# Export to pb\n\"\"\"\n\n# %%\nfrom tensorflow.python.framework import convert_to_constants\n\ndef export_to_frozen_pb(model: tf.keras.models.Model, path: str) -> None:\n inference_func = tf.function(lambda input: model(input))\n\n concrete_func = inference_func.get_concrete_function(tf.TensorSpec(model.inputs[0].shape, model.inputs[0].dtype))\n output_func = convert_to_constants.convert_variables_to_constants_v2(concrete_func)\n\n graph_def = output_func.graph.as_graph_def()\n graph_def.node[0].name = 'input'\n graph_def.node[-1].name = 'output'\n\n with open(path, 'wb') as freezed_pb:\n freezed_pb.write(graph_def.SerializeToString())\n print(\"Model saved at {}\".format(path))\n\n# %%\noutput_path = \"./protobufs/{}.pb\".format(MODEL_NAME)\n\nmodel_to_save = DeepNN((240, 320, 3), LEARNING_RATE)\nmodel_to_save.load_weights(model_path)\nexport_to_frozen_pb(model_to_save, output_path)\n\n"
] |
[
[
"tensorflow.keras.callbacks.TensorBoard",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.image.random_flip_left_right",
"tensorflow.io.read_file",
"tensorflow.keras.layers.Reshape",
"tensorflow.keras.callbacks.ModelCheckpoint",
"tensorflow.image.decode_jpeg",
"tensorflow.keras.layers.Conv2D",
"tensorflow.constant",
"tensorflow.keras.losses.SparseCategoricalCrossentropy",
"tensorflow.keras.optimizers.Adam",
"tensorflow.python.framework.convert_to_constants.convert_variables_to_constants_v2",
"tensorflow.config.experimental.set_memory_growth",
"tensorflow.TensorSpec",
"tensorflow.data.experimental.prefetch_to_device",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.config.experimental.list_physical_devices",
"tensorflow.keras.layers.Softmax",
"tensorflow.keras.layers.ReLU",
"tensorflow.keras.callbacks.EarlyStopping"
]
] |
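The PeakLens training script above builds its input pipeline with tf.data: paths and labels go through from_tensor_slices, a decode/augment map, shuffle, batch, and prefetch. A runnable sketch of the same pipeline shape, with synthetic image tensors in place of the JPEG files on disk and tf.data.AUTOTUNE in place of multiprocessing.cpu_count():

```python
# Sketch of the tf.data pipeline in get_dataset_split above, using synthetic
# 29x29 "patches" so it runs without the skyline-extraction dataset.
import tensorflow as tf

BATCH_SIZE = 4
labels = tf.constant([1, 0, 1, 0, 1, 0, 1, 0])
images = tf.random.uniform((8, 29, 29, 3))     # stand-in for decoded JPEG patches

dataset = (
    tf.data.Dataset.from_tensor_slices((images, labels))
    .map(lambda img, lbl: (tf.image.random_flip_left_right(img), lbl),
         num_parallel_calls=tf.data.AUTOTUNE)
    .shuffle(8)
    .batch(BATCH_SIZE)
    .prefetch(2)
)

for batch_images, batch_labels in dataset:
    print(batch_images.shape, batch_labels.numpy())
```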
justinjohn0306/uberduck-ml-dev
|
[
"439ec326eb7680c4fdd5ee97a09def8e355e0f7c"
] |
[
"uberduck_ml_dev/utils/utils.py"
] |
[
"# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/utils.utils.ipynb (unless otherwise specified).\n\n__all__ = ['load_filepaths_and_text', 'window_sumsquare', 'griffin_lim', 'dynamic_range_compression',\n 'dynamic_range_decompression', 'to_gpu', 'get_mask_from_lengths', 'reduce_tensor', 'subsequent_mask',\n 'convert_pad_shape', 'sequence_mask', 'generate_path', 'slice_segments', 'rand_slice_segments',\n 'init_weights', 'get_padding', 'fused_add_tanh_sigmoid_multiply', 'clip_grad_value_', 'intersperse',\n 'intersperse_emphases']\n\n# Cell\n\nimport sys\nimport os\nimport soundfile as sf\nimport pandas as pd\n\nimport numpy as np\n\nimport soundfile as sf\nimport librosa\nfrom torch.nn import functional as F\n\n\ndef load_filepaths_and_text(filename: str, split: str = \"|\"):\n with open(filename, encoding=\"utf-8\") as f:\n filepaths_and_text = [line.strip().split(split) for line in f]\n return filepaths_and_text\n\n# Cell\n\nimport torch\nimport numpy as np\nfrom scipy.signal import get_window\nimport librosa.util as librosa_util\n\n\ndef window_sumsquare(\n window,\n n_frames,\n hop_length=200,\n win_length=800,\n n_fft=800,\n dtype=np.float32,\n norm=None,\n):\n \"\"\"\n # from librosa 0.6\n Compute the sum-square envelope of a window function at a given hop length.\n\n This is used to estimate modulation effects induced by windowing\n observations in short-time fourier transforms.\n\n Parameters\n ----------\n window : string, tuple, number, callable, or list-like\n Window specification, as in `get_window`\n\n n_frames : int > 0\n The number of analysis frames\n\n hop_length : int > 0\n The number of samples to advance between frames\n\n win_length : [optional]\n The length of the window function. By default, this matches `n_fft`.\n\n n_fft : int > 0\n The length of each analysis frame.\n\n dtype : np.dtype\n The data type of the output\n\n Returns\n -------\n wss : np.ndarray, shape=`(n_fft + hop_length * (n_frames - 1))`\n The sum-squared envelope of the window function\n \"\"\"\n if win_length is None:\n win_length = n_fft\n\n n = n_fft + hop_length * (n_frames - 1)\n x = np.zeros(n, dtype=dtype)\n\n # Compute the squared window at the desired length\n win_sq = get_window(window, win_length, fftbins=True)\n win_sq = librosa_util.normalize(win_sq, norm=norm) ** 2\n win_sq = librosa_util.pad_center(win_sq, n_fft)\n\n # Fill the envelope\n for i in range(n_frames):\n sample = i * hop_length\n x[sample : min(n, sample + n_fft)] += win_sq[: max(0, min(n_fft, n - sample))]\n return x\n\n\ndef griffin_lim(magnitudes, stft_fn, n_iters=30):\n \"\"\"\n PARAMS\n ------\n magnitudes: spectrogram magnitudes\n stft_fn: STFT class with transform (STFT) and inverse (ISTFT) methods\n \"\"\"\n\n angles = np.angle(np.exp(2j * np.pi * np.random.rand(*magnitudes.size())))\n angles = angles.astype(np.float32)\n angles = torch.autograd.Variable(torch.from_numpy(angles))\n signal = stft_fn.inverse(magnitudes, angles).squeeze(1)\n\n for i in range(n_iters):\n _, angles = stft_fn.transform(signal)\n signal = stft_fn.inverse(magnitudes, angles).squeeze(1)\n return signal\n\n\ndef dynamic_range_compression(x, C=1, clip_val=1e-5):\n \"\"\"\n PARAMS\n ------\n C: compression factor\n \"\"\"\n return torch.log(torch.clamp(x, min=clip_val) * C)\n\n\ndef dynamic_range_decompression(x, C=1):\n \"\"\"\n PARAMS\n ------\n C: compression factor used to compress\n \"\"\"\n return torch.exp(x) / C\n\n# Cell\ndef to_gpu(x):\n x = x.contiguous()\n\n if torch.cuda.is_available():\n x = x.cuda(non_blocking=True)\n return 
torch.autograd.Variable(x)\n\n# Cell\n\n\ndef get_mask_from_lengths(lengths: torch.Tensor, max_len: int = 0):\n \"\"\"Return a mask matrix. Unmasked entires are true.\"\"\"\n if max_len == 0:\n max_len = int(torch.max(lengths).item())\n ids = torch.arange(0, max_len, device=lengths.device, dtype=torch.long)\n mask = (ids < lengths.unsqueeze(1)).bool()\n return mask\n\n# Cell\nimport torch.distributed as dist\n\n\ndef reduce_tensor(tensor, n_gpus):\n rt = tensor.clone()\n dist.all_reduce(rt, op=dist.ReduceOp.SUM)\n rt /= n_gpus\n return rt\n\n# Cell\ndef subsequent_mask(length):\n mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)\n return mask\n\n# Cell\ndef convert_pad_shape(pad_shape):\n \"\"\"Reverse, then flatten a list of lists.\"\"\"\n l = pad_shape[::-1]\n pad_shape = [item for sublist in l for item in sublist]\n return pad_shape\n\n# Cell\ndef sequence_mask(length, max_length=None):\n \"\"\"The same as get_mask_from_lengths\"\"\"\n if max_length is None:\n max_length = length.max()\n x = torch.arange(max_length, dtype=length.dtype, device=length.device)\n return x.unsqueeze(0) < length.unsqueeze(1)\n\n# Cell\ndef generate_path(duration, mask):\n \"\"\"\n duration: [b, 1, t_x]\n mask: [b, 1, t_y, t_x]\n \"\"\"\n device = duration.device\n\n b, _, t_y, t_x = mask.shape\n cum_duration = torch.cumsum(duration, -1)\n\n cum_duration_flat = cum_duration.view(b * t_x)\n path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)\n path = path.view(b, t_x, t_y)\n path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]\n path = path.unsqueeze(1).transpose(2, 3) * mask\n return path\n\n# Cell\n\n\ndef slice_segments(x, ids_str, segment_size=4):\n ret = torch.zeros_like(x[:, :, :segment_size])\n for i in range(x.size(0)):\n idx_str = ids_str[i]\n idx_end = idx_str + segment_size\n ret[i] = x[i, :, idx_str:idx_end]\n return ret\n\n\ndef rand_slice_segments(x, x_lengths=None, segment_size=4):\n b, d, t = x.size()\n if x_lengths is None:\n x_lengths = t\n ids_str_max = x_lengths - segment_size\n ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)\n ret = slice_segments(x, ids_str, segment_size)\n return ret, ids_str\n\n# Cell\ndef init_weights(m, mean=0.0, std=0.01):\n classname = m.__class__.__name__\n if classname.find(\"Conv\") != -1:\n m.weight.data.normal_(mean, std)\n\n# Cell\ndef get_padding(kernel_size, dilation=1):\n return int((kernel_size * dilation - dilation) / 2)\n\n# Cell\n@torch.jit.script\ndef fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):\n n_channels_int = n_channels[0]\n in_act = input_a + input_b\n t_act = torch.tanh(in_act[:, :n_channels_int, :])\n s_act = torch.sigmoid(in_act[:, n_channels_int:, :])\n acts = t_act * s_act\n return acts\n\n# Cell\ndef clip_grad_value_(parameters, clip_value, norm_type=2):\n if isinstance(parameters, torch.Tensor):\n parameters = [parameters]\n parameters = list(filter(lambda p: p.grad is not None, parameters))\n norm_type = float(norm_type)\n if clip_value is not None:\n clip_value = float(clip_value)\n\n total_norm = 0\n for p in parameters:\n param_norm = p.grad.data.norm(norm_type)\n total_norm += param_norm.item() ** norm_type\n if clip_value is not None:\n p.grad.data.clamp_(min=-clip_value, max=clip_value)\n total_norm = total_norm ** (1.0 / norm_type)\n return total_norm\n\n# Cell\n\n\ndef intersperse(lst, item):\n result = [item] * (len(lst) * 2 + 1)\n result[1::2] = lst\n return result\n\n\ndef intersperse_emphases(emphases):\n for n in 
range(len(emphases)):\n emphases[n][0] = 2 * emphases[n][0]\n emphases[n][1] = 2 * emphases[n][1] + 1\n return emphases"
] |
[
[
"torch.sigmoid",
"torch.rand",
"torch.arange",
"numpy.zeros",
"torch.autograd.Variable",
"torch.max",
"torch.clamp",
"torch.from_numpy",
"torch.ones",
"torch.cuda.is_available",
"scipy.signal.get_window",
"torch.distributed.all_reduce",
"torch.zeros_like",
"torch.tanh",
"torch.exp",
"torch.cumsum"
]
] |
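A standalone sketch for the row above: the utils module bundles small tensor helpers such as sequence_mask and intersperse. The snippet below copies their logic out of the code string so it runs on its own and shows the expected outputs; it is an illustration, not a file from the repository.

import torch

def sequence_mask(length, max_length=None):
    # same logic as sequence_mask in the row above
    if max_length is None:
        max_length = length.max()
    x = torch.arange(max_length, dtype=length.dtype, device=length.device)
    return x.unsqueeze(0) < length.unsqueeze(1)

def intersperse(lst, item):
    # same logic as intersperse in the row above
    result = [item] * (len(lst) * 2 + 1)
    result[1::2] = lst
    return result

lengths = torch.tensor([2, 4, 3])
print(sequence_mask(lengths))
# tensor([[ True,  True, False, False],
#         [ True,  True,  True,  True],
#         [ True,  True,  True, False]])
print(intersperse([7, 8, 9], 0))  # [0, 7, 0, 8, 0, 9, 0]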
neurorishika/babySSH
|
[
"9239ce7243250bdd0f5f57e8919c0b15dca41c5a"
] |
[
"babydes.py"
] |
[
"import numpy as np\nfrom util import *\n\n'''\nServer has list of authorized public key.\nClient has public key and private key (unique to client).\n\nServer has to make sure that the client who is in contact is authorized.\n\nAuthentication:\n---------------\n - Server selects a message.\n - Encrypts with public key.\n - Client decrypts it and adds the session key (generated by Diffie Hellman) and then hashes it.\n - Sends the hash encrypted using the algorithm.\n - Server checks if the hash is correct (since it has the original message and session key).\n\nRSA:\n----\nHave to make public and private key for client side only.\n - Select two huge prime numbers (?? digits), lets call them p and q. Let n = pq.\n - Select e such that gcd(e, (p-1)(q-1)) = 1\n - Compute d such that de = 1 (mod (p-1)(q-1))\n - Client has (n, e) as public key and (n, d) as private key.\n - Encryption of message m: c = m^e (mod n)\n - Decryption of ciphertext c: m = c^d (mod n)\n'''\n\n'''\n-------------------------------------------------------------------------------------------------------------------------\nPreviously Developed BabyDES Algorithm\n-------------------------------------------------------------------------------------------------------------------------\n'''\n\n# Expander Function\n\ndef expander(x):\n '''\n Expander function implemented by Ramya Narayanan\n '''\n x.append(x[2])\n x.append(x[3])\n order = [0, 1, 3, 2, 7, 6, 4, 5]\n x = [x[i] for i in order]\n return x\n\n\n# S-Box\n\ndef s_box(in_list, box_number):\n '''\n S-Box function implemented by Rohit Sahasrabuddhe\n in_list: a list of booleans of length 4 (4-bit)\n box_number: 1 or 2\n '''\n S1 = [ [[True , False, True], [False, True , False], [False, False, True ], [True , True , False], [False, True , True ], [True , False, False], [True , True , True ], [False, False, False]],\n [[False, False, True], [True , False, False], [True , True , False], [False, True , False], [False, False, False], [True , True , True ], [True , False, True ], [False, True , True ]]]\n\n S2 = [ [[True , False, False], [False, False, False], [True , True , False], [True , False, True], [True , True , True ], [False, False, True ], [False, True , True ], [False, True , False]],\n [[True , False, True ], [False, True , True ], [False, False, False], [True , True , True], [True , True , False], [False, True , False], [False, False, True ], [True , False, False]]]\n\n box = [S1, S2][box_number-1]\n\n return box[int(in_list[0])][int(''.join(list(map(lambda x: str(int(x)), in_list[1:]))), 2)]\n\n\n# Round Function\n\ndef round_function(Ri, Ki):\n '''\n Round Function implemented by Rishika Mohanta\n Ri: a list of booleans of length 6 (6-Bit ith round; Right Block)\n Ki: a list of booleans of length 8 (8-bit ith round; Round key)\n '''\n ERi = expander(Ri)\n ERi_Ki = np.logical_xor(ERi,Ki)\n ERi_Ki1,ERi_Ki2 = np.array_split(ERi_Ki,2)\n S1_out = s_box(ERi_Ki1,1)\n S2_out = s_box(ERi_Ki2,2)\n return list(np.concatenate([S1_out,S2_out]))\n\n\n# Single Round Function\n\ndef bDES_round(block_1, block_2, des_key, round_number):\n '''\n Single round of bDES implemented by Samarendra Pani\n block_1: a list of booleans of length 6 (The left block in case of encryption. The right block in case of decryption)\n block_2: a list of booleans of length 6 (The right block in case of encryption. 
The left block in case of decryption)\n des_key: a list of booleans of length 9 (9 bit DES key)\n round_number: 1,2...N for N bDES (To calculate Key Schedule)\n '''\n tmp = list(np.copy(block_2))\n key_8bit = list(np.roll(des_key, -(round_number-1)))[0:8] #Key scheduling\n block_2 = round_function(block_2, key_8bit)\n block_2 = list(np.logical_xor(block_1, block_2))\n block_1 = tmp\n\n return block_1, block_2 #Returning the blocks after 1 round of encryption\n\n\n# Encryption function\n\ndef bDES_encryption(plaintext, key):\n '''\n bDES encryption implemented by Samarendra Pani\n plaintext: A list of booleans which represents the given plaintext.\n key: A list of booleans of length 9 (9 bit DES key)\n N: The number of rounds for encrypting a block\n '''\n\n plaintext += '\\0'*((3-len(plaintext)%3)%3)\n plaintext = str2bool(text2binary(plaintext))\n\n N = 4\n output = []\n for i in range(int(len(plaintext)/12)):\n plaintext_block = plaintext[12*i:12*(i+1)]\n block_1 = plaintext_block[0:6]\n block_2 = plaintext_block[6:12]\n round_number = 1\n while(round_number <= N):\n block_1, block_2 = bDES_round(block_1, block_2, key, round_number)\n round_number += 1\n output.extend(block_1 + block_2)\n\n return bool2str(output) #Returing the CIPHERTEXT in binary form\n\n\n# Decryption function\n\ndef bDES_decryption(CIPHERTEXT, key):\n '''\n bDES decryption implemented by Samarendra Pani\n CIPHERTEXT: A list of booleans which represents the generated ciphertext.\n key: A list of booleans of length 9 (9 bit DES key)\n N: The number of rounds for decrypting a block (same as the number of rounds it took for encryption)\n '''\n CIPHERTEXT = str2bool(CIPHERTEXT)\n N = 4\n output = []\n for i in range(int(len(CIPHERTEXT)/12)):\n CIPHERTEXT_block = CIPHERTEXT[12*i:12*(i+1)]\n block_1 = CIPHERTEXT_block[6:12]\n block_2 = CIPHERTEXT_block[0:6]\n round_number = N\n while(round_number >= 1):\n block_1, block_2 = bDES_round(block_1, block_2, key, round_number)\n round_number -= 1\n output.extend(block_2 + block_1)\n\n message = binary2text(bool2str(output))\n try:\n #Removing the null padding\n message = message[:message.index(\"\\x00\")]\n except:\n pass\n return message\n"
] |
[
[
"numpy.concatenate",
"numpy.logical_xor",
"numpy.copy",
"numpy.roll",
"numpy.array_split"
]
] |
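Illustrative sketch for the row above: bDES_encryption and bDES_decryption rely on the standard Feistel property that decryption reuses the round function with the key schedule reversed. The toy example below demonstrates that property on 12-bit integer blocks with a made-up round function F, which is an assumption standing in for the S-box network in babydes.py.

def F(r, k):
    # any 6-bit to 6-bit map works for the round-trip argument
    return (r * 31 + k) & 0x3F

def encrypt(block12, keys):
    L, R = block12 >> 6, block12 & 0x3F
    for k in keys:
        L, R = R, L ^ F(R, k)
    return (L << 6) | R

def decrypt(block12, keys):
    L, R = block12 >> 6, block12 & 0x3F
    for k in reversed(keys):
        L, R = R ^ F(L, k), L
    return (L << 6) | R

keys = [0b10110011, 0b01100110, 0b11001101, 0b10011011]  # four round keys
m = 0b101101001110
assert decrypt(encrypt(m, keys), keys) == m  # the round trip recovers the block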
ilmcconnell/Cosmos
|
[
"84245034727c30e20ffddee9e02c7e96f3aa115e",
"84245034727c30e20ffddee9e02c7e96f3aa115e"
] |
[
"cosmos/ingestion/ingest/process/detection/src/torch_model/train/data_layer/xml_loader.py",
"cosmos/ingestion/ingest/process/postprocess/converters/pdf_extractor.py"
] |
[
"\"\"\"\nAn XML and image repo loader\nmeant to work with the GTDataset class\nAuthor: Josh McGrath\n\"\"\"\nfrom torch.utils.data import Dataset\nimport os\nfrom os.path import splitext\nfrom PIL import Image\nfrom torchvision.transforms import ToTensor\nfrom numpy import genfromtxt\nimport torch\nfrom torch.nn.utils.rnn import pad_sequence\nfrom xml.etree import ElementTree as ET\nfrom .transforms import NormalizeWrapper\nimport pickle\nfrom collections import namedtuple\nfrom uuid import uuid4\nfrom ingest.process.detection.src.torch_model.utils.bbox import BBoxes\nfrom ingest.process.detection.src.torch_model.utils.matcher import match\nfrom ingest.process.detection.src.utils.ingest_images import db_ingest, get_example_for_uuid, compute_neighborhoods, ImageDB\nfrom dataclasses import dataclass\nimport logging\nlogging.basicConfig(format='%(levelname)s :: %(asctime)s :: %(message)s', level=logging.WARNING)\nlogger = logging.getLogger(__name__)\n\nnormalizer = NormalizeWrapper()\n\ntens = ToTensor()\n@dataclass\nclass Example:\n center_bb: torch.Tensor\n label: torch.Tensor\n center_window: torch.Tensor\n neighbor_boxes: torch.Tensor\n neighbor_windows: torch.Tensor\n neighbor_radii: torch.Tensor\n neighbor_angles: torch.Tensor\n colorfulness: torch.Tensor\n\nBatch = namedtuple('Batch', ['center_bbs', 'labels', 'center_windows', 'neighbor_boxes', 'neighbor_windows', 'neighbor_radii', 'neighbor_angles', 'colorfulness'])\n\ndef containsNone(lst):\n flag = False\n for o in lst:\n if o is None:\n flag = True\n return flag\n\ndef get_colorfulness(window):\n diffs = window.max(dim=0)[0] - window.min(dim=0)[0]\n return torch.mean(diffs.topk(25)[0])\n\ndef get_radii(center_bbox, neighbor_bboxes):\n center_bbox = center_bbox.reshape(1,4)\n assert center_bbox.shape[0] == 1\n assert center_bbox.shape[1] == 4\n assert neighbor_bboxes.shape[1] == 4\n diffs = center_bbox - neighbor_bboxes\n return torch.norm(diffs, p=2, dim=1)\n\ndef get_angles(center_bbox, neighbor_boxes):\n radii = get_radii(center_bbox, neighbor_boxes)\n # get center coords of center box\n center_bbox = center_bbox.reshape(1,4)\n center_center = torch.stack([center_bbox[:,0] -center_bbox[:,2], center_bbox[:,1] - center_bbox[:,3]])\n neighbor_center = torch.stack([neighbor_boxes[:,0] -neighbor_boxes[:,2], neighbor_boxes[:,1] - neighbor_boxes[:,3]])\n # clamp required to not produce nan at asin\n delta_y = torch.abs(center_center[1] - neighbor_center[1])\n ratios = delta_y/radii\n out = torch.asin(ratios.clamp(-1 + 1e-4, 1- 1e-4))\n mask = out != out\n out[mask] = 0.4\n return out\n\nclass XMLLoader(Dataset):\n \"\"\"\n Loads examples and ground truth from given directories\n it is expected that the directories have no files\n other than annotations\n \"\"\"\n\n def __init__(self, ingest_objs, classes, session):\n \"\"\"\n Initialize a XML loader object\n :param xml_dir: directory to get XML from\n :param img_dir: directory to load PNGs from\n :param img_type: the image format to load in\n \"\"\"\n self.session = session\n self.uuids = ingest_objs.uuids\n self.ngt_boxes = ingest_objs.ngt_boxes\n self.nproposals = ingest_objs.nproposals\n self.class_stats = ingest_objs.class_stats\n self.classes = classes\n logger.debug(\"printing class stats\")\n self.print_stats()\n logger.debug(f\"# of gt boxes:{self.ngt_boxes}\")\n logger.debug(f\"# of proposals:{self.nproposals}\")\n\n\n def __len__(self):\n return len(self.uuids)\n\n def __getitem__(self, item):\n uuid = self.uuids[item]\n ex = get_example_for_uuid(uuid, self.session)\n 
neighbors = ex.neighbors(True, self.uuids, self.session)\n colorfulness = get_colorfulness(ex.window)\n if len(neighbors) == 0:\n neighbor_boxes = [torch.zeros(4), torch.zeros(4)]\n neighbor_windows = [torch.zeros(ex.window.shape), torch.zeros(ex.window.shape)]\n neighbor_radii = torch.tensor([-1*torch.ones(1)] *2)\n neighbor_angles = neighbor_radii\n else:\n neighbor_boxes = [n.bbox for n in neighbors]\n neighbor_windows = [n.window for n in neighbors]\n neighbor_radii = get_radii(ex.bbox, torch.stack(neighbor_boxes))\n neighbor_angles = get_angles(ex.bbox, torch.stack(neighbor_boxes))\n if ex.label == 'unlabelled':\n ex.label = self.classes[-1]\n label = torch.Tensor([self.classes.index(ex.label)]) if ex.label is not None else None\n return Example(ex.bbox, label, ex.window, neighbor_boxes, neighbor_windows, neighbor_radii, neighbor_angles,colorfulness)\n\n @staticmethod\n def collate(batch):\n center_bbs = torch.stack([ex.center_bb for ex in batch])\n ex_labels = [ex.label for ex in batch]\n labels = None\n if containsNone(ex_labels):\n labels = None\n else:\n labels = torch.stack([ex.label for ex in batch])\n center_windows = torch.stack([ex.center_window for ex in batch])\n colorfulness = torch.stack([ex.colorfulness for ex in batch])\n # padding will put number of neighbors before the batch size\n neighbor_boxes = pad_sequence([torch.stack(ex.neighbor_boxes) for ex in batch]).permute(1,0,2)\n neighbor_windows = [torch.stack(ex.neighbor_windows) for ex in batch]\n neighbor_windows = pad_sequence(neighbor_windows).permute(1,0,2,3,4)\n neighbor_radii = pad_sequence([ex.neighbor_radii for ex in batch], padding_value=-1).permute(1,0)\n neighbor_angles = pad_sequence([ex.neighbor_angles for ex in batch], padding_value=-1).permute(1,0)\n return Batch(center_bbs=center_bbs, labels=labels, center_windows=center_windows, neighbor_boxes=neighbor_boxes, neighbor_windows=neighbor_windows,neighbor_radii=neighbor_radii, neighbor_angles=neighbor_angles, colorfulness=colorfulness)\n\n def get_weight_vec(self, classes):\n weight_per_class = {} \n N = len(self.uuids)\n for name in classes:\n if name not in self.class_stats:\n weight_per_class[name] = 0\n else:\n weight_per_class[name] = N/float(self.class_stats[name]) \n weight = [0] * N \n for idx, uuid in enumerate(self.uuids):\n lst = get_example_for_uuid(uuid, self.session)\n weight[idx] = weight_per_class[lst.label]\n return weight\n\n\n def print_stats(self):\n tot = len(self.uuids)\n logger.debug(f\"There are {tot} objects\")\n for key in self.class_stats:\n sub = self.class_stats[key]\n percentage = float(sub)/tot\n logger.debug(f\"{key}: {sub} ({percentage})\")\n\n\n\n",
"from pdfminer.pdfparser import PDFParser\nfrom pdfminer.pdfdocument import PDFDocument\nfrom pdfminer.pdfinterp import PDFResourceManager\nfrom pdfminer.layout import LTTextBox, LTText, LTTextLine, LTChar\nfrom pdfminer.pdfinterp import PDFPageInterpreter\nfrom pdfminer.pdfpage import PDFPage\nfrom pdfminer.layout import LAParams\nfrom pdfminer.converter import PDFPageAggregator\n\nimport pandas as pd\n\ndef update_pos(pos1,pos2):\n \"\"\"\n Get the coordinate of the bounding box containing two parts\n :param pos1: Coordinate of the first part.\n :param pos2: Coordinate of the second part.\n :return: Coordinate of the bounding box containing the two parts\n \"\"\"\n x1 = min(pos1[0],pos2[0])\n y1 = min(pos1[1],pos2[1])\n x2 = max(pos1[2],pos2[2])\n y2 = max(pos1[3],pos2[3])\n return (x1,y1,x2,y2)\n\ndef parse_pdf(fp):\n \"\"\"\n Parse the pdf with pdfminer to get the unicode representation.\n :param fp: Input file.\n :return: Pandas frame containing the tokens and the range of the coordinates\n \"\"\"\n with open(fp, \"rb\") as fh:\n parser = PDFParser(fh)\n doc = PDFDocument(parser)\n laparams = LAParams()\n rsrcmgr = PDFResourceManager()\n device = PDFPageAggregator(rsrcmgr=rsrcmgr, laparams=laparams)\n interpreter = PDFPageInterpreter(rsrcmgr, device)\n texts = []\n text = ''\n positions = []\n pos = (10000,10000,-1,-1)\n pages = []\n for idx, page in enumerate(PDFPage.create_pages(doc)):\n interpreter.process_page(page)\n layout = device.get_result()\n #print(layout.bbox)\n for child in layout:\n if isinstance(child, LTTextBox):\n for line in child:\n for char in line:\n if isinstance(char, LTChar):\n text += char.get_text()\n pos = update_pos(char.bbox,pos)\n page = idx\n else:\n texts.append(text)\n positions.append(pos)\n pages.append(page)\n text = ''\n pos = (10000,10000,-1,-1)\n if len(positions) == 0:\n return None, None \n x1, y1, x2, y2 = list(zip(*positions))\n df = pd.DataFrame({\n \"text\": texts,\n \"x1\": x1,\n \"y1\":y1,\n \"x2\": x2,\n \"y2\":y2,\n \"page\": pages\n })\n return df, layout.bbox\n"
] |
[
[
"torch.zeros",
"torch.stack",
"torch.norm",
"torch.nn.utils.rnn.pad_sequence",
"torch.ones",
"torch.abs"
],
[
"pandas.DataFrame"
]
] |
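Sketch for the row above: XMLLoader.collate pads a variable number of neighbour boxes per example with pad_sequence and then moves the batch dimension to the front with permute. The toy tensors below (not the Cosmos ingest objects) show what that shape handling produces.

import torch
from torch.nn.utils.rnn import pad_sequence

ex_a = torch.ones(3, 4)   # example with 3 neighbour boxes
ex_b = torch.ones(1, 4)   # example with 1 neighbour box
padded = pad_sequence([ex_a, ex_b], padding_value=-1)  # shape (max_n, batch, 4)
print(padded.shape)                                    # torch.Size([3, 2, 4])
batch_first = padded.permute(1, 0, 2)                  # shape (batch, max_n, 4)
print(batch_first[1])  # second example: one real row followed by two rows of -1 padding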
jercas/MLiA_Learning_Code
|
[
"1d8c09f2fcbc81342941f6af97403bd2eb07483b"
] |
[
"machinelearninginaction/Ch13/extras/createFig3.py"
] |
[
"'''\nCreated on Jun 1, 2011\n\n@author: Peter\n'''\nfrom numpy import *\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport pca\n\nn = 1000 #number of points to create\nxcord0 = []; ycord0 = []\nxcord1 = []; ycord1 = []\nxcord2 = []; ycord2 = []\nmarkers =[]\ncolors =[]\nfw = open('testSet3.txt','w')\nfor i in range(n):\n groupNum = int(3*random.uniform())\n [r0,r1] = random.standard_normal(2)\n if groupNum == 0:\n x = r0 + 16.0\n y = 1.0*r1 + x\n xcord0.append(x)\n ycord0.append(y)\n elif groupNum == 1:\n x = r0 + 8.0\n y = 1.0*r1 + x\n xcord1.append(x)\n ycord1.append(y)\n elif groupNum == 2:\n x = r0 + 0.0\n y = 1.0*r1 + x\n xcord2.append(x)\n ycord2.append(y)\n fw.write(\"%f\\t%f\\t%d\\n\" % (x, y, groupNum))\n\nfw.close()\nfig = plt.figure()\nax = fig.add_subplot(211)\nax.scatter(xcord0,ycord0, marker='^', s=90)\nax.scatter(xcord1,ycord1, marker='o', s=50, c='red')\nax.scatter(xcord2,ycord2, marker='v', s=50, c='yellow')\nax = fig.add_subplot(212)\nmyDat = pca.loadDataSet('testSet3.txt')\nlowDDat,reconDat = pca.pca(myDat[:,0:2],1)\nlabel0Mat = lowDDat[nonzero(myDat[:,2]==0)[0],:2][0] #get the items with label 0\nlabel1Mat = lowDDat[nonzero(myDat[:,2]==1)[0],:2][0] #get the items with label 1\nlabel2Mat = lowDDat[nonzero(myDat[:,2]==2)[0],:2][0] #get the items with label 2\n#ax.scatter(label0Mat[:,0],label0Mat[:,1], marker='^', s=90)\n#ax.scatter(label1Mat[:,0],label1Mat[:,1], marker='o', s=50, c='red')\n#ax.scatter(label2Mat[:,0],label2Mat[:,1], marker='v', s=50, c='yellow')\nax.scatter(label0Mat[:,0],zeros(shape(label0Mat)[0]), marker='^', s=90)\nax.scatter(label1Mat[:,0],zeros(shape(label1Mat)[0]), marker='o', s=50, c='red')\nax.scatter(label2Mat[:,0],zeros(shape(label2Mat)[0]), marker='v', s=50, c='yellow')\nplt.show()"
] |
[
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
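Sketch for the row above: the plotting script projects the 2-D point cloud onto its first principal component through the book's pca.pca helper, which is not included in this row. A minimal stand-in for that step in plain NumPy, on synthetic data and with hypothetical variable names:

import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=200)
data = np.column_stack([x, x + 0.3 * rng.normal(size=200)])  # correlated 2-D cloud

mean = data.mean(axis=0)
centered = data - mean
eigvals, eigvecs = np.linalg.eigh(np.cov(centered, rowvar=False))
top = eigvecs[:, np.argmax(eigvals)]   # principal direction
low_d = centered @ top                 # 1-D scores (analogue of lowDDat)
recon = np.outer(low_d, top) + mean    # back-projection (analogue of reconDat)
print(low_d.shape, recon.shape)        # (200,) (200, 2)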
kant/redrock
|
[
"572d81c60a940f9c132b715dde793e2657962e00"
] |
[
"rest-api/python/resources.py"
] |
[
"#\n# (C) Copyright IBM Corp. 2015, 2016\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n# coding=utf-8\nimport numpy as np\n\n###############################################################################\n# Data files for distance and cluster\n###############################################################################\ndef loadDistanceClusterData(homePath):\n global Feat, words, freqs, clusters\n ## read model data\n Feat = np.load(homePath + '/rest-api/python/distance/w2v_may1_may19_june1_june11.npy')\n words = np.load(homePath + '/rest-api/python/distance/word_may1_may19_june1_june11.npy')\n freqs = np.load(homePath + '/rest-api/python/distance/freq_may1_may19_june1_june11.npy')\n clusters = np.load(homePath + '/rest-api/python/cluster/clusterK5_may1_may19_june1_june11.npy')\n"
] |
[
[
"numpy.load"
]
] |
developmentseed/pg_mvt
|
[
"10fa599928fc96ea30fad63289e40e39e564845d"
] |
[
"tests/routes/test_tiles.py"
] |
[
"\"\"\"Test Tiles endpoints.\"\"\"\n\nimport mapbox_vector_tile\nimport numpy as np\n\n\ndef test_tilejson(app):\n \"\"\"Test TileJSON endpoint.\"\"\"\n response = app.get(\"/public.landsat_wrs/tilejson.json\")\n assert response.status_code == 200\n\n resp_json = response.json()\n assert resp_json[\"name\"] == \"public.landsat_wrs\"\n assert resp_json[\"minzoom\"] == 5\n assert resp_json[\"maxzoom\"] == 12\n\n np.testing.assert_almost_equal(\n resp_json[\"bounds\"], [-180.0, -82.6401062011719, 180.0, 82.6401062011719]\n )\n\n response = app.get(\"/public.landsat_wrs/tilejson.json?minzoom=1&maxzoom=2\")\n assert response.status_code == 200\n\n resp_json = response.json()\n assert resp_json[\"name\"] == \"public.landsat_wrs\"\n assert resp_json[\"minzoom\"] == 1\n assert resp_json[\"maxzoom\"] == 2\n\n response = app.get(\n \"/public.landsat_wrs/tilejson.json?minzoom=1&maxzoom=2&limit=1000\"\n )\n assert response.status_code == 200\n\n resp_json = response.json()\n assert resp_json[\"name\"] == \"public.landsat_wrs\"\n assert resp_json[\"minzoom\"] == 1\n assert resp_json[\"maxzoom\"] == 2\n assert \"?limit=1000\" in resp_json[\"tiles\"][0]\n\n\ndef test_tile(app):\n \"\"\"request a tile.\"\"\"\n response = app.get(\"/tiles/public.landsat_wrs/0/0/0.pbf\")\n assert response.status_code == 200\n decoded = mapbox_vector_tile.decode(response.content)\n assert len(decoded[\"default\"][\"features\"]) == 10000\n\n response = app.get(\"/tiles/public.landsat_wrs/0/0/0.pbf?limit=1000\")\n assert response.status_code == 200\n decoded = mapbox_vector_tile.decode(response.content)\n assert len(decoded[\"default\"][\"features\"]) == 1000\n assert [\"id\", \"pr\", \"row\", \"path\", \"ogc_fid\"] == list(\n decoded[\"default\"][\"features\"][0][\"properties\"]\n )\n\n response = app.get(\n \"/tiles/public.landsat_wrs/0/0/0.pbf?limit=1&columns=pr,row,path\"\n )\n assert response.status_code == 200\n decoded = mapbox_vector_tile.decode(response.content)\n assert [\"pr\", \"row\", \"path\"] == list(\n decoded[\"default\"][\"features\"][0][\"properties\"]\n )\n\n\n# def test_tile_tms(app):\n# \"\"\"request a tile with specific TMS.\"\"\"\n# response = app.get(\"/tiles/WorldCRS84Quad/public.landsat_wrs/0/0/0.pbf\")\n# assert response.status_code == 200\n# decoded = mapbox_vector_tile.decode(response.content)\n# assert len(decoded[\"default\"][\"features\"]) > 1000\n\n# response = app.get(\"/tiles/WorldCRS84Quad/public.landsat_wrs/0/0/0.pbf?limit=1000\")\n# assert response.status_code == 200\n# decoded = mapbox_vector_tile.decode(response.content)\n# assert len(decoded[\"default\"][\"features\"]) <= 1000\n# assert [\"id\", \"pr\", \"row\", \"path\", \"ogc_fid\"] == list(\n# decoded[\"default\"][\"features\"][0][\"properties\"]\n# )\n\n# response = app.get(\n# \"/tiles/WorldCRS84Quad/public.landsat_wrs/0/0/0.pbf?limit=1&columns=pr,row,path\"\n# )\n# assert response.status_code == 200\n# decoded = mapbox_vector_tile.decode(response.content)\n# assert [\"pr\", \"row\", \"path\"] == list(\n# decoded[\"default\"][\"features\"][0][\"properties\"]\n# )\n\n\ndef test_function_tilejson(app):\n \"\"\"Test TileJSON endpoint.\"\"\"\n response = app.get(\"/squares/tilejson.json\")\n assert response.status_code == 200\n resp_json = response.json()\n assert resp_json[\"name\"] == \"squares\"\n assert resp_json[\"minzoom\"] == 5\n assert resp_json[\"maxzoom\"] == 12\n np.testing.assert_almost_equal(resp_json[\"bounds\"], [-180.0, -90, 180.0, 90])\n\n response = app.get(\"/squares/tilejson.json?minzoom=1&maxzoom=2\")\n assert 
response.status_code == 200\n resp_json = response.json()\n assert resp_json[\"name\"] == \"squares\"\n assert resp_json[\"minzoom\"] == 1\n assert resp_json[\"maxzoom\"] == 2\n\n response = app.get(\"/squares/tilejson.json?minzoom=1&maxzoom=2&depth=4\")\n assert response.status_code == 200\n resp_json = response.json()\n assert resp_json[\"name\"] == \"squares\"\n assert resp_json[\"minzoom\"] == 1\n assert resp_json[\"maxzoom\"] == 2\n assert \"?depth=4\" in resp_json[\"tiles\"][0]\n\n\ndef test_function_tile(app):\n \"\"\"request a tile.\"\"\"\n response = app.get(\"/tiles/squares/0/0/0.pbf\")\n assert response.status_code == 200\n decoded = mapbox_vector_tile.decode(response.content)\n assert len(decoded[\"default\"][\"features\"]) == 4\n\n response = app.get(\"/tiles/squares/0/0/0.pbf?depth=4\")\n assert response.status_code == 200\n decoded = mapbox_vector_tile.decode(response.content)\n assert len(decoded[\"default\"][\"features\"]) == 16\n"
] |
[
[
"numpy.testing.assert_almost_equal"
]
] |
datvuthanh/SPIMNET
|
[
"aa1518511e2c910290b6a31b19e942b49e025b38"
] |
[
"processing.py"
] |
[
"import glob\nimport imageio\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport PIL\nfrom tensorflow.keras import layers\nimport time\nimport numpy as np\n\nfrom sklearn import metrics\nfrom scipy import interpolate\n\nimport tensorflow as tf\nimport tensorflow_addons as tfa\n\ndef load(image_file):\n image = tf.io.read_file(image_file)\n image = tf.image.decode_jpeg(image)\n\n w = tf.shape(image)[1]\n w = w // 4\n\n rgb_pos = image[:, :w, :]\n nir_pos = image[:, w * 1:w * 2, :]\n rgb_neg = image[:, w * 2:w * 3, :]\n nir_neg = image[:, w * 3:w * 4, :]\n\n rgb_pos = tf.cast(rgb_pos, tf.float32)\n nir_pos = tf.cast(nir_pos, tf.float32)\n rgb_neg = tf.cast(rgb_neg, tf.float32)\n nir_neg = tf.cast(nir_neg, tf.float32)\n\n return rgb_pos, nir_pos, rgb_neg, nir_neg\n\n\n# cell 3: data augmentation\n\ndef resize(input_l, input_r, target_l, target_r, height, width):\n input_l = tf.image.resize(input_l, [height, width],\n method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)\n input_r = tf.image.resize(input_r, [height, width],\n method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)\n target_l = tf.image.resize(target_l, [height, width],\n method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)\n target_r = tf.image.resize(target_r, [height, width],\n method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)\n\n return input_l, input_r, target_l, target_r\n\n\ndef random_crop(input_l, input_r, target_l, target_r):\n stacked_image = tf.stack([input_l, input_r, target_l, target_r], axis=0)\n cropped_image = tf.image.random_crop(stacked_image, size=[4, IMG_HEIGHT, IMG_WIDTH, 3])\n\n return cropped_image[0], cropped_image[1], cropped_image[2], cropped_image[3]\n\n\n# normalizing the images to [-1, 1]\ndef normalize(input_l, input_r, target_l, target_r):\n input_l = (input_l / 127.5) - 1\n input_r = (input_r / 127.5) - 1\n target_l = (target_l / 127.5) - 1\n target_r = (target_r / 127.5) - 1\n\n return input_l, input_r, target_l, target_r\n\n\ndef random_jitter(input_l, input_r, target_l, target_r):\n # resize to 68x68\n #input_l, input_r, target_l, target_r = resize(input_l, input_r, target_l, target_r, 68, 68)\n\n # crop\n #input_l, input_r, target_l, target_r = random_crop(input_l, input_r, target_l, target_r)\n\n # flip_left_right\n if tf.random.uniform(()) > 0.5:\n input_l = tf.image.flip_left_right(input_l)\n input_r = tf.image.flip_left_right(input_r)\n target_l = tf.image.flip_left_right(target_l)\n target_r = tf.image.flip_left_right(target_r)\n\n # flip_up_down\n if tf.random.uniform(()) > 0.5:\n input_l = tf.image.flip_up_down(input_l)\n input_r = tf.image.flip_up_down(input_r)\n target_l = tf.image.flip_up_down(target_l)\n target_r = tf.image.flip_up_down(target_r)\n\n # brighness change\n if tf.random.uniform(()) > 0.5:\n rand_value = tf.random.uniform((), minval=-5.0, maxval=5.0)\n input_l = input_l + rand_value\n\n rand_value = tf.random.uniform((), minval=-5.0, maxval=5.0)\n input_r = input_r + rand_value\n\n rand_value = tf.random.uniform((), minval=-5.0, maxval=5.0)\n target_l = target_l + rand_value\n\n rand_value = tf.random.uniform((), minval=-5.0, maxval=5.0)\n target_r = target_r + rand_value\n\n # contrast change\n if tf.random.uniform(()) > 0.5:\n rand_value = tf.random.uniform((), minval=0.8, maxval=1.2)\n mean_value = tf.reduce_mean(input_l)\n input_l = (input_l - mean_value) * rand_value + mean_value\n\n rand_value = tf.random.uniform((), minval=0.8, maxval=1.2)\n mean_value = tf.reduce_mean(input_r)\n input_r = (input_r - mean_value) * rand_value + mean_value\n\n rand_value = 
tf.random.uniform((), minval=0.8, maxval=1.2)\n mean_value = tf.reduce_mean(target_l)\n target_l = (target_l - mean_value) * rand_value + mean_value\n\n rand_value = tf.random.uniform((), minval=0.8, maxval=1.2)\n mean_value = tf.reduce_mean(target_r)\n target_r = (target_r - mean_value) * rand_value + mean_value\n\n # clip value\n input_l = tf.clip_by_value(input_l, clip_value_min=0.0, clip_value_max=255.0)\n input_r = tf.clip_by_value(input_r, clip_value_min=0.0, clip_value_max=255.0)\n target_l = tf.clip_by_value(target_l, clip_value_min=0.0, clip_value_max=255.0)\n target_r = tf.clip_by_value(target_r, clip_value_min=0.0, clip_value_max=255.0)\n\n # rotate positive samples for making hard positive cases\n if tf.random.uniform(()) > 0.5:\n if tf.random.uniform(()) < 0.5:\n input_l = tf.image.rot90(input_l,k=1) # 90\n input_r = tf.image.rot90(input_r,k=1) # 90\n else:\n input_l = tf.image.rot90(input_l,k=3) # 270\n input_r = tf.image.rot90(input_r,k=3) # 270\n\n return input_l, input_r, target_l, target_r\n\n\ndef load_image_train(image_file):\n input_l, input_r, target_l, target_r = load(image_file)\n input_l, input_r, target_l, target_r = random_jitter(input_l, input_r, target_l, target_r)\n input_l, input_r, target_l, target_r = normalize(input_l, input_r, target_l, target_r)\n\n return input_l, input_r, target_l, target_r\n\n\ndef load_image_test(image_file):\n input_l, input_r, target_l, target_r = load(image_file)\n input_l, input_r, target_l, target_r = resize(input_l, input_r, target_l, target_r, IMG_HEIGHT, IMG_WIDTH)\n input_l, input_r, target_l, target_r = normalize(input_l, input_r, target_l, target_r)\n\n return input_l, input_r, target_l, target_r\n"
] |
[
[
"tensorflow.shape",
"tensorflow.io.read_file",
"tensorflow.image.rot90",
"tensorflow.random.uniform",
"tensorflow.image.random_crop",
"tensorflow.clip_by_value",
"tensorflow.image.flip_left_right",
"tensorflow.stack",
"tensorflow.image.flip_up_down",
"tensorflow.image.resize",
"tensorflow.reduce_mean",
"tensorflow.image.decode_jpeg",
"tensorflow.cast"
]
] |
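Sketch for the row above: the contrast step in random_jitter rescales each patch around its own mean with (x - mean) * k + mean, so the mean is preserved while the spread changes. A standalone TensorFlow check on a toy tensor, not the SPIMNET data:

import tensorflow as tf

patch = tf.constant([[10., 20.], [30., 40.]])
k = 1.2
mean = tf.reduce_mean(patch)
adjusted = (patch - mean) * k + mean
print(float(mean), float(tf.reduce_mean(adjusted)))  # 25.0 and ~25.0, mean preserved up to rounding
print(adjusted.numpy())                              # roughly [[ 7. 19.] [31. 43.]], spread widened by 20%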
josepfont65/neurodsp
|
[
"a7c5b72665eed6368e29bf4f15443a28a2e18732",
"a7c5b72665eed6368e29bf4f15443a28a2e18732"
] |
[
"neurodsp/plts/time_series.py",
"neurodsp/sim/aperiodic.py"
] |
[
"\"\"\"Plots for time series.\"\"\"\n\nfrom itertools import repeat, cycle\n\nimport numpy as np\nimport numpy.ma as ma\nimport matplotlib.pyplot as plt\n\nfrom neurodsp.plts.style import style_plot\nfrom neurodsp.plts.utils import check_ax, savefig\n\n###################################################################################################\n###################################################################################################\n\n@savefig\n@style_plot\ndef plot_time_series(times, sigs, labels=None, colors=None, ax=None):\n \"\"\"Plot a time series.\n\n Parameters\n ----------\n times : 1d array or list of 1d array\n Time definition(s) for the time series to be plotted.\n sigs : 1d array or list of 1d array\n Time series to plot.\n labels : list of str, optional\n Labels for each time series.\n cols : str or list of str\n Colors to use to plot lines.\n ax : matplotlib.Axes, optional\n Figure axes upon which to plot.\n \"\"\"\n\n ax = check_ax(ax, (15, 3))\n\n times = repeat(times) if isinstance(times, np.ndarray) else times\n sigs = [sigs] if isinstance(sigs, np.ndarray) else sigs\n\n if labels is not None:\n labels = [labels] if not isinstance(labels, list) else labels\n else:\n labels = repeat(labels)\n\n if colors is not None:\n colors = repeat(colors) if not isinstance(colors, list) else cycle(colors)\n else:\n colors = cycle(['k', 'r', 'b', 'g', 'm', 'c'])\n\n for time, sig, color, label in zip(times, sigs, colors, labels):\n ax.plot(time, sig, color, label=label)\n\n ax.set_xlabel('Time (s)')\n ax.set_ylabel('Voltage (uV)')\n\n\n@savefig\n@style_plot\ndef plot_instantaneous_measure(times, sigs, measure='phase', ax=None, **plt_kwargs):\n \"\"\"Plot an instantaneous measure, of phase, amplitude or frequency.\n\n Parameters\n ----------\n times : 1d array or list of 1d array\n Time definition(s) for the time series to be plotted.\n sigs : 1d array or list of 1d array\n Time series to plot.\n measure : {'phase', 'amplitude', 'frequency'}\n Which kind of measure is being plotted.\n ax : matplotlib.Axes, optional\n Figure axes upon which to plot.\n **plt_kwargs\n Keyword arguments to pass into `plot_time_series`.\n \"\"\"\n\n if measure not in ['phase', 'amplitude', 'frequency']:\n raise ValueError('Measure not understood.')\n\n if measure == 'phase':\n plot_time_series(times, sigs, ax=ax, ylabel='Phase (rad)', **plt_kwargs)\n plt.yticks([-np.pi, 0, np.pi], ['-$\\pi$', 0, '$\\pi$'])\n elif measure == 'amplitude':\n plot_time_series(times, sigs, ax=ax, ylabel='Amplitude', **plt_kwargs)\n elif measure == 'frequency':\n plot_time_series(times, sigs, ax=ax, ylabel='Instantaneous\\nFrequency (Hz)', **plt_kwargs)\n\n\n@savefig\n@style_plot\ndef plot_bursts(times, sig, bursting, ax=None, **plt_kwargs):\n \"\"\"Plot a time series, with labeled bursts.\n\n Parameters\n ----------\n times : 1d array\n Time definition for the time series to be plotted.\n sig : 1d array\n Time series to plot.\n bursting : 1d array\n A boolean array which indicates identified bursts.\n ax : matplotlib.Axes, optional\n Figure axes upon which to plot.\n **plt_kwargs\n Keyword arguments to pass into `plot_time_series`.\n \"\"\"\n\n ax = check_ax(ax, (15, 3))\n\n bursts = ma.array(sig, mask=np.invert(bursting))\n plot_time_series(times, [sig, bursts], ax=ax, **plt_kwargs)\n",
"\"\"\"Simulating time series, with aperiodic activity.\"\"\"\n\nimport numpy as np\nfrom scipy.stats import zscore\n\nfrom neurodsp.filt import filter_signal, infer_passtype\nfrom neurodsp.filt.fir import compute_filter_length\nfrom neurodsp.filt.checks import check_filter_definition\nfrom neurodsp.utils import remove_nans\nfrom neurodsp.spectral import rotate_powerlaw\nfrom neurodsp.utils.data import create_times\nfrom neurodsp.utils.decorators import normalize\nfrom neurodsp.sim.transients import sim_synaptic_kernel\n\n###################################################################################################\n###################################################################################################\n\n@normalize\ndef sim_poisson_pop(n_seconds, fs, n_neurons=1000, firing_rate=2):\n \"\"\"Simulate a Poisson population.\n\n Parameters\n ----------\n n_seconds : float\n Simulation time, in seconds.\n fs : float\n Sampling rate of simulated signal, in Hz.\n n_neurons : int, optional, default: 1000\n Number of neurons in the simulated population.\n firing_rate : float, optional, default: 2\n Firing rate of individual neurons in the population.\n\n Returns\n -------\n sig : 1d array\n Simulated population activity.\n\n Notes\n -----\n The simulated signal is essentially white noise, but satisfies the Poisson\n property, i.e. mean(X) = var(X).\n\n The lambda parameter of the Poisson process (total rate) is determined as\n firing rate * number of neurons, i.e. summation of Poisson processes is still\n a Poisson processes.\n\n Note that the Gaussian approximation for a sum of Poisson processes is only\n a good approximation for large lambdas.\n \"\"\"\n\n # Poisson population rate signal scales with # of neurons and individual rate\n lam = n_neurons * firing_rate\n\n # Variance is equal to the mean\n sig = np.random.normal(loc=lam, scale=lam**0.5, size=int(n_seconds * fs))\n\n # Enforce that sig is non-negative in cases of low firing rate\n sig[np.where(sig < 0.)] = 0.\n\n return sig\n\n\n@normalize\ndef sim_synaptic_current(n_seconds, fs, n_neurons=1000, firing_rate=2.,\n tau_r=0., tau_d=0.01, t_ker=None):\n \"\"\"Simulate a signal as a synaptic current, which has 1/f characteristics with a knee.\n\n Parameters\n ----------\n n_seconds : float\n Simulation time, in seconds.\n fs : float\n Sampling rate of simulated signal, in Hz.\n n_neurons : int, optional, default: 1000\n Number of neurons in the simulated population.\n firing_rate : float, optional, default: 2\n Firing rate of individual neurons in the population.\n tau_r : float, optional, default: 0.\n Rise time of synaptic kernel, in seconds.\n tau_d : float, optional, default: 0.01\n Decay time of synaptic kernel, in seconds.\n t_ker : float, optional\n Length of time of the simulated synaptic kernel, in seconds.\n\n Returns\n -------\n sig : 1d array\n Simulated synaptic current.\n\n Notes\n -----\n The resulting signal is most similar to unsigned intracellular current or conductance change.\n \"\"\"\n\n # If not provided, compute t_ker as a function of decay time constant\n if t_ker is None:\n t_ker = 5. * tau_d\n\n # Simulate an extra bit because the convolution will snip it. 
Turn off normalization for this sig\n sig = sim_poisson_pop((n_seconds + t_ker), fs, n_neurons, firing_rate, mean=None, variance=None)\n ker = sim_synaptic_kernel(t_ker, fs, tau_r, tau_d)\n sig = np.convolve(sig, ker, 'valid')[:-1]\n\n return sig\n\n\n@normalize\ndef sim_random_walk(n_seconds, fs, theta=1., mu=0., sigma=5.):\n \"\"\"Simulate a mean-reverting random walk, as an Ornstein-Uhlenbeck process.\n\n Parameters\n ----------\n n_seconds : float\n Simulation time, in seconds.\n fs : float\n Sampling rate of simulated signal, in Hz.\n theta : float, optional, default: 1.0\n Memory scale parameter. Larger theta values create faster fluctuations.\n mu : float, optional, default: 0.0\n Mean of the random walk.\n sigma : float, optional, default: 5.0\n Standard deviation of the random walk.\n\n Returns\n -------\n sig: 1d array\n Simulated random walk signal.\n\n Notes\n -----\n The random walk is simulated as a discretized Ornstein-Uhlenbeck process:\n\n `dx = theta*(x-mu)*dt + sigma*dWt`\n\n Where:\n\n - mu : mean\n - sigma : standard deviation\n - theta : memory scale\n - dWt : increments of Wiener process, i.e. white noise\n\n References\n ----------\n See the wikipedia page for the integral solution:\n\n https://en.wikipedia.org/wiki/Ornstein%E2%80%93Uhlenbeck_process#Solution\n \"\"\"\n\n times = create_times(n_seconds, fs)\n\n x0 = mu\n dt = times[1] - times[0]\n ws = np.random.normal(size=len(times))\n ex = np.exp(-theta * times)\n ws[0] = 0.\n\n sig = x0 * ex + mu * (1. - ex) + sigma * ex * \\\n np.cumsum(np.exp(theta * times) * np.sqrt(dt) * ws)\n\n return sig\n\n\n@normalize\ndef sim_powerlaw(n_seconds, fs, exponent=-2.0, f_range=None, **filter_kwargs):\n \"\"\"Simulate a power law time series, with a specified exponent.\n\n Parameters\n ----------\n n_seconds : float\n Simulation time, in seconds.\n fs : float\n Sampling rate of simulated signal, in Hz.\n exponent : float, optional, default: -2\n Desired power-law exponent, of the form P(f)=f^exponent.\n f_range : list of [float, float] or None, optional\n Frequency range to filter simulated data, as [f_lo, f_hi], in Hz.\n **filter_kwargs : kwargs, optional\n Keyword arguments to pass to `filter_signal`.\n\n Returns\n -------\n sig: 1d array\n Time-series with the desired power law exponent.\n \"\"\"\n\n # Get the number of samples to simulate for the signal\n # If filter is to be filtered, with FIR, add extra to compensate for edges\n if f_range and filter_kwargs.get('filter_type', None) != 'iir':\n\n pass_type = infer_passtype(f_range)\n filt_len = compute_filter_length(fs, pass_type,\n *check_filter_definition(pass_type, f_range),\n n_seconds=filter_kwargs.get('n_seconds', None),\n n_cycles=filter_kwargs.get('n_cycles', 3))\n\n n_samples = int(n_seconds * fs) + filt_len + 1\n\n else:\n n_samples = int(n_seconds * fs)\n\n sig = _create_powerlaw(n_samples, fs, exponent)\n\n if f_range is not None:\n sig = filter_signal(sig, fs, infer_passtype(f_range), f_range,\n remove_edges=True, **filter_kwargs)\n # Drop the edges, that were compensated for, if not using IIR (using FIR)\n if not filter_kwargs.get('filter_type', None) == 'iir':\n sig, _ = remove_nans(sig)\n\n return sig\n\n\ndef _create_powerlaw(n_samples, fs, exponent):\n \"\"\"Create a power law time series.\n\n Parameters\n ----------\n n_samples : int\n The number of samples to simulate\n fs : float\n Sampling rate of simulated signal, in Hz.\n exponent : float\n Desired power-law exponent, of the form P(f)=f^exponent.\n\n Returns\n -------\n sig: 1d array\n Time-series 
with the desired power law exponent.\n\n Notes\n -----\n This function create variable power law exponents by spectrally rotating white noise.\n \"\"\"\n\n # Start with white noise signal, that we will rotate, in frequency space\n sig = np.random.randn(n_samples)\n\n # Compute the FFT\n fft_output = np.fft.fft(sig)\n freqs = np.fft.fftfreq(len(sig), 1. / fs)\n\n # Rotate spectrum and invert, z-score to normalize.\n # Note: the delta exponent to be applied is divided by two, as\n # the FFT output is in units of amplitude not power\n fft_output_rot = rotate_powerlaw(freqs, fft_output, -exponent/2)\n sig = zscore(np.real(np.fft.ifft(fft_output_rot)))\n\n return sig\n"
] |
[
[
"numpy.invert",
"matplotlib.pyplot.yticks"
],
[
"numpy.random.randn",
"numpy.exp",
"numpy.where",
"numpy.fft.fft",
"numpy.sqrt",
"numpy.fft.ifft",
"numpy.convolve"
]
] |
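Sketch for the row above: _create_powerlaw shapes white noise in the frequency domain through neurodsp's rotate_powerlaw. The same idea, multiplying the FFT amplitudes by f**(exponent/2) because the spectrum is in amplitude rather than power units, can be written without neurodsp as a rough standalone approximation:

import numpy as np

def powerlaw_noise(n_samples, fs, exponent=-2.0, seed=0):
    rng = np.random.default_rng(seed)
    white = rng.standard_normal(n_samples)
    freqs = np.fft.rfftfreq(n_samples, 1 / fs)
    scale = np.ones_like(freqs)
    scale[1:] = freqs[1:] ** (exponent / 2)   # leave the DC bin untouched
    sig = np.fft.irfft(np.fft.rfft(white) * scale, n=n_samples)
    return (sig - sig.mean()) / sig.std()     # z-score, as in the original

sig = powerlaw_noise(n_samples=2000, fs=500, exponent=-2.0)
print(sig.shape, round(float(sig.std()), 3))  # (2000,) 1.0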
xinyandai/structural-nn
|
[
"373cec9ca2ee766ddb1d2a09eac4dd551d57e648"
] |
[
"image/imagequantizer.py"
] |
[
"import torch\nimport numpy as np\nfrom PIL import Image\nfrom glob import glob\nfrom image.dataquantizer import quantize\n\n\ndef load_image(location):\n img = Image.open(location)\n img.load()\n data = np.asarray(img, dtype=\"int32\")\n return data\n\n\ndef save_image(image, location):\n if isinstance(image, torch.Tensor):\n image = image.numpy()\n img = Image.fromarray(np.asarray(np.clip(image, 0, 255), dtype=\"uint8\"))\n img.save(location)\n\n\ndef load_imagenet():\n directory = \"/home/xinyan/program/data/youtube_frames_720p/*bmp\"\n # directory = \"data/tiny-imagenet-200/train/*/images/*.JPEG\"\n # directory = \"/research/jcheng2/xinyan/zzhang/AlexnetandVGG/ILSVRC2012/train/*/*.JPEG\"\n locations = glob(directory)[:100]\n print(locations)\n print(locations[:10])\n N = len(locations)\n first = load_image(locations[0])\n print(first.shape)\n vectors = np.empty(\n ([N] + list(first.shape)), dtype=np.float32\n )\n print(vectors.shape)\n for i, location in enumerate(locations):\n image = load_image(location)\n if image.shape == vectors[i, :].shape:\n vectors[i, :] = image.astype(np.float32)\n else:\n assert False\n return vectors\n\n\nif __name__ == \"__main__\":\n vectors = load_imagenet()\n for i in range(10):\n save_image(vectors[i, :], 'original_{}.png'.format(i))\n compressed_rq = quantize(vectors, m=1, depth=128)\n\n for i in range(10):\n save_image(compressed_rq[i, :], 'compressed_rq_{}.png'.format(i))\n\n compressed_pq = quantize(vectors, m=128, depth=1)\n for i in range(10):\n save_image(compressed_pq[i, :], 'compressed_pq_{}.png'.format(i))\n"
] |
[
[
"numpy.asarray",
"numpy.clip"
]
] |
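Sketch for the row above: save_image clips float pixel values to [0, 255] and casts to uint8 before handing the array to PIL. The same conversion on a synthetic image, with nothing written to disk and without the quantize helper:

import numpy as np
from PIL import Image

float_img = np.random.default_rng(0).normal(loc=128, scale=80, size=(8, 8, 3))
clipped = np.clip(float_img, 0, 255).astype("uint8")
img = Image.fromarray(clipped)   # an (H, W, 3) uint8 array becomes an RGB image
back = np.asarray(img, dtype="int32")
print(img.mode, back.shape, int(back.min()) >= 0, int(back.max()) <= 255)  # RGB (8, 8, 3) True True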
lcampagn/vispy
|
[
"fa5e2eab9bb3d956f87ae68a56e342913e58a305"
] |
[
"vispy/visuals/rectangle.py"
] |
[
"# -*- coding: utf-8 -*-\n# Copradiusight (c) 2014, Vispy Development Team.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n\n\n\"\"\"\nSimple ellipse visual based on PolygonVisual\n\"\"\"\n\nfrom __future__ import division\n\nimport numpy as np\nfrom ..color import Color\nfrom .polygon import PolygonVisual\n\n\nclass RectangleVisual(PolygonVisual):\n \"\"\"\n Displays a 2D rectangle with optional rounded corners\n\n Parameters\n ----------\n pos : array\n Center of the rectangle\n color : instance of Color\n The fill color to use.\n border_color : instance of Color\n The border color to use.\n height : float\n Length of the rectangle along y-axis\n Defaults to 1.0\n width : float\n Length of the rectangle along x-axis\n Defaults to 1.0\n radius : float | array\n Radii of curvatures of corners in clockwise order from top-left\n Defaults to 0.\n \"\"\"\n def __init__(self, pos=None, color='black', border_color=None,\n height=1.0, width=1.0, radius=[0., 0., 0., 0.], **kwargs):\n super(RectangleVisual, self).__init__()\n self.mesh.mode = 'triangle_fan'\n self._vertices = None\n self._pos = pos\n self._color = Color(color)\n self._border_color = Color(border_color)\n self._height = height\n self._width = width\n self.radius = radius\n self._update()\n\n def _generate_vertices(self, pos, radius, height, width):\n\n half_height = self._height / 2.\n half_width = self._width / 2.\n hw = min(half_height, half_width)\n\n num_segments = (radius / hw * 500.).astype(int)\n\n bias1 = np.full(4, half_width) - radius\n bias2 = np.full(4, half_height) - radius\n\n corner1 = np.empty([num_segments[0]+1, 3], dtype=np.float32)\n corner2 = np.empty([num_segments[1]+1, 3], dtype=np.float32)\n corner3 = np.empty([num_segments[2]+1, 3], dtype=np.float32)\n corner4 = np.empty([num_segments[3]+1, 3], dtype=np.float32)\n\n start_angle = 0.\n end_angle = np.pi / 2.\n\n theta = np.linspace(end_angle, start_angle, num_segments[0]+1)\n\n corner1[:, 0] = pos[0] - bias1[0] - radius[0] * np.sin(theta)\n corner1[:, 1] = pos[1] - bias2[0] - radius[0] * np.cos(theta)\n corner1[:, 2] = 0\n\n theta = np.linspace(start_angle, end_angle, num_segments[1]+1)\n\n corner2[:, 0] = pos[0] + bias1[1] + radius[1] * np.sin(theta)\n corner2[:, 1] = pos[1] - bias2[1] - radius[1] * np.cos(theta)\n corner2[:, 2] = 0\n\n theta = np.linspace(end_angle, start_angle, num_segments[2]+1)\n\n corner3[:, 0] = pos[0] + bias1[2] + radius[2] * np.sin(theta)\n corner3[:, 1] = pos[1] + bias2[2] + radius[2] * np.cos(theta)\n corner3[:, 2] = 0\n\n theta = np.linspace(start_angle, end_angle, num_segments[3]+1)\n\n corner4[:, 0] = pos[0] - bias1[3] - radius[3] * np.sin(theta)\n corner4[:, 1] = pos[1] + bias2[3] + radius[3] * np.cos(theta)\n corner4[:, 2] = 0\n\n output = np.concatenate(([[pos[0], pos[1], 0.]],\n [[pos[0] - half_width, pos[1], 0.]],\n corner1,\n [[pos[0], pos[1] - half_height, 0.]],\n corner2,\n [[pos[0] + half_width, pos[1], 0.]],\n corner3,\n [[pos[0], pos[1] + half_height, 0.]],\n corner4,\n [[pos[0] - half_width, pos[1], 0.]]))\n\n self._vertices = np.array(output, dtype=np.float32)\n\n @property\n def height(self):\n \"\"\" The height of the rectangle.\n \"\"\"\n return self._height\n\n @height.setter\n def height(self, height):\n if height <= 0.:\n raise ValueError('Height must be positive')\n self._height = height\n self._update()\n\n @property\n def width(self):\n \"\"\" The width of the rectangle.\n \"\"\"\n return self._width\n\n @width.setter\n def width(self, width):\n if width <= 0.:\n raise 
ValueError('Width must be positive')\n self._width = width\n self._update()\n\n @property\n def radius(self):\n \"\"\" The radius of curvature of rounded corners.\n \"\"\"\n return self._radius\n\n @radius.setter\n def radius(self, radius):\n half_height = self._height / 2.\n half_width = self._width / 2.\n hw = min(half_height, half_width)\n\n if isinstance(radius, (list, tuple)):\n if len(radius) != 4:\n raise ValueError(\"radius must be float or 4 value tuple/list\"\n \" (got %s of length %d)\" % (type(radius),\n len(radius)))\n\n if (radius > np.full(4, hw)).all():\n raise ValueError('Radius of curvature cannot be greater than\\\n half of min(width, height)')\n radius = np.array(radius, dtype=np.float32)\n\n else:\n if radius > hw:\n raise ValueError('Radius of curvature cannot be greater than\\\n half of min(width, height)')\n radius = np.full(4, radius)\n\n self._radius = radius\n self._update()\n\n def _update(self):\n if self._pos is None:\n return\n self._generate_vertices(pos=self._pos, radius=self._radius,\n height=self._height, width=self._width)\n \n if not self._color.is_blank:\n self.mesh.set_data(vertices=self._vertices, \n color=self._color.rgba)\n if not self._border_color.is_blank:\n self.border.set_data(pos=self._vertices[1:, ..., :2],\n color=self._border_color.rgba)\n\n self.update()\n"
] |
[
[
"numpy.concatenate",
"numpy.full",
"numpy.array",
"numpy.sin",
"numpy.empty",
"numpy.cos",
"numpy.linspace"
]
] |
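Sketch for the row above: each rounded corner built by _generate_vertices is a short arc sampled with linspace, sin and cos around an offset centre. One such quarter arc in plain NumPy, with a toy radius and centre and no vispy:

import numpy as np

radius = 0.2
center = np.array([0.8, 0.8])             # arc centre for a top-right corner
theta = np.linspace(0.0, np.pi / 2.0, 9)  # 8 segments spanning 90 degrees
arc = np.stack([center[0] + radius * np.cos(theta),
                center[1] + radius * np.sin(theta)], axis=1)
print(arc.shape)         # (9, 2)
print(arc[0], arc[-1])   # approximately [1.0, 0.8] and [0.8, 1.0], the two ends of the corner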
Vitalii36/full-cycle_ML_Solution
|
[
"8c449164fe6856ade851910642a149a6325337c0"
] |
[
"utils/trainer.py"
] |
[
"from sklearn.svm import SVC\n\nclass Estimator:\n @staticmethod\n def fit(train_x, train_y):\n return SVC(probability=True).fit(train_x, train_y)\n\n @staticmethod\n def predict(trained, test_x):\n return trained.predict(test_x)"
] |
[
[
"sklearn.svm.SVC"
]
] |
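Sketch for the row above: the Estimator class is a thin wrapper over sklearn's SVC. Exercising it on the iris dataset; the choice of data here is an assumption, not part of the project.

from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC

class Estimator:
    @staticmethod
    def fit(train_x, train_y):
        return SVC(probability=True).fit(train_x, train_y)

    @staticmethod
    def predict(trained, test_x):
        return trained.predict(test_x)

X, y = load_iris(return_X_y=True)
train_x, test_x, train_y, test_y = train_test_split(X, y, random_state=0)
model = Estimator.fit(train_x, train_y)
print(Estimator.predict(model, test_x)[:5])   # five class labels, e.g. something like [2 1 0 2 0]
print(model.predict_proba(test_x[:1]).shape)  # (1, 3), available because probability=True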
nuwanprabhath/kaggle-nba-career-prediction
|
[
"ff07bccd02faa7b0aa3a448884135f87927768d9"
] |
[
"src/data/imputer.py"
] |
[
"\ndef iterative_imputer(df, cols, operator, target_value ):\n \n# compare iterative imputation strategies for the horse colic dataset\n from numpy import mean\n from numpy import std\n from pandas import read_csv\n from sklearn.ensemble import RandomForestClassifier\n from sklearn.experimental import enable_iterative_imputer\n from sklearn.impute import IterativeImputer\n from sklearn.model_selection import cross_val_score\n from sklearn.model_selection import RepeatedStratifiedKFold\n from sklearn.pipeline import Pipeline\n from matplotlib import pyplot\n # load dataset\n url = 'https://raw.githubusercontent.com/jbrownlee/Datasets/master/horse-colic.csv'\n dataframe = read_csv(url, header=None, na_values='?')\n # split into input and output elements\n data = dataframe.values\n ix = [i for i in range(data.shape[1]) if i != 23]\n X, y = data[:, ix], data[:, 23]\n # evaluate each strategy on the dataset\n results = list()\n strategies = ['ascending', 'descending', 'roman', 'arabic', 'random']\n for s in strategies:\n # create the modeling pipeline\n pipeline = Pipeline(steps=[('i', IterativeImputer(imputation_order=s)), ('m', RandomForestClassifier())])\n # evaluate the model\n cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1)\n scores = cross_val_score(pipeline, X, y, scoring='accuracy', cv=cv, n_jobs=-1)\n # store results\n results.append(scores)\n print('>%s %.3f (%.3f)' % (s, mean(scores), std(scores)))\n # plot model performance for comparison\n pyplot.boxplot(results, labels=strategies, showmeans=True)\n pyplot.xticks(rotation=45)\n pyplot.show()"
] |
[
[
"sklearn.ensemble.RandomForestClassifier",
"matplotlib.pyplot.boxplot",
"sklearn.impute.IterativeImputer",
"numpy.mean",
"sklearn.model_selection.RepeatedStratifiedKFold",
"numpy.std",
"sklearn.model_selection.cross_val_score",
"matplotlib.pyplot.show",
"pandas.read_csv",
"matplotlib.pyplot.xticks"
]
] |
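Sketch for the row above: iterative_imputer wraps IterativeImputer in a pipeline and cross-validates it on the horse-colic CSV. The core imputation call, isolated on a tiny in-memory array with missing values and no download or cross-validation:

import numpy as np
from sklearn.experimental import enable_iterative_imputer  # noqa: F401, required opt-in import
from sklearn.impute import IterativeImputer

X = np.array([[1.0, 2.0],
              [2.0, 4.0],
              [3.0, np.nan],
              [np.nan, 8.0]])
imputer = IterativeImputer(imputation_order="ascending", random_state=0)
X_filled = imputer.fit_transform(X)
print(np.isnan(X_filled).any())  # False, both gaps now hold estimated values
print(X_filled.round(2))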
ghchen18/emnlp2021-sixt
|
[
"1081986bb1c867a64e84af186000008901294678"
] |
[
"fairseq/modules/transformer_layer.py"
] |
[
"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom typing import Dict, List, Optional\nfrom numpy.random import uniform\n\nimport torch\nimport torch.nn as nn\nfrom fairseq import utils\nfrom fairseq.modules import LayerNorm, MultiheadAttention\nfrom fairseq.modules.decoder_adapter import DecAdapter \nfrom fairseq.modules.quant_noise import quant_noise\nfrom fairseq.modules.fairseq_dropout import FairseqDropout\nfrom torch import Tensor\n\n\nclass TransformerEncoderLayer(nn.Module):\n \"\"\"Encoder layer block.\n\n In the original paper each operation (multi-head attention or FFN) is\n postprocessed with: `dropout -> add residual -> layernorm`. In the\n tensor2tensor code they suggest that learning is more robust when\n preprocessing each layer with layernorm and postprocessing with:\n `dropout -> add residual`. We default to the approach in the paper, but the\n tensor2tensor approach can be enabled by setting\n *args.encoder_normalize_before* to ``True``.\n\n Args:\n args (argparse.Namespace): parsed command-line arguments\n \"\"\"\n\n def __init__(self, args):\n super().__init__()\n self.embed_dim = args.encoder_embed_dim\n self.quant_noise = getattr(args, \"quant_noise_pq\", 0)\n self.quant_noise_block_size = getattr(args, \"quant_noise_pq_block_size\", 8)\n self.self_attn = self.build_self_attention(self.embed_dim, args)\n self.self_attn_layer_norm = LayerNorm(self.embed_dim)\n self.dropout_module = FairseqDropout(args.dropout, module_name=self.__class__.__name__)\n self.activation_fn = utils.get_activation_fn(\n activation=getattr(args, \"activation_fn\", \"relu\")\n )\n activation_dropout_p = getattr(args, \"activation_dropout\", 0)\n if activation_dropout_p == 0:\n # for backwards compatibility with models that use args.relu_dropout\n activation_dropout_p = getattr(args, \"relu_dropout\", 0)\n self.activation_dropout_module = FairseqDropout(\n float(activation_dropout_p), module_name=self.__class__.__name__\n )\n self.normalize_before = args.encoder_normalize_before\n self.fc1 = self.build_fc1(\n self.embed_dim, args.encoder_ffn_embed_dim, self.quant_noise, self.quant_noise_block_size\n )\n self.fc2 = self.build_fc2(\n args.encoder_ffn_embed_dim, self.embed_dim, self.quant_noise, self.quant_noise_block_size\n )\n\n self.final_layer_norm = LayerNorm(self.embed_dim)\n\n if args.xlmr_task in ['embnoft_ms']:\n self.encoder_bert_dropout_ratio = getattr(args, 'encoder_bert_dropout_ratio', 0.25)\n self.xlmr_attn = MultiheadAttention(\n self.embed_dim,\n args.encoder_attention_heads,\n kdim=getattr(args, \"encoder_embed_dim\", None),\n vdim=getattr(args, \"encoder_embed_dim\", None),\n dropout=args.attention_dropout,\n self_attention=False,\n encoder_decoder_attention=True,\n q_noise=self.quant_noise,\n qn_block_size=self.quant_noise_block_size,\n )\n else:\n self.xlmr_attn = None \n self.encoder_bert_dropout_ratio = 0\n \n if 'hfxlmr_2stage' in args.xlmr_task:\n self.decadapter = DecAdapter(self.embed_dim)\n else:\n self.decadapter = None \n\n\n def build_fc1(self, input_dim, output_dim, q_noise, qn_block_size):\n return quant_noise(nn.Linear(input_dim, output_dim), p=q_noise, block_size=qn_block_size)\n\n def build_fc2(self, input_dim, output_dim, q_noise, qn_block_size):\n return quant_noise(nn.Linear(input_dim, output_dim), p=q_noise, block_size=qn_block_size)\n\n def get_ratio(self):\n frand = float(uniform(0, 1))\n if frand < 
self.encoder_bert_dropout_ratio and self.training:\n return [1, 0]\n elif frand > 1 - self.encoder_bert_dropout_ratio and self.training:\n return [0, 1]\n else:\n return [0.5, 0.5]\n\n\n def build_self_attention(self, embed_dim, args):\n return MultiheadAttention(\n embed_dim,\n args.encoder_attention_heads,\n dropout=args.attention_dropout,\n self_attention=True,\n q_noise=self.quant_noise,\n qn_block_size=self.quant_noise_block_size,\n )\n\n def upgrade_state_dict_named(self, state_dict, name):\n \"\"\"\n Rename layer norm states from `...layer_norms.0.weight` to\n `...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to\n `...final_layer_norm.weight`\n \"\"\"\n layer_norm_map = {\"0\": \"self_attn_layer_norm\", \"1\": \"final_layer_norm\"}\n for old, new in layer_norm_map.items():\n for m in (\"weight\", \"bias\"):\n k = \"{}.layer_norms.{}.{}\".format(name, old, m)\n if k in state_dict:\n state_dict[\"{}.{}.{}\".format(name, new, m)] = state_dict[k]\n del state_dict[k]\n\n def forward(self, x, encoder_padding_mask, attn_mask: Optional[Tensor] = None, xlmr_state=None, use_decadapter=False):\n \"\"\"\n Args:\n x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`\n encoder_padding_mask (ByteTensor): binary ByteTensor of shape\n `(batch, seq_len)` where padding elements are indicated by ``1``.\n attn_mask (ByteTensor): binary tensor of shape `(tgt_len, src_len)`,\n where `tgt_len` is the length of output and `src_len` is the\n length of input, though here both are equal to `seq_len`.\n `attn_mask[tgt_i, src_j] = 1` means that when calculating the\n embedding for `tgt_i`, we exclude (mask out) `src_j`. This is\n useful for strided self-attention.\n\n Returns:\n encoded output of shape `(seq_len, batch, embed_dim)`\n \"\"\"\n # anything in original attn_mask = 1, becomes -1e8\n # anything in original attn_mask = 0, becomes 0\n # Note that we cannot use -inf here, because at some edge cases,\n # the attention weight (before softmax) for some padded element in query\n # will become -inf, which results in NaN in model parameters\n if attn_mask is not None:\n attn_mask = attn_mask.masked_fill(attn_mask.to(torch.bool), -1e8)\n\n residual = x\n if self.normalize_before:\n x = self.self_attn_layer_norm(x)\n x, _ = self.self_attn(\n query=x,\n key=x,\n value=x,\n key_padding_mask=encoder_padding_mask,\n attn_mask=attn_mask,\n )\n x = self.dropout_module(x)\n\n ## TODO: check the valid masks. attn_mask, key_padding_mask\n if self.xlmr_attn is not None and xlmr_state is not None: \n x_xlmr, _ = self.xlmr_attn( \n query = residual,\n key = xlmr_state,\n value = xlmr_state,\n key_padding_mask = encoder_padding_mask,\n attn_mask = attn_mask,\n )\n ratios = self.get_ratio()\n x_xlmr = self.dropout_module(x_xlmr)\n x = ratios[0] * x + ratios[1] * x_xlmr \n \n x = residual + x\n if not self.normalize_before:\n x = self.self_attn_layer_norm(x)\n\n residual = x\n if self.normalize_before:\n x = self.final_layer_norm(x)\n\n x = self.activation_fn(self.fc1(x))\n x = self.activation_dropout_module(x)\n x = self.fc2(x)\n x = self.dropout_module(x)\n x = residual + x\n if not self.normalize_before:\n x = self.final_layer_norm(x)\n \n if use_decadapter and self.decadapter is not None:\n x = self.decadapter(x, residual)\n\n return x\n\n\nclass TransformerDecoderLayer(nn.Module):\n \"\"\"Decoder layer block.\n\n In the original paper each operation (multi-head attention, encoder\n attention or FFN) is postprocessed with: `dropout -> add residual ->\n layernorm`. 
In the tensor2tensor code they suggest that learning is more\n robust when preprocessing each layer with layernorm and postprocessing with:\n `dropout -> add residual`. We default to the approach in the paper, but the\n tensor2tensor approach can be enabled by setting\n *args.decoder_normalize_before* to ``True``.\n\n Args:\n args (argparse.Namespace): parsed command-line arguments\n no_encoder_attn (bool, optional): whether to attend to encoder outputs\n (default: False).\n \"\"\"\n\n def __init__(\n self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False, no_self_attn=False,\n ):\n super().__init__()\n self.embed_dim = args.decoder_embed_dim\n self.dropout_module = FairseqDropout(args.dropout, module_name=self.__class__.__name__)\n self.quant_noise = getattr(args, \"quant_noise_pq\", 0)\n self.quant_noise_block_size = getattr(args, \"quant_noise_pq_block_size\", 8)\n\n self.cross_self_attention = getattr(args, \"cross_self_attention\", False)\n\n # use layerNorm rather than FusedLayerNorm for exporting.\n # char_inputs can be used to determint this.\n # TODO remove this once we update apex with the fix\n export = getattr(args, \"char_inputs\", False)\n\n if not no_self_attn:\n self.self_attn = self.build_self_attention(\n self.embed_dim,\n args,\n add_bias_kv=add_bias_kv,\n add_zero_attn=add_zero_attn,\n )\n self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export)\n else:\n self.self_attn = None \n self.self_attn_layer_norm = None \n\n self.activation_fn = utils.get_activation_fn(\n activation=getattr(args, \"activation_fn\", \"relu\")\n )\n activation_dropout_p = getattr(args, \"activation_dropout\", 0)\n if activation_dropout_p == 0:\n # for backwards compatibility with models that use args.relu_dropout\n activation_dropout_p = getattr(args, \"relu_dropout\", 0)\n self.activation_dropout_module = FairseqDropout(\n float(activation_dropout_p), module_name=self.__class__.__name__)\n self.normalize_before = args.decoder_normalize_before\n\n if no_encoder_attn:\n self.encoder_attn = None\n self.encoder_attn_layer_norm = None\n else:\n self.encoder_attn = self.build_encoder_attention(self.embed_dim, args)\n self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export)\n\n self.fc1 = self.build_fc1(\n self.embed_dim, args.decoder_ffn_embed_dim, self.quant_noise, self.quant_noise_block_size\n )\n self.fc2 = self.build_fc2(\n args.decoder_ffn_embed_dim, self.embed_dim, self.quant_noise, self.quant_noise_block_size\n )\n\n self.final_layer_norm = LayerNorm(self.embed_dim, export=export)\n self.need_attn = True\n\n self.onnx_trace = False\n if args.xlmr_task in ['embnoft_ms']:\n self.encoder_bert_dropout_ratio = getattr(args, 'encoder_bert_dropout_ratio', 0.25)\n self.xlmr_attn = MultiheadAttention(\n self.embed_dim,\n args.decoder_attention_heads,\n kdim=getattr(args, \"encoder_embed_dim\", None),\n vdim=getattr(args, \"encoder_embed_dim\", None),\n dropout=args.attention_dropout,\n self_attention=False,\n encoder_decoder_attention=True,\n q_noise=self.quant_noise,\n qn_block_size=self.quant_noise_block_size,\n )\n else:\n self.xlmr_attn = None\n self.encoder_bert_dropout_ratio = 0\n\n def build_fc1(self, input_dim, output_dim, q_noise, qn_block_size):\n return quant_noise(nn.Linear(input_dim, output_dim), q_noise, qn_block_size)\n\n def build_fc2(self, input_dim, output_dim, q_noise, qn_block_size):\n return quant_noise(nn.Linear(input_dim, output_dim), q_noise, qn_block_size)\n\n def build_self_attention(self, embed_dim, args, add_bias_kv=False, 
add_zero_attn=False):\n return MultiheadAttention(\n embed_dim,\n args.decoder_attention_heads,\n dropout=args.attention_dropout,\n add_bias_kv=add_bias_kv,\n add_zero_attn=add_zero_attn,\n self_attention=not getattr(args, \"cross_self_attention\", False),\n q_noise=self.quant_noise,\n qn_block_size=self.quant_noise_block_size,\n )\n\n def build_encoder_attention(self, embed_dim, args):\n cross_attn_dropout = getattr(args, \"cross_attention_dropout\", 0.0)\n attn_dropout = getattr(args, \"attention_dropout\", 0.0)\n final_attn_dropout = max(cross_attn_dropout,attn_dropout)\n\n return MultiheadAttention(\n embed_dim,\n args.decoder_attention_heads,\n kdim=getattr(args, \"encoder_embed_dim\", None),\n vdim=getattr(args, \"encoder_embed_dim\", None),\n dropout=final_attn_dropout, ##args.cross_attention_dropout,\n encoder_decoder_attention=True,\n q_noise=self.quant_noise,\n qn_block_size=self.quant_noise_block_size,\n )\n\n def prepare_for_onnx_export_(self):\n self.onnx_trace = True\n\n def get_ratio(self):\n frand = float(uniform(0, 1))\n if frand < self.encoder_bert_dropout_ratio and self.training:\n return [1, 0]\n elif frand > 1 - self.encoder_bert_dropout_ratio and self.training:\n return [0, 1]\n else:\n return [0.5, 0.5]\n\n def forward(\n self,\n x,\n encoder_out: Optional[torch.Tensor] = None,\n encoder_padding_mask: Optional[torch.Tensor] = None,\n incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,\n prev_self_attn_state: Optional[List[torch.Tensor]] = None,\n prev_attn_state: Optional[List[torch.Tensor]] = None,\n self_attn_mask: Optional[torch.Tensor] = None,\n self_attn_padding_mask: Optional[torch.Tensor] = None,\n need_attn: bool = False,\n need_head_weights: bool = False,\n xlmr_state = None,\n ):\n \"\"\"\n Args:\n x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`\n encoder_padding_mask (ByteTensor, optional): binary\n ByteTensor of shape `(batch, src_len)` where padding\n elements are indicated by ``1``.\n need_attn (bool, optional): return attention weights\n need_head_weights (bool, optional): return attention weights\n for each head (default: return average over heads).\n\n Returns:\n encoded output of shape `(seq_len, batch, embed_dim)`\n \"\"\"\n if need_head_weights:\n need_attn = True\n\n residual = x\n if self.self_attn is not None:\n if self.normalize_before:\n x = self.self_attn_layer_norm(x)\n if prev_self_attn_state is not None:\n prev_key, prev_value = prev_self_attn_state[:2]\n saved_state: Dict[str, Optional[Tensor]] = {\n \"prev_key\": prev_key,\n \"prev_value\": prev_value,\n }\n if len(prev_self_attn_state) >= 3:\n saved_state[\"prev_key_padding_mask\"] = prev_self_attn_state[2]\n assert incremental_state is not None\n self.self_attn._set_input_buffer(incremental_state, saved_state)\n _self_attn_input_buffer = self.self_attn._get_input_buffer(incremental_state)\n if self.cross_self_attention and not (\n incremental_state is not None\n and _self_attn_input_buffer is not None\n and \"prev_key\" in _self_attn_input_buffer\n ):\n if self_attn_mask is not None:\n assert encoder_out is not None\n self_attn_mask = torch.cat(\n (x.new_zeros(x.size(0), encoder_out.size(0)), self_attn_mask), dim=1\n )\n if self_attn_padding_mask is not None:\n if encoder_padding_mask is None:\n assert encoder_out is not None\n encoder_padding_mask = self_attn_padding_mask.new_zeros(\n encoder_out.size(1), encoder_out.size(0)\n )\n self_attn_padding_mask = torch.cat(\n (encoder_padding_mask, self_attn_padding_mask), dim=1\n )\n assert 
encoder_out is not None\n y = torch.cat((encoder_out, x), dim=0)\n else:\n y = x\n\n x, attn = self.self_attn(\n query=x,\n key=y,\n value=y,\n key_padding_mask=self_attn_padding_mask,\n incremental_state=incremental_state,\n need_weights=False,\n attn_mask=self_attn_mask,\n )\n x = self.dropout_module(x)\n x = residual + x\n if not self.normalize_before:\n x = self.self_attn_layer_norm(x)\n\n if self.encoder_attn is not None:\n residual = x\n if self.normalize_before:\n x = self.encoder_attn_layer_norm(x)\n if prev_attn_state is not None:\n prev_key, prev_value = prev_attn_state[:2]\n saved_state: Dict[str, Optional[Tensor]] = {\n \"prev_key\": prev_key,\n \"prev_value\": prev_value,\n }\n if len(prev_attn_state) >= 3:\n saved_state[\"prev_key_padding_mask\"] = prev_attn_state[2]\n assert incremental_state is not None\n self.encoder_attn._set_input_buffer(incremental_state, saved_state)\n\n x, attn = self.encoder_attn(\n query=x,\n key=encoder_out,\n value=encoder_out,\n key_padding_mask=encoder_padding_mask,\n incremental_state=incremental_state,\n static_kv=True,\n need_weights=need_attn or (not self.training and self.need_attn),\n need_head_weights=need_head_weights,\n )\n x = self.dropout_module(x)\n\n if self.xlmr_attn is not None and xlmr_state is not None: ## TODO: check the valid masks.\n ratios = self.get_ratio()\n x_xlmr, _ = self.xlmr_attn( \n query = residual,\n key = xlmr_state,\n value = xlmr_state,\n key_padding_mask = encoder_padding_mask,\n incremental_state = incremental_state,\n static_kv = True,\n need_weights = False,\n need_head_weights = False,\n )\n x_xlmr = self.dropout_module(x_xlmr)\n x = ratios[0] * x + ratios[1] * x_xlmr \n \n x = residual + x\n if not self.normalize_before:\n x = self.encoder_attn_layer_norm(x)\n\n residual = x\n if self.normalize_before:\n x = self.final_layer_norm(x)\n\n x = self.activation_fn(self.fc1(x))\n x = self.activation_dropout_module(x)\n x = self.fc2(x)\n x = self.dropout_module(x)\n x = residual + x\n if not self.normalize_before:\n x = self.final_layer_norm(x)\n if self.onnx_trace and incremental_state is not None and self.self_attn is not None:\n saved_state = self.self_attn._get_input_buffer(incremental_state)\n assert saved_state is not None\n if self_attn_padding_mask is not None:\n self_attn_state = [\n saved_state[\"prev_key\"],\n saved_state[\"prev_value\"],\n saved_state[\"prev_key_padding_mask\"],\n ]\n else:\n self_attn_state = [saved_state[\"prev_key\"], saved_state[\"prev_value\"]]\n return x, attn, self_attn_state\n return x, attn, None\n\n def make_generation_fast_(self, need_attn: bool = False, **kwargs):\n self.need_attn = need_attn\n\n\ndef Linear(in_features, out_features, bias=True):\n m = nn.Linear(in_features, out_features, bias)\n nn.init.xavier_uniform_(m.weight)\n if bias:\n nn.init.constant_(m.bias, 0.0)\n return m\n"
] |
[
[
"torch.nn.Linear",
"torch.cat",
"torch.nn.init.constant_",
"torch.nn.init.xavier_uniform_",
"numpy.random.uniform"
]
] |
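The fairseq layer in the entry above blends its regular attention output with an XLM-R cross-attention output using the stochastic ratios returned by get_ratio(). The sketch below is a minimal, self-contained illustration of that drop-net-style mixing in plain PyTorch, not code from the repository; the class and attribute names (MixTwoBranches, branch_a, branch_b, drop_ratio) are made up for the example.

    from numpy.random import uniform
    import torch
    import torch.nn as nn

    class MixTwoBranches(nn.Module):
        """Pick one of two branches at random while training, else average them."""

        def __init__(self, dim: int, drop_ratio: float = 0.25):
            super().__init__()
            self.drop_ratio = drop_ratio
            # Stand-ins for the two attention branches in the dataset entry.
            self.branch_a = nn.Linear(dim, dim)
            self.branch_b = nn.Linear(dim, dim)

        def get_ratio(self):
            frand = float(uniform(0, 1))
            if frand < self.drop_ratio and self.training:
                return [1.0, 0.0]          # keep only branch A
            elif frand > 1 - self.drop_ratio and self.training:
                return [0.0, 1.0]          # keep only branch B
            return [0.5, 0.5]              # average; eval mode always lands here

        def forward(self, x: torch.Tensor) -> torch.Tensor:
            ra, rb = self.get_ratio()
            return ra * self.branch_a(x) + rb * self.branch_b(x)

    layer = MixTwoBranches(dim=8)
    layer.train()
    print(layer(torch.randn(4, 8)).shape)  # torch.Size([4, 8])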
BeHappyForMe/retrieval-faq
|
[
"bdc060b5005fc26bc5d27e1577bfc5a17710804d"
] |
[
"faq/round_trip_translation.py"
] |
[
"import pandas as pd\nimport json\nimport requests\nimport time\n\npd_all = pd.read_csv('../data/baoxian_right.csv', sep='\\t')\nbest_title = pd_all['best_title'].tolist()\n\n\ndef translate(word, ip):\n # 有道词典 api\n url = 'http://fanyi.youdao.com/translate?smartresult=dict&smartresult=rule&smartresult=ugc&sessionFrom=null'\n # 传输的参数,其中 i 为需要翻译的内容\n key = {\n 'type': \"AUTO\",\n 'i': word,\n \"doctype\": \"json\",\n \"version\": \"2.1\",\n \"keyfrom\": \"fanyi.web\",\n \"ue\": \"UTF-8\",\n \"action\": \"FY_BY_CLICKBUTTON\",\n \"typoResult\": \"true\"\n }\n\n # 代理\n proxies = {'http': ip, 'https': ip}\n # key 这个字典为发送给有道词典服务器的内容\n response = requests.post(url, data=key, proxies=proxies)\n # 判断服务器是否相应成功\n if response.status_code == 200:\n # 然后相应的结果\n return response.text\n else:\n print(\"有道词典调用失败\")\n # 相应失败就返回空\n return None\n\n\ndef get_ip(url):\n ip_text = requests.get(url)\n ip_text = ip_text.text\n while ip_text.find('提取太频繁') != -1:\n time.sleep(5)\n print('提取太频繁')\n ip_text = requests.get(url)\n ip_text = ip_text.text\n return ip_text.strip()\n\n\nnum_thread_all = 15\nnum_thread_ing = 0\nip = None\nflag = True # 表示线程可前进\nsynonymous = []\nsynonymous.append(\"best_title\" + '\\t' + \"translated\" + '\\n')\nfrom threading import Lock\nimport threading\n\nlock = Lock()\n\n\ndef get_synonymous_thread(line, index):\n global num_thread_ing\n global ip\n try:\n list_trans = translate(line, ip)\n # print('test')\n if list_trans == None:\n if index == 0:\n ip = get_ip(\n 'http://api.xdaili.cn/xdaili-api//greatRecharge/getGreatIp?spiderId=d4980dea2ab74a35907e9534fc146246&orderno=YZ2019840424LhDCX9&returnType=1&count=1')\n\n elif list_trans.find('来自您ip的请求异常频繁') == -1:\n result = json.loads(list_trans)\n en_result = result['translateResult'][0][0]['tgt']\n list_trans = translate(en_result, ip)\n else:\n flag = False\n\n if list_trans == None:\n if index == 0:\n ip = get_ip(\n 'http://api.xdaili.cn/xdaili-api//greatRecharge/getGreatIp?spiderId=d4980dea2ab74a35907e9534fc146246&orderno=YZ2019840424LhDCX9&returnType=1&count=1')\n\n\n elif list_trans.find('来自您ip的请求异常频繁') == -1:\n result = json.loads(list_trans)\n cn_result = result['translateResult'][0][0]['tgt']\n\n print(line + '\\t' + cn_result)\n # lock.acquire()\n synonymous.append(line + '\\t' + cn_result + '\\n')\n # lock.release()\n else:\n flag = False\n except Exception:\n pass\n num_thread_ing -= 1\n\n\nip = get_ip(\n 'http://api.xdaili.cn/xdaili-api//greatRecharge/getGreatIp?spiderId=d4980dea2ab74a35907e9534fc146246&orderno=YZ2019840424LhDCX9&returnType=1&count=1')\n\nfor idx, line in enumerate(best_title):\n while True:\n if num_thread_ing < num_thread_all:\n\n num_thread_ing += 1\n threading.Thread(target=get_synonymous_thread, args=(line, num_thread_ing)).start()\n\n idx = idx + 1\n if idx % 500 == 0:\n print(idx)\n ip = get_ip(\n 'http://api.xdaili.cn/xdaili-api//greatRecharge/getGreatIp?spiderId=d4980dea2ab74a35907e9534fc146246&orderno=YZ2019840424LhDCX9&returnType=1&count=1')\n\n break\n else:\n time.sleep(1)\n\nwith open('../data/baoxian_synonymous.csv', 'w', encoding='utf-8') as file:\n file.writelines(synonymous)\n\ntranslated = pd.read_csv('../data/baoxian_synonymous.csv', sep='\\t', header=0)\nmerged = pd_all.merge(translated, left_on='best_title', right_on='best_title')\nmerged[['best_title', 'translated', 'reply', 'is_best']].drop_duplicates(inplace=True)\nmerged[['best_title', 'translated', 'reply', 'is_best']].to_csv('../data/baoxian_preprocessed_synonymous.csv',\n index=False, sep='\\t')"
] |
[
[
"pandas.read_csv"
]
] |
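The script in the entry above paraphrases FAQ questions by round-trip translation (Chinese -> English -> Chinese) through the Youdao web endpoint and then merges the results back with pandas. The sketch below keeps only the generic round-trip and pandas bookkeeping, with the translation service abstracted behind a callable so nothing about the Youdao or proxy APIs is assumed; translate_fn, the file names, and the sample row are placeholders. As a side note on the entry, calling drop_duplicates(inplace=True) on a temporary column selection does not persist the deduplication, so the sketch deduplicates before writing.

    from typing import Callable
    import pandas as pd

    def round_trip(text: str, translate_fn: Callable[[str, str], str]) -> str:
        """Translate to a pivot language and back, e.g. zh -> en -> zh."""
        pivot = translate_fn(text, "en")
        return translate_fn(pivot, "zh")

    def augment(path_in: str, path_out: str,
                translate_fn: Callable[[str, str], str]) -> pd.DataFrame:
        df = pd.read_csv(path_in, sep="\t")
        df["translated"] = [round_trip(q, translate_fn) for q in df["best_title"]]
        # Deduplicate before writing so the result of drop_duplicates is kept.
        out = df[["best_title", "translated"]].drop_duplicates()
        out.to_csv(path_out, sep="\t", index=False)
        return out

    # Dry run with an identity "translator" and a tiny in-memory dataset.
    pd.DataFrame({"best_title": ["how do I cancel a policy?"]}).to_csv(
        "questions.csv", sep="\t", index=False)
    print(augment("questions.csv", "questions_aug.csv", lambda text, lang: text))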
jeffin07/theseus
|
[
"3498bbddf9cca740c2703d0c1aa3a78a7264cb15"
] |
[
"theseus/geometry/lie_group.py"
] |
[
"# Copyright (c) Meta Platforms, Inc. and affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport abc\nfrom typing import Any, List, Optional, Tuple, cast\n\nimport torch\n\nfrom theseus.geometry.manifold import Manifold\n\n\n# Abstract class to represent Lie groups.\n# Concrete classes must implement the following methods:\n# - `exp_map`\n# - `_log_map`\n# - `_adjoint`\n# _ `_compose`\n# _ `_inverse`\n#\n# Constructor can optionally provide an initial data value as a keyword argument.\nclass LieGroup(Manifold):\n def __init__(\n self,\n *args: Any,\n data: Optional[torch.Tensor] = None,\n name: Optional[str] = None,\n dtype: torch.dtype = torch.float,\n ):\n super().__init__(*args, data=data, name=name, dtype=dtype)\n\n @staticmethod\n def _check_jacobians_list(jacobians: List[torch.Tensor]):\n if len(jacobians) != 0:\n raise ValueError(\"jacobians list to be populated must be empty.\")\n\n @staticmethod\n @abc.abstractmethod\n def _init_data(*args: Any) -> torch.Tensor:\n pass\n\n @abc.abstractmethod\n def dof(self) -> int:\n pass\n\n @staticmethod\n @abc.abstractmethod\n def rand(\n *size: int,\n generator: Optional[torch.Generator] = None,\n dtype: Optional[torch.dtype] = None,\n device: Optional[torch.device] = None,\n requires_grad: bool = False,\n ) -> \"LieGroup\":\n pass\n\n @staticmethod\n @abc.abstractmethod\n def randn(\n *size: int,\n generator: Optional[torch.Generator] = None,\n dtype: Optional[torch.dtype] = None,\n device: Optional[torch.device] = None,\n requires_grad: bool = False,\n ) -> \"LieGroup\":\n pass\n\n def __str__(self) -> str:\n return repr(self)\n\n @staticmethod\n @abc.abstractmethod\n def exp_map(\n tangent_vector: torch.Tensor, jacobians: Optional[List[torch.Tensor]] = None\n ) -> \"LieGroup\":\n pass\n\n @abc.abstractmethod\n def _log_map_impl(\n self, jacobians: Optional[List[torch.Tensor]] = None\n ) -> torch.Tensor:\n pass\n\n @abc.abstractmethod\n def to_matrix(self) -> torch.Tensor:\n pass\n\n def log_map(self, jacobians: Optional[List[torch.Tensor]] = None) -> torch.Tensor:\n return self._log_map_impl(jacobians)\n\n @abc.abstractmethod\n def _adjoint_impl(self) -> torch.Tensor:\n pass\n\n def adjoint(self) -> torch.Tensor:\n return self._adjoint_impl()\n\n def _project_check(self, euclidean_grad: torch.Tensor, is_sparse: bool = False):\n if euclidean_grad.dtype != self.dtype:\n raise ValueError(\n \"Euclidean gradients must be of the same type as the Lie group.\"\n )\n\n if euclidean_grad.device != self.device:\n raise ValueError(\n \"Euclidean gradients must be on the same device as the Lie group.\"\n )\n\n if euclidean_grad.shape[-self.ndim + is_sparse :] != self.shape[is_sparse:]:\n raise ValueError(\n \"Euclidean gradients must have consistent shapes with the Lie group.\"\n )\n\n def between(\n self, variable2: \"LieGroup\", jacobians: Optional[List[torch.Tensor]] = None\n ) -> \"LieGroup\":\n v1_inverse = self._inverse_impl()\n between = v1_inverse._compose_impl(variable2)\n if jacobians is not None:\n LieGroup._check_jacobians_list(jacobians)\n Jinv = LieGroup._inverse_jacobian(self)\n Jcmp0, Jcmp1 = v1_inverse._compose_jacobian(variable2)\n Jbetween0 = torch.matmul(Jcmp0, Jinv)\n jacobians.extend([Jbetween0, Jcmp1])\n return between\n\n @abc.abstractmethod\n def _compose_impl(self, variable2: \"LieGroup\") -> \"LieGroup\":\n pass\n\n def compose(\n self, variable2: \"LieGroup\", jacobians: Optional[List[torch.Tensor]] = None\n ) -> \"LieGroup\":\n 
composition = self._compose_impl(variable2)\n if jacobians is not None:\n LieGroup._check_jacobians_list(jacobians)\n jacobians.extend(self._compose_jacobian(variable2))\n return composition\n\n @abc.abstractmethod\n def _inverse_impl(self) -> \"LieGroup\":\n pass\n\n def inverse(self, jacobian: Optional[List[torch.Tensor]] = None) -> \"LieGroup\":\n the_inverse = self._inverse_impl()\n if jacobian is not None:\n LieGroup._check_jacobians_list(jacobian)\n jacobian.append(self._inverse_jacobian(self))\n return the_inverse\n\n def _compose_jacobian(\n self, group2: \"LieGroup\"\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n if not type(self) is type(group2):\n raise ValueError(\"Lie groups for compose must be of the same type.\")\n g2_inverse = group2._inverse_impl()\n jac1 = g2_inverse.adjoint()\n jac2 = (\n torch.eye(group2.dof(), dtype=self.dtype)\n .repeat(group2.shape[0], 1, 1)\n .to(group2.device)\n )\n return jac1, jac2\n\n @staticmethod\n def _inverse_jacobian(group: \"LieGroup\") -> torch.Tensor:\n return -group.adjoint()\n\n def _local_impl(\n self, variable2: Manifold, jacobians: List[torch.Tensor] = None\n ) -> torch.Tensor:\n variable2 = cast(LieGroup, variable2)\n diff = self.between(variable2)\n\n if jacobians is not None:\n LieGroup._check_jacobians_list(jacobians)\n dlog: List[torch.Tensor] = []\n ret = diff.log_map(dlog)\n jacobians.append(-diff.inverse().adjoint() @ dlog[0])\n jacobians.append(dlog[0])\n else:\n ret = diff.log_map()\n\n return ret\n\n def _retract_impl(self, delta: torch.Tensor) -> \"LieGroup\":\n return self.compose(self.exp_map(delta))\n\n # added to avoid casting downstream\n def copy(self, new_name: Optional[str] = None) -> \"LieGroup\":\n return cast(LieGroup, super().copy(new_name=new_name))\n\n\n# Alias for LieGroup.adjoint()\ndef adjoint(variable: LieGroup) -> torch.Tensor:\n return variable.adjoint()\n\n\ndef between(\n variable1: LieGroup,\n variable2: LieGroup,\n jacobians: Optional[List[torch.Tensor]] = None,\n) -> LieGroup:\n return variable1.between(variable2, jacobians=jacobians)\n\n\n# Alias for LieGroup.compose()\ndef compose(\n variable1: LieGroup,\n variable2: LieGroup,\n jacobians: Optional[List[torch.Tensor]] = None,\n) -> LieGroup:\n return variable1.compose(variable2, jacobians=jacobians)\n\n\n# Alias for LieGroup.inverse()\ndef inverse(\n variable1: LieGroup, jacobian: Optional[List[torch.Tensor]] = None\n) -> LieGroup:\n return variable1.inverse(jacobian=jacobian)\n\n\n# Alias for LieGroup.log_map()\ndef log_map(\n variable: LieGroup, jacobians: Optional[List[torch.Tensor]] = None\n) -> torch.Tensor:\n return variable.log_map(jacobians=jacobians)\n\n\n# Alias for LieGroup.exp_map()\ndef exp_map(\n variable: LieGroup,\n tangent_vector: torch.Tensor,\n jacobians: Optional[List[torch.Tensor]] = None,\n) -> LieGroup:\n return variable.__class__.exp_map(tangent_vector, jacobians=jacobians)\n"
] |
[
[
"torch.matmul"
]
] |
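The between()/compose() methods in the entry above chain Jacobians with torch.matmul (Jbetween0 = Jcmp0 @ Jinv). The sketch below illustrates that same chain-rule pattern on ordinary functions and checks it against autograd, so it does not depend on any concrete theseus Lie group; f and g are arbitrary functions invented for the example.

    import torch
    from torch.autograd.functional import jacobian

    def g(x: torch.Tensor) -> torch.Tensor:       # plays the role of the inner map
        return torch.stack([x[0] * x[1], x[0] - x[1]])

    def f(y: torch.Tensor) -> torch.Tensor:       # plays the role of the outer map
        return torch.stack([torch.sin(y[0]), y[0] + y[1] ** 2])

    x = torch.tensor([0.3, -1.2])
    J_inner = jacobian(g, x)                      # analogue of Jinv
    J_outer = jacobian(f, g(x))                   # analogue of Jcmp0
    J_chain = torch.matmul(J_outer, J_inner)      # analogue of Jbetween0

    # Chaining must agree with differentiating the composition directly.
    assert torch.allclose(J_chain, jacobian(lambda t: f(g(t)), x))
    print(J_chain)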
oxygenxo/openvino
|
[
"1c3848a96fdd325b044babe6d5cd26db341cf85b"
] |
[
"ngraph/python/tests/test_ngraph/test_ops_fused.py"
] |
[
"# ******************************************************************************\n# Copyright 2017-2020 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ******************************************************************************\nimport numpy as np\nimport pytest\n\nimport ngraph as ng\nfrom tests.runtime import get_runtime\nfrom tests import (xfail_issue_40957,\n xfail_issue_34327,\n xfail_issue_36485,\n xfail_issue_36486,\n xfail_issue_36487,\n xfail_issue_44976)\n\n\n@xfail_issue_40957\ndef test_elu_operator_with_scalar_and_array():\n runtime = get_runtime()\n\n data_value = np.array([[-5, 1], [-2, 3]], dtype=np.float32)\n alpha_value = np.float32(3)\n\n model = ng.elu(data_value, alpha_value)\n computation = runtime.computation(model)\n\n result = computation()\n expected = np.array([[-2.9797862, 1.0], [-2.5939941, 3.0]], dtype=np.float32)\n assert np.allclose(result, expected)\n\n\ndef test_elu_operator_with_scalar():\n runtime = get_runtime()\n\n data_value = np.array([[-5, 1], [-2, 3]], dtype=np.float32)\n alpha_value = np.float32(3)\n\n data_shape = [2, 2]\n parameter_data = ng.parameter(data_shape, name=\"Data\", dtype=np.float32)\n\n model = ng.elu(parameter_data, alpha_value)\n computation = runtime.computation(model, parameter_data)\n\n result = computation(data_value)\n expected = np.array([[-2.9797862, 1.0], [-2.5939941, 3.0]], dtype=np.float32)\n assert np.allclose(result, expected)\n\n\n@xfail_issue_44976\ndef test_fake_quantize():\n runtime = get_runtime()\n\n data_value = np.arange(24.0, dtype=np.float32).reshape(1, 2, 3, 4)\n input_low_value = np.float32(0)\n input_high_value = np.float32(23)\n output_low_value = np.float32(2)\n output_high_value = np.float32(16)\n levels = np.float32(4)\n\n data_shape = [1, 2, 3, 4]\n bound_shape = []\n parameter_data = ng.parameter(data_shape, name=\"data\", dtype=np.float32)\n parameter_input_low = ng.parameter(bound_shape, name=\"input_low\", dtype=np.float32)\n parameter_input_high = ng.parameter(bound_shape, name=\"input_high\", dtype=np.float32)\n parameter_output_low = ng.parameter(bound_shape, name=\"output_low\", dtype=np.float32)\n parameter_output_high = ng.parameter(bound_shape, name=\"output_high\", dtype=np.float32)\n\n model = ng.fake_quantize(\n parameter_data,\n parameter_input_low,\n parameter_input_high,\n parameter_output_low,\n parameter_output_high,\n levels,\n )\n computation = runtime.computation(\n model,\n parameter_data,\n parameter_input_low,\n parameter_input_high,\n parameter_output_low,\n parameter_output_high,\n )\n\n result = computation(data_value, input_low_value, input_high_value, output_low_value, output_high_value)\n\n expected = np.array(\n [\n [\n [\n [\n [2.0, 2.0, 2.0, 2.0],\n [6.6666669, 6.6666669, 6.6666669, 6.6666669],\n [6.6666669, 6.6666669, 6.6666669, 6.6666669],\n ],\n [\n [11.33333301, 11.33333301, 11.33333301, 11.33333301],\n [11.33333301, 11.33333301, 11.33333301, 11.33333301],\n [16.0, 16.0, 16.0, 16.0],\n ],\n ]\n ]\n ],\n dtype=np.float32,\n )\n assert 
np.allclose(result, expected)\n\n\ndef test_depth_to_space():\n runtime = get_runtime()\n\n data_value = np.array(\n [\n [\n [[0, 1, 2], [3, 4, 5]],\n [[6, 7, 8], [9, 10, 11]],\n [[12, 13, 14], [15, 16, 17]],\n [[18, 19, 20], [21, 22, 23]],\n ]\n ],\n dtype=np.float32,\n )\n mode = \"blocks_first\"\n block_size = np.float32(2)\n\n data_shape = [1, 4, 2, 3]\n parameter_data = ng.parameter(data_shape, name=\"Data\", dtype=np.float32)\n\n model = ng.depth_to_space(parameter_data, mode, block_size)\n computation = runtime.computation(model, parameter_data)\n\n result = computation(data_value)\n expected = np.array(\n [[[[0, 6, 1, 7, 2, 8], [12, 18, 13, 19, 14, 20], [3, 9, 4, 10, 5, 11], [15, 21, 16, 22, 17, 23]]]],\n dtype=np.float32,\n )\n assert np.allclose(result, expected)\n\n\n@xfail_issue_34327\ndef test_space_to_batch():\n runtime = get_runtime()\n\n data_value = np.array([[[[0, 1, 2], [3, 4, 5]], [[6, 7, 8], [9, 10, 11]]]], dtype=np.float32)\n data_shape = data_value.shape\n\n block_shape = np.array([1, 2, 3, 2], dtype=np.int64)\n pads_begin = np.array([0, 0, 1, 0], dtype=np.int64)\n pads_end = np.array([0, 0, 0, 1], dtype=np.int64)\n\n parameter_data = ng.parameter(data_shape, name=\"Data\", dtype=np.float32)\n\n model = ng.space_to_batch(parameter_data, block_shape, pads_begin, pads_end)\n computation = runtime.computation(model, parameter_data)\n\n result = computation(data_value)\n expected = np.array(\n [\n [[[0, 0]]],\n [[[0, 0]]],\n [[[0, 2]]],\n [[[1, 0]]],\n [[[3, 5]]],\n [[[4, 0]]],\n [[[0, 0]]],\n [[[0, 0]]],\n [[[6, 8]]],\n [[[7, 0]]],\n [[[9, 11]]],\n [[[10, 0]]],\n ],\n dtype=np.float32,\n )\n assert np.allclose(result, expected)\n\n\n@xfail_issue_34327\ndef test_batch_to_space():\n runtime = get_runtime()\n\n data = np.array(\n [\n [[[0, 0]]],\n [[[0, 0]]],\n [[[0, 2]]],\n [[[1, 0]]],\n [[[3, 5]]],\n [[[4, 0]]],\n [[[0, 0]]],\n [[[0, 0]]],\n [[[6, 8]]],\n [[[7, 0]]],\n [[[9, 11]]],\n [[[10, 0]]],\n ],\n dtype=np.float32,\n )\n data_shape = data.shape\n\n block_shape = np.array([1, 2, 3, 2], dtype=np.int64)\n crops_begin = np.array([0, 0, 1, 0], dtype=np.int64)\n crops_end = np.array([0, 0, 0, 1], dtype=np.int64)\n\n parameter_data = ng.parameter(data_shape, name=\"Data\", dtype=np.float32)\n\n model = ng.batch_to_space(parameter_data, block_shape, crops_begin, crops_end)\n computation = runtime.computation(model, parameter_data)\n\n result = computation(data)\n expected = np.array([[[[0, 1, 2], [3, 4, 5]], [[6, 7, 8], [9, 10, 11]]]], dtype=np.float32)\n\n assert np.allclose(result, expected)\n\n\ndef test_gelu_operator_with_parameters():\n runtime = get_runtime()\n\n data_value = np.array([[-5, 1], [-2, 3]], dtype=np.float32)\n\n data_shape = [2, 2]\n parameter_data = ng.parameter(data_shape, name=\"Data\", dtype=np.float32)\n\n model = ng.gelu(parameter_data)\n computation = runtime.computation(model, parameter_data)\n\n result = computation(data_value)\n expected = np.array([[-1.4901161e-06, 8.4134471e-01], [-4.5500278e-02, 2.9959502]], dtype=np.float32)\n assert np.allclose(result, expected, 0.007, 0.007)\n\n\n@xfail_issue_40957\ndef test_gelu_operator_with_array():\n runtime = get_runtime()\n\n data_value = np.array([[-5, 1], [-2, 3]], dtype=np.float32)\n\n model = ng.gelu(data_value)\n computation = runtime.computation(model)\n\n result = computation()\n expected = np.array([[-1.4901161e-06, 8.4134471e-01], [-4.5500278e-02, 2.9959502]], dtype=np.float32)\n\n assert np.allclose(result, expected, 0.007, 0.007)\n\n\ndef test_clamp_operator():\n runtime = 
get_runtime()\n\n data_shape = [2, 2]\n parameter_data = ng.parameter(data_shape, name=\"Data\", dtype=np.float32)\n min_value = np.float32(3)\n max_value = np.float32(12)\n\n model = ng.clamp(parameter_data, min_value, max_value)\n computation = runtime.computation(model, parameter_data)\n\n data_value = np.array([[-5, 9], [45, 3]], dtype=np.float32)\n\n result = computation(data_value)\n expected = np.clip(data_value, min_value, max_value)\n assert np.allclose(result, expected)\n\n\n@xfail_issue_40957\ndef test_clamp_operator_with_array():\n runtime = get_runtime()\n\n data_value = np.array([[-5, 9], [45, 3]], dtype=np.float32)\n min_value = np.float32(3)\n max_value = np.float32(12)\n\n model = ng.clamp(data_value, min_value, max_value)\n computation = runtime.computation(model)\n\n result = computation()\n expected = np.clip(data_value, min_value, max_value)\n\n assert np.allclose(result, expected)\n\n\ndef test_squeeze_operator():\n runtime = get_runtime()\n\n data_shape = [1, 2, 1, 3, 1, 1]\n parameter_data = ng.parameter(data_shape, name=\"Data\", dtype=np.float32)\n data_value = np.arange(6.0, dtype=np.float32).reshape([1, 2, 1, 3, 1, 1])\n axes = [2, 4]\n model = ng.squeeze(parameter_data, axes)\n computation = runtime.computation(model, parameter_data)\n\n result = computation(data_value)\n expected = np.arange(6.0, dtype=np.float32).reshape([1, 2, 3, 1])\n assert np.allclose(result, expected)\n\n\ndef test_squared_difference_operator():\n runtime = get_runtime()\n\n x1_shape = [1, 2, 3, 4]\n x2_shape = [2, 3, 4]\n\n parameter_x1 = ng.parameter(x1_shape, name=\"x1\", dtype=np.float32)\n parameter_x2 = ng.parameter(x2_shape, name=\"x2\", dtype=np.float32)\n\n x1_value = np.arange(24.0, dtype=np.float32).reshape(x1_shape)\n x2_value = np.arange(start=4.0, stop=28.0, step=1.0, dtype=np.float32).reshape(x2_shape)\n\n model = ng.squared_difference(parameter_x1, parameter_x2)\n computation = runtime.computation(model, parameter_x1, parameter_x2)\n\n result = computation(x1_value, x2_value)\n expected = np.square(np.subtract(x1_value, x2_value))\n assert np.allclose(result, expected)\n\n\n@xfail_issue_36485\ndef test_shuffle_channels_operator():\n runtime = get_runtime()\n\n data_shape = [1, 15, 2, 2]\n axis = 1\n groups = 5\n\n parameter = ng.parameter(data_shape, name=\"Data\", dtype=np.float32)\n\n data_value = np.arange(60.0, dtype=np.float32).reshape(data_shape)\n\n model = ng.shuffle_channels(parameter, axis, groups)\n computation = runtime.computation(model, parameter)\n\n result = computation(data_value)\n expected = np.array(\n [\n [\n [[0.0, 1.0], [2.0, 3.0]],\n [[12.0, 13.0], [14.0, 15.0]],\n [[24.0, 25.0], [26.0, 27.0]],\n [[36.0, 37.0], [38.0, 39.0]],\n [[48.0, 49.0], [50.0, 51.0]],\n [[4.0, 5.0], [6.0, 7.0]],\n [[16.0, 17.0], [18.0, 19.0]],\n [[28.0, 29.0], [30.0, 31.0]],\n [[40.0, 41.0], [42.0, 43.0]],\n [[52.0, 53.0], [54.0, 55.0]],\n [[8.0, 9.0], [10.0, 11.0]],\n [[20.0, 21.0], [22.0, 23.0]],\n [[32.0, 33.0], [34.0, 35.0]],\n [[44.0, 45.0], [46.0, 47.0]],\n [[56.0, 57.0], [58.0, 59.0]],\n ]\n ],\n dtype=np.float32,\n )\n assert np.allclose(result, expected)\n\n\ndef test_unsqueeze():\n runtime = get_runtime()\n\n data_shape = [3, 4, 5]\n parameter_data = ng.parameter(data_shape, name=\"Data\", dtype=np.float32)\n data_value = np.arange(60.0, dtype=np.float32).reshape(3, 4, 5)\n axes = [0, 4]\n model = ng.unsqueeze(parameter_data, axes)\n computation = runtime.computation(model, parameter_data)\n\n result = computation(data_value)\n expected = np.arange(60.0, 
dtype=np.float32).reshape([1, 3, 4, 5, 1])\n assert np.allclose(result, expected)\n\n\ndef test_grn_operator():\n runtime = get_runtime()\n\n data_value = np.arange(start=1.0, stop=25.0, dtype=np.float32).reshape([1, 2, 3, 4])\n bias = np.float32(1e-6)\n\n data_shape = [1, 2, 3, 4]\n\n parameter_data = ng.parameter(data_shape, name=\"Data\", dtype=np.float32)\n\n model = ng.grn(parameter_data, bias)\n computation = runtime.computation(model, parameter_data)\n\n result = computation(data_value)\n expected = np.array(\n [\n [\n [\n [0.0766965, 0.14142136, 0.19611613, 0.24253564],\n [0.28216633, 0.31622776, 0.34570536, 0.37139067],\n [0.39391932, 0.41380295, 0.4314555, 0.4472136],\n ],\n [\n [0.9970545, 0.98994946, 0.9805807, 0.97014254],\n [0.9593655, 0.9486833, 0.9383431, 0.9284767],\n [0.91914505, 0.9103665, 0.9021342, 0.8944272],\n ],\n ]\n ],\n dtype=np.float32,\n )\n\n assert np.allclose(result, expected)\n\n\ndef test_prelu_operator():\n runtime = get_runtime()\n\n data_shape = [1, 2, 3, 4]\n slope_shape = [2, 3, 1]\n\n data_value = np.arange(start=1.0, stop=25.0, dtype=np.float32).reshape(data_shape)\n slope_value = np.arange(start=-10.0, stop=-4.0, dtype=np.float32).reshape(slope_shape)\n parameter_data = ng.parameter(data_shape, name=\"Data\", dtype=np.float32)\n parameter_slope = ng.parameter(slope_shape, name=\"Slope\", dtype=np.float32)\n\n model = ng.prelu(parameter_data, parameter_slope)\n computation = runtime.computation(model, parameter_data, parameter_slope)\n\n result = computation(data_value, slope_value)\n expected = np.clip(data_value, 0, np.inf) + np.clip(data_value, -np.inf, 0) * slope_value\n assert np.allclose(result, expected)\n\n\ndef test_selu_operator():\n runtime = get_runtime()\n\n data_shape = [4, 2, 3, 1]\n\n data = np.arange(start=1.0, stop=25.0, dtype=np.float32).reshape(data_shape)\n alpha = np.array(1.6733, dtype=np.float32)\n lambda_value = np.array(1.0507, dtype=np.float32)\n\n parameter_data = ng.parameter(data_shape, name=\"Data\", dtype=np.float32)\n model = ng.selu(parameter_data, alpha, lambda_value)\n computation = runtime.computation(model, parameter_data)\n\n result = computation(data)\n expected = lambda_value * ((data > 0) * data + (data <= 0) * (alpha * np.exp(data) - alpha))\n assert np.allclose(result, expected)\n\n\n@xfail_issue_36486\ndef test_hard_sigmoid_operator():\n runtime = get_runtime()\n\n data_shape = [3]\n alpha_value = np.float32(0.5)\n beta_value = np.float32(0.6)\n\n data_value = np.array([-1, 0, 1], dtype=np.float32)\n\n parameter_data = ng.parameter(data_shape, name=\"Data\", dtype=np.float32)\n parameter_alpha = ng.parameter([], name=\"Alpha\", dtype=np.float32)\n parameter_beta = ng.parameter([], name=\"Beta\", dtype=np.float32)\n\n model = ng.hard_sigmoid(parameter_data, parameter_alpha, parameter_beta)\n computation = runtime.computation(model, parameter_data, parameter_alpha, parameter_beta)\n\n result = computation(data_value, alpha_value, beta_value)\n expected = [0.1, 0.6, 1.0]\n assert np.allclose(result, expected)\n\n\n@xfail_issue_36487\ndef test_mvn_operator():\n runtime = get_runtime()\n\n data_shape = [3, 3, 3, 1]\n across_channels = True\n normalize_variance = True\n eps = np.float32(1e-9)\n\n data_value = np.array(\n [\n [\n [[0.8439683], [0.5665144], [0.05836735]],\n [[0.02916367], [0.12964272], [0.5060197]],\n [[0.79538304], [0.9411346], [0.9546573]],\n ],\n [\n [[0.17730942], [0.46192095], [0.26480448]],\n [[0.6746842], [0.01665257], [0.62473077]],\n [[0.9240844], [0.9722341], [0.11965699]],\n ],\n [\n 
[[0.41356155], [0.9129373], [0.59330076]],\n [[0.81929934], [0.7862604], [0.11799799]],\n [[0.69248444], [0.54119414], [0.07513223]],\n ],\n ],\n dtype=np.float32,\n )\n\n parameter_data = ng.parameter(data_shape, name=\"Data\", dtype=np.float32)\n\n model = ng.mvn(parameter_data, across_channels, normalize_variance, eps)\n computation = runtime.computation(model, parameter_data)\n\n result = computation(data_value)\n\n expected = np.array(\n [\n [\n [[0.9951074], [0.14548765], [-1.410561]],\n [[-1.4999886], [-1.1923014], [-0.03975919]],\n [[0.8463296], [1.2926502], [1.3340596]],\n ],\n [\n [[-1.0463363], [-0.1747985], [-0.7784088]],\n [[0.47672555], [-1.5383], [0.32375798]],\n [[1.2404392], [1.3878832], [-1.2228798]],\n ],\n [\n [[-0.3228847], [1.2063044], [0.22751297]],\n [[0.91956615], [0.81839436], [-1.2279599]],\n [[0.5312334], [0.067952], [-1.3592235]],\n ],\n ],\n )\n\n assert np.allclose(result, expected)\n\n\ndef test_space_to_depth_operator():\n runtime = get_runtime()\n\n data_shape = [1, 2, 4, 4]\n data_value = np.arange(start=0, stop=32, step=1.0, dtype=np.float32).reshape(data_shape)\n mode = \"blocks_first\"\n block_size = 2\n\n parameter_data = ng.parameter(data_shape, name=\"Data\", dtype=np.float32)\n\n model = ng.space_to_depth(parameter_data, mode, block_size)\n computation = runtime.computation(model, parameter_data)\n\n result = computation(data_value)\n expected = np.array(\n [\n 0,\n 2,\n 8,\n 10,\n 16,\n 18,\n 24,\n 26,\n 1,\n 3,\n 9,\n 11,\n 17,\n 19,\n 25,\n 27,\n 4,\n 6,\n 12,\n 14,\n 20,\n 22,\n 28,\n 30,\n 5,\n 7,\n 13,\n 15,\n 21,\n 23,\n 29,\n 31,\n ],\n dtype=np.float32,\n ).reshape(1, 8, 2, 2)\n assert np.allclose(result, expected)\n\n batch_size = 2\n input_size = 3\n hidden_size = 3\n\n X_shape = [batch_size, input_size]\n H_t_shape = [batch_size, hidden_size]\n W_shape = [hidden_size, input_size]\n R_shape = [hidden_size, hidden_size]\n B_shape = [hidden_size]\n\n parameter_X = ng.parameter(X_shape, name=\"X\", dtype=np.float32)\n parameter_H_t = ng.parameter(H_t_shape, name=\"H_t\", dtype=np.float32)\n parameter_W = ng.parameter(W_shape, name=\"W\", dtype=np.float32)\n parameter_R = ng.parameter(R_shape, name=\"R\", dtype=np.float32)\n parameter_B = ng.parameter(B_shape, name=\"B\", dtype=np.float32)\n\n X_value = np.array(\n [0.3432185, 0.612268, 0.20272376, 0.9513413, 0.30585995, 0.7265472], dtype=np.float32\n ).reshape(X_shape)\n H_t_value = np.array(\n [0.12444675, 0.52055854, 0.46489045, 0.4983964, 0.7730452, 0.28439692], dtype=np.float32\n ).reshape(H_t_shape)\n W_value = np.array(\n [\n 0.41930267,\n 0.7872176,\n 0.89940447,\n 0.23659843,\n 0.24676207,\n 0.17101714,\n 0.3147149,\n 0.6555601,\n 0.4559603,\n ],\n dtype=np.float32,\n ).reshape(W_shape)\n R_value = np.array(\n [\n 0.8374871,\n 0.86660194,\n 0.82114047,\n 0.71549815,\n 0.18775631,\n 0.3182116,\n 0.25392973,\n 0.38301638,\n 0.85531586,\n ],\n dtype=np.float32,\n ).reshape(R_shape)\n B_value = np.array([1.0289404, 1.6362579, 0.4370661], dtype=np.float32).reshape(B_shape)\n activations = [\"sigmoid\"]\n activation_alpha = []\n activation_beta = []\n clip = 2.88\n\n model = ng.rnn_cell(\n parameter_X,\n parameter_H_t,\n parameter_W,\n parameter_R,\n parameter_B,\n hidden_size,\n activations,\n activation_alpha,\n activation_beta,\n clip,\n )\n computation = runtime.computation(\n model, parameter_X, parameter_H_t, parameter_W, parameter_R, parameter_B\n )\n\n result = computation(X_value, H_t_value, W_value, R_value, B_value)\n expected = np.array(\n [0.94126844, 0.9036043, 0.841243, 
0.9468489, 0.934215, 0.873708], dtype=np.float32\n ).reshape(batch_size, hidden_size)\n\n assert np.allclose(result, expected)\n\n\ndef test_group_convolution_operator():\n runtime = get_runtime()\n\n data_shape = [1, 4, 2, 2]\n filters_shape = [2, 1, 2, 1, 1]\n\n parameter_data = ng.parameter(data_shape, name=\"Data\", dtype=np.float32)\n parameter_filters = ng.parameter(filters_shape, name=\"Filters\", dtype=np.float32)\n\n data_value = np.arange(start=1.0, stop=17.0, dtype=np.float32).reshape(data_shape)\n filters_value = np.arange(start=1.0, stop=5.0, dtype=np.float32).reshape(filters_shape)\n strides = [1, 1]\n dilations = [1, 1]\n pads_begin = [0, 0]\n pads_end = [0, 0]\n\n model = ng.group_convolution(parameter_data, parameter_filters, strides, pads_begin, pads_end, dilations)\n computation = runtime.computation(model, parameter_data, parameter_filters)\n result = computation(data_value, filters_value)\n\n expected = np.array([11, 14, 17, 20, 79, 86, 93, 100], dtype=np.float32).reshape(1, 2, 2, 2)\n\n assert np.allclose(result, expected)\n\n\n@pytest.mark.xfail(reason=\"Computation mismatch\")\ndef test_group_convolution_backprop_data():\n runtime = get_runtime()\n\n data_shape = [1, 1, 3, 3]\n filters_shape = [1, 1, 1, 3, 3]\n strides = [2, 2]\n output_padding = [1, 1]\n pads_begin = [1, 1]\n pads_end = [1, 1]\n\n data_node = ng.parameter(data_shape, name=\"Data\", dtype=np.float32)\n filters_node = ng.parameter(filters_shape, name=\"Filters\", dtype=np.float32)\n model = ng.group_convolution_backprop_data(\n data_node, filters_node, strides, None, pads_begin, pads_end, output_padding=output_padding\n )\n\n data_value = np.array(\n [\n 0.16857791,\n -0.15161794,\n 0.08540368,\n 0.1820628,\n -0.21746576,\n 0.08245695,\n 0.1431433,\n -0.43156421,\n 0.30591947,\n ],\n dtype=np.float32,\n ).reshape(data_shape)\n\n filters_value = np.array(\n [\n -0.06230065,\n 0.37932432,\n -0.25388849,\n 0.33878803,\n 0.43709868,\n -0.22477469,\n 0.04118127,\n -0.44696793,\n 0.06373066,\n ],\n dtype=np.float32,\n ).reshape(filters_shape)\n\n computation = runtime.computation(model, data_node, filters_node)\n result = computation(data_value, filters_value)\n\n expected = np.array(\n [\n 0.07368518,\n -0.08925839,\n -0.06627201,\n 0.06301362,\n 0.03732984,\n -0.01919658,\n -0.00628807,\n -0.02817563,\n -0.01472169,\n 0.04392925,\n -0.00689478,\n -0.01549204,\n 0.07957941,\n -0.11459791,\n -0.09505399,\n 0.07681622,\n 0.03604182,\n -0.01853423,\n -0.0270785,\n -0.00680824,\n -0.06650258,\n 0.08004665,\n 0.07918708,\n 0.0724144,\n 0.06256775,\n -0.17838378,\n -0.18863615,\n 0.20064656,\n 0.133717,\n -0.06876295,\n -0.06398046,\n -0.00864975,\n 0.19289537,\n -0.01490572,\n -0.13673618,\n 0.01949645,\n ],\n dtype=np.float32,\n ).reshape(1, 1, 6, 6)\n\n assert np.allclose(result, expected)\n\n\ndef test_group_convolution_backprop_data_output_shape():\n runtime = get_runtime()\n\n data_shape = [1, 1, 1, 10]\n filters_shape = [1, 1, 1, 1, 5]\n strides = [1, 1]\n\n data_node = ng.parameter(data_shape, name=\"Data\", dtype=np.float32)\n filters_node = ng.parameter(filters_shape, name=\"Filters\", dtype=np.float32)\n output_shape_node = ng.constant(np.array([1, 14], dtype=np.int64))\n\n model = ng.group_convolution_backprop_data(\n data_node, filters_node, strides, output_shape_node, auto_pad=\"same_upper\"\n )\n\n data_value = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0], dtype=np.float32).reshape(\n data_shape\n )\n\n filters_value = np.array([1.0, 2.0, 3.0, 2.0, 1.0], 
dtype=np.float32).reshape(filters_shape)\n\n computation = runtime.computation(model, data_node, filters_node)\n result = computation(data_value, filters_value)\n\n expected = np.array(\n [0.0, 1.0, 4.0, 10.0, 18.0, 27.0, 36.0, 45.0, 54.0, 63.0, 62.0, 50.0, 26.0, 9.0], dtype=np.float32,\n ).reshape(1, 1, 1, 14)\n\n assert np.allclose(result, expected)\n"
] |
[
[
"numpy.array",
"numpy.exp",
"numpy.allclose",
"numpy.float32",
"numpy.subtract",
"numpy.arange",
"numpy.clip"
]
] |
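The tests in the entry above build their expected outputs from plain NumPy formulas (np.clip for clamp and PReLU, np.exp for SELU) and compare against the ngraph runtime with np.allclose. The sketch below collects a few of those reference formulas in a NumPy-only form that runs without ngraph; the sample data and default constants are illustrative.

    import numpy as np

    def clamp_ref(x, min_value, max_value):
        return np.clip(x, min_value, max_value)

    def prelu_ref(x, slope):
        # Positive part unchanged, negative part scaled by the (broadcast) slope.
        return np.clip(x, 0, np.inf) + np.clip(x, -np.inf, 0) * slope

    def selu_ref(x, alpha=np.float32(1.6733), lam=np.float32(1.0507)):
        return lam * np.where(x > 0, x, alpha * np.exp(x) - alpha)

    data = np.array([[-5.0, 9.0], [45.0, 3.0]], dtype=np.float32)
    print(clamp_ref(data, np.float32(3), np.float32(12)))
    print(prelu_ref(data, np.float32(0.25)))
    assert np.allclose(selu_ref(np.float32(0.0)), 0.0)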
madhuv2002/pandas
|
[
"006f1e0efb3ec81d52ff4d080b0c770b7b79d041",
"006f1e0efb3ec81d52ff4d080b0c770b7b79d041",
"006f1e0efb3ec81d52ff4d080b0c770b7b79d041"
] |
[
"pandas/core/indexes/datetimelike.py",
"pandas/tests/series/indexing/test_where.py",
"pandas/tests/groupby/test_apply.py"
] |
[
"\"\"\"\nBase and utility classes for tseries type pandas objects.\n\"\"\"\nfrom datetime import datetime\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Hashable,\n List,\n Optional,\n Tuple,\n Type,\n TypeVar,\n Union,\n cast,\n)\n\nimport numpy as np\n\nfrom pandas._libs import (\n NaT,\n Timedelta,\n iNaT,\n join as libjoin,\n lib,\n)\nfrom pandas._libs.tslibs import (\n BaseOffset,\n Resolution,\n Tick,\n)\nfrom pandas._typing import Callable\nfrom pandas.compat.numpy import function as nv\nfrom pandas.util._decorators import (\n Appender,\n cache_readonly,\n doc,\n)\n\nfrom pandas.core.dtypes.common import (\n is_bool_dtype,\n is_categorical_dtype,\n is_dtype_equal,\n is_integer,\n is_list_like,\n is_period_dtype,\n is_scalar,\n)\nfrom pandas.core.dtypes.concat import concat_compat\n\nfrom pandas.core.arrays import (\n DatetimeArray,\n PeriodArray,\n TimedeltaArray,\n)\nfrom pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin\nimport pandas.core.common as com\nimport pandas.core.indexes.base as ibase\nfrom pandas.core.indexes.base import (\n Index,\n _index_shared_docs,\n)\nfrom pandas.core.indexes.extension import (\n NDArrayBackedExtensionIndex,\n inherit_names,\n make_wrapped_arith_op,\n)\nfrom pandas.core.indexes.numeric import Int64Index\nfrom pandas.core.tools.timedeltas import to_timedelta\n\nif TYPE_CHECKING:\n from pandas import CategoricalIndex\n\n_index_doc_kwargs = dict(ibase._index_doc_kwargs)\n\n_T = TypeVar(\"_T\", bound=\"DatetimeIndexOpsMixin\")\n\n\ndef _join_i8_wrapper(joinf, with_indexers: bool = True):\n \"\"\"\n Create the join wrapper methods.\n \"\"\"\n\n # error: 'staticmethod' used with a non-method\n @staticmethod # type: ignore[misc]\n def wrapper(left, right):\n # Note: these only get called with left.dtype == right.dtype\n orig_left = left\n\n left = left.view(\"i8\")\n right = right.view(\"i8\")\n\n results = joinf(left, right)\n if with_indexers:\n\n join_index, left_indexer, right_indexer = results\n if not isinstance(orig_left, np.ndarray):\n # When called from Index._intersection/_union, we have the EA\n join_index = join_index.view(orig_left._ndarray.dtype)\n join_index = orig_left._from_backing_data(join_index)\n\n return join_index, left_indexer, right_indexer\n\n return results\n\n return wrapper\n\n\n@inherit_names(\n [\"inferred_freq\", \"_resolution_obj\", \"resolution\"],\n DatetimeLikeArrayMixin,\n cache=True,\n)\n@inherit_names([\"mean\", \"asi8\", \"freq\", \"freqstr\"], DatetimeLikeArrayMixin)\nclass DatetimeIndexOpsMixin(NDArrayBackedExtensionIndex):\n \"\"\"\n Common ops mixin to support a unified interface datetimelike Index.\n \"\"\"\n\n _can_hold_strings = False\n _data: Union[DatetimeArray, TimedeltaArray, PeriodArray]\n _data_cls: Union[Type[DatetimeArray], Type[TimedeltaArray], Type[PeriodArray]]\n freq: Optional[BaseOffset]\n freqstr: Optional[str]\n _resolution_obj: Resolution\n _bool_ops: List[str] = []\n _field_ops: List[str] = []\n\n # error: \"Callable[[Any], Any]\" has no attribute \"fget\"\n hasnans = cache_readonly(\n DatetimeLikeArrayMixin._hasnans.fget # type: ignore[attr-defined]\n )\n _hasnans = hasnans # for index / array -agnostic code\n\n @classmethod\n def _simple_new(\n cls,\n values: Union[DatetimeArray, TimedeltaArray, PeriodArray],\n name: Optional[Hashable] = None,\n ):\n assert isinstance(values, cls._data_cls), type(values)\n\n result = object.__new__(cls)\n result._data = values\n result._name = name\n result._cache = {}\n\n # For groupby perf. 
See note in indexes/base about _index_data\n result._index_data = values._ndarray\n\n result._reset_identity()\n return result\n\n @property\n def _is_all_dates(self) -> bool:\n return True\n\n # ------------------------------------------------------------------------\n # Abstract data attributes\n\n @property\n def values(self) -> np.ndarray:\n # Note: PeriodArray overrides this to return an ndarray of objects.\n return self._data._ndarray\n\n def __array_wrap__(self, result, context=None):\n \"\"\"\n Gets called after a ufunc and other functions.\n \"\"\"\n result = lib.item_from_zerodim(result)\n if is_bool_dtype(result) or lib.is_scalar(result):\n return result\n\n attrs = self._get_attributes_dict()\n if not is_period_dtype(self.dtype) and attrs[\"freq\"]:\n # no need to infer if freq is None\n attrs[\"freq\"] = \"infer\"\n return type(self)(result, **attrs)\n\n # ------------------------------------------------------------------------\n\n def equals(self, other: Any) -> bool:\n \"\"\"\n Determines if two Index objects contain the same elements.\n \"\"\"\n if self.is_(other):\n return True\n\n if not isinstance(other, Index):\n return False\n elif other.dtype.kind in [\"f\", \"i\", \"u\", \"c\"]:\n return False\n elif not isinstance(other, type(self)):\n should_try = False\n inferable = self._data._infer_matches\n if other.dtype == object:\n should_try = other.inferred_type in inferable\n elif is_categorical_dtype(other.dtype):\n other = cast(\"CategoricalIndex\", other)\n should_try = other.categories.inferred_type in inferable\n\n if should_try:\n try:\n other = type(self)(other)\n except (ValueError, TypeError, OverflowError):\n # e.g.\n # ValueError -> cannot parse str entry, or OutOfBoundsDatetime\n # TypeError -> trying to convert IntervalIndex to DatetimeIndex\n # OverflowError -> Index([very_large_timedeltas])\n return False\n\n if not is_dtype_equal(self.dtype, other.dtype):\n # have different timezone\n return False\n\n return np.array_equal(self.asi8, other.asi8)\n\n @Appender(Index.__contains__.__doc__)\n def __contains__(self, key: Any) -> bool:\n hash(key)\n try:\n res = self.get_loc(key)\n except (KeyError, TypeError, ValueError):\n return False\n return bool(\n is_scalar(res) or isinstance(res, slice) or (is_list_like(res) and len(res))\n )\n\n @Appender(_index_shared_docs[\"take\"] % _index_doc_kwargs)\n def take(self, indices, axis=0, allow_fill=True, fill_value=None, **kwargs):\n nv.validate_take((), kwargs)\n indices = np.asarray(indices, dtype=np.intp)\n\n maybe_slice = lib.maybe_indices_to_slice(indices, len(self))\n\n result = NDArrayBackedExtensionIndex.take(\n self, indices, axis, allow_fill, fill_value, **kwargs\n )\n if isinstance(maybe_slice, slice):\n freq = self._data._get_getitem_freq(maybe_slice)\n result._data._freq = freq\n return result\n\n _can_hold_na = True\n\n _na_value = NaT\n \"\"\"The expected NA value to use with this index.\"\"\"\n\n def _convert_tolerance(self, tolerance, target):\n tolerance = np.asarray(to_timedelta(tolerance).to_numpy())\n return super()._convert_tolerance(tolerance, target)\n\n def tolist(self) -> List:\n \"\"\"\n Return a list of the underlying data.\n \"\"\"\n return list(self.astype(object))\n\n def min(self, axis=None, skipna=True, *args, **kwargs):\n \"\"\"\n Return the minimum value of the Index or minimum along\n an axis.\n\n See Also\n --------\n numpy.ndarray.min\n Series.min : Return the minimum value in a Series.\n \"\"\"\n nv.validate_min(args, kwargs)\n nv.validate_minmax_axis(axis)\n\n if not len(self):\n 
return self._na_value\n\n i8 = self.asi8\n\n if len(i8) and self.is_monotonic_increasing:\n # quick check\n if i8[0] != iNaT:\n return self._data._box_func(i8[0])\n\n if self.hasnans:\n if not skipna:\n return self._na_value\n i8 = i8[~self._isnan]\n\n if not len(i8):\n return self._na_value\n\n min_stamp = i8.min()\n return self._data._box_func(min_stamp)\n\n def argmin(self, axis=None, skipna=True, *args, **kwargs):\n \"\"\"\n Returns the indices of the minimum values along an axis.\n\n See `numpy.ndarray.argmin` for more information on the\n `axis` parameter.\n\n See Also\n --------\n numpy.ndarray.argmin\n \"\"\"\n nv.validate_argmin(args, kwargs)\n nv.validate_minmax_axis(axis)\n\n i8 = self.asi8\n if self.hasnans:\n mask = self._isnan\n if mask.all() or not skipna:\n return -1\n i8 = i8.copy()\n i8[mask] = np.iinfo(\"int64\").max\n return i8.argmin()\n\n def max(self, axis=None, skipna=True, *args, **kwargs):\n \"\"\"\n Return the maximum value of the Index or maximum along\n an axis.\n\n See Also\n --------\n numpy.ndarray.max\n Series.max : Return the maximum value in a Series.\n \"\"\"\n nv.validate_max(args, kwargs)\n nv.validate_minmax_axis(axis)\n\n if not len(self):\n return self._na_value\n\n i8 = self.asi8\n\n if len(i8) and self.is_monotonic:\n # quick check\n if i8[-1] != iNaT:\n return self._data._box_func(i8[-1])\n\n if self.hasnans:\n if not skipna:\n return self._na_value\n i8 = i8[~self._isnan]\n\n if not len(i8):\n return self._na_value\n\n max_stamp = i8.max()\n return self._data._box_func(max_stamp)\n\n def argmax(self, axis=None, skipna=True, *args, **kwargs):\n \"\"\"\n Returns the indices of the maximum values along an axis.\n\n See `numpy.ndarray.argmax` for more information on the\n `axis` parameter.\n\n See Also\n --------\n numpy.ndarray.argmax\n \"\"\"\n nv.validate_argmax(args, kwargs)\n nv.validate_minmax_axis(axis)\n\n i8 = self.asi8\n if self.hasnans:\n mask = self._isnan\n if mask.all() or not skipna:\n return -1\n i8 = i8.copy()\n i8[mask] = 0\n return i8.argmax()\n\n # --------------------------------------------------------------------\n # Rendering Methods\n\n def format(\n self,\n name: bool = False,\n formatter: Optional[Callable] = None,\n na_rep: str = \"NaT\",\n date_format: Optional[str] = None,\n ) -> List[str]:\n \"\"\"\n Render a string representation of the Index.\n \"\"\"\n header = []\n if name:\n header.append(\n ibase.pprint_thing(self.name, escape_chars=(\"\\t\", \"\\r\", \"\\n\"))\n if self.name is not None\n else \"\"\n )\n\n if formatter is not None:\n return header + list(self.map(formatter))\n\n return self._format_with_header(header, na_rep=na_rep, date_format=date_format)\n\n def _format_with_header(\n self, header: List[str], na_rep: str = \"NaT\", date_format: Optional[str] = None\n ) -> List[str]:\n return header + list(\n self._format_native_types(na_rep=na_rep, date_format=date_format)\n )\n\n @property\n def _formatter_func(self):\n return self._data._formatter()\n\n def _format_attrs(self):\n \"\"\"\n Return a list of tuples of the (attr,formatted_value).\n \"\"\"\n attrs = super()._format_attrs()\n for attrib in self._attributes:\n if attrib == \"freq\":\n freq = self.freqstr\n if freq is not None:\n freq = repr(freq)\n attrs.append((\"freq\", freq))\n return attrs\n\n def _summary(self, name=None) -> str:\n \"\"\"\n Return a summarized representation.\n\n Parameters\n ----------\n name : str\n Name to use in the summary representation.\n\n Returns\n -------\n str\n Summarized representation of the index.\n \"\"\"\n 
formatter = self._formatter_func\n if len(self) > 0:\n index_summary = f\", {formatter(self[0])} to {formatter(self[-1])}\"\n else:\n index_summary = \"\"\n\n if name is None:\n name = type(self).__name__\n result = f\"{name}: {len(self)} entries{index_summary}\"\n if self.freq:\n result += f\"\\nFreq: {self.freqstr}\"\n\n # display as values, not quoted\n result = result.replace(\"'\", \"\")\n return result\n\n # --------------------------------------------------------------------\n # Indexing Methods\n\n def _validate_partial_date_slice(self, reso: Resolution):\n raise NotImplementedError\n\n def _parsed_string_to_bounds(self, reso: Resolution, parsed: datetime):\n raise NotImplementedError\n\n def _partial_date_slice(\n self,\n reso: Resolution,\n parsed: datetime,\n ):\n \"\"\"\n Parameters\n ----------\n reso : Resolution\n parsed : datetime\n\n Returns\n -------\n slice or ndarray[intp]\n \"\"\"\n self._validate_partial_date_slice(reso)\n\n t1, t2 = self._parsed_string_to_bounds(reso, parsed)\n vals = self._data._ndarray\n unbox = self._data._unbox\n\n if self.is_monotonic_increasing:\n\n if len(self) and (\n (t1 < self[0] and t2 < self[0]) or (t1 > self[-1] and t2 > self[-1])\n ):\n # we are out of range\n raise KeyError\n\n # TODO: does this depend on being monotonic _increasing_?\n\n # a monotonic (sorted) series can be sliced\n left = vals.searchsorted(unbox(t1), side=\"left\")\n right = vals.searchsorted(unbox(t2), side=\"right\")\n return slice(left, right)\n\n else:\n lhs_mask = vals >= unbox(t1)\n rhs_mask = vals <= unbox(t2)\n\n # try to find the dates\n return (lhs_mask & rhs_mask).nonzero()[0]\n\n # --------------------------------------------------------------------\n # Arithmetic Methods\n\n __add__ = make_wrapped_arith_op(\"__add__\")\n __sub__ = make_wrapped_arith_op(\"__sub__\")\n __radd__ = make_wrapped_arith_op(\"__radd__\")\n __rsub__ = make_wrapped_arith_op(\"__rsub__\")\n __pow__ = make_wrapped_arith_op(\"__pow__\")\n __rpow__ = make_wrapped_arith_op(\"__rpow__\")\n __mul__ = make_wrapped_arith_op(\"__mul__\")\n __rmul__ = make_wrapped_arith_op(\"__rmul__\")\n __floordiv__ = make_wrapped_arith_op(\"__floordiv__\")\n __rfloordiv__ = make_wrapped_arith_op(\"__rfloordiv__\")\n __mod__ = make_wrapped_arith_op(\"__mod__\")\n __rmod__ = make_wrapped_arith_op(\"__rmod__\")\n __divmod__ = make_wrapped_arith_op(\"__divmod__\")\n __rdivmod__ = make_wrapped_arith_op(\"__rdivmod__\")\n __truediv__ = make_wrapped_arith_op(\"__truediv__\")\n __rtruediv__ = make_wrapped_arith_op(\"__rtruediv__\")\n\n def shift(self: _T, periods: int = 1, freq=None) -> _T:\n \"\"\"\n Shift index by desired number of time frequency increments.\n\n This method is for shifting the values of datetime-like indexes\n by a specified time increment a given number of times.\n\n Parameters\n ----------\n periods : int, default 1\n Number of periods (or increments) to shift by,\n can be positive or negative.\n\n .. 
versionchanged:: 0.24.0\n\n freq : pandas.DateOffset, pandas.Timedelta or string, optional\n Frequency increment to shift by.\n If None, the index is shifted by its own `freq` attribute.\n Offset aliases are valid strings, e.g., 'D', 'W', 'M' etc.\n\n Returns\n -------\n pandas.DatetimeIndex\n Shifted index.\n\n See Also\n --------\n Index.shift : Shift values of Index.\n PeriodIndex.shift : Shift values of PeriodIndex.\n \"\"\"\n arr = self._data.view()\n arr._freq = self.freq\n result = arr._time_shift(periods, freq=freq)\n return type(self)(result, name=self.name)\n\n # --------------------------------------------------------------------\n # List-like Methods\n\n def _get_delete_freq(self, loc: int):\n \"\"\"\n Find the `freq` for self.delete(loc).\n \"\"\"\n freq = None\n if is_period_dtype(self.dtype):\n freq = self.freq\n elif self.freq is not None:\n if is_integer(loc):\n if loc in (0, -len(self), -1, len(self) - 1):\n freq = self.freq\n else:\n if is_list_like(loc):\n loc = lib.maybe_indices_to_slice(\n np.asarray(loc, dtype=np.intp), len(self)\n )\n if isinstance(loc, slice) and loc.step in (1, None):\n if loc.start in (0, None) or loc.stop in (len(self), None):\n freq = self.freq\n return freq\n\n def _get_insert_freq(self, loc: int, item):\n \"\"\"\n Find the `freq` for self.insert(loc, item).\n \"\"\"\n value = self._data._validate_scalar(item)\n item = self._data._box_func(value)\n\n freq = None\n if is_period_dtype(self.dtype):\n freq = self.freq\n elif self.freq is not None:\n # freq can be preserved on edge cases\n if self.size:\n if item is NaT:\n pass\n elif (loc == 0 or loc == -len(self)) and item + self.freq == self[0]:\n freq = self.freq\n elif (loc == len(self)) and item - self.freq == self[-1]:\n freq = self.freq\n else:\n # Adding a single item to an empty index may preserve freq\n if self.freq.is_on_offset(item):\n freq = self.freq\n return freq\n\n @doc(NDArrayBackedExtensionIndex.delete)\n def delete(self: _T, loc) -> _T:\n result = super().delete(loc)\n result._data._freq = self._get_delete_freq(loc)\n return result\n\n @doc(NDArrayBackedExtensionIndex.insert)\n def insert(self, loc: int, item):\n result = super().insert(loc, item)\n if isinstance(result, type(self)):\n # i.e. 
parent class method did not cast\n result._data._freq = self._get_insert_freq(loc, item)\n return result\n\n # --------------------------------------------------------------------\n # Join/Set Methods\n\n _inner_indexer = _join_i8_wrapper(libjoin.inner_join_indexer)\n _outer_indexer = _join_i8_wrapper(libjoin.outer_join_indexer)\n _left_indexer = _join_i8_wrapper(libjoin.left_join_indexer)\n _left_indexer_unique = _join_i8_wrapper(\n libjoin.left_join_indexer_unique, with_indexers=False\n )\n\n def _get_join_freq(self, other):\n \"\"\"\n Get the freq to attach to the result of a join operation.\n \"\"\"\n if is_period_dtype(self.dtype):\n freq = self.freq\n else:\n self = cast(DatetimeTimedeltaMixin, self)\n freq = self.freq if self._can_fast_union(other) else None\n return freq\n\n def _wrap_joined_index(self, joined: np.ndarray, other):\n assert other.dtype == self.dtype, (other.dtype, self.dtype)\n assert joined.dtype == \"i8\" or joined.dtype == self.dtype, joined.dtype\n joined = joined.view(self._data._ndarray.dtype)\n result = super()._wrap_joined_index(joined, other)\n result._data._freq = self._get_join_freq(other)\n return result\n\n @doc(Index._convert_arr_indexer)\n def _convert_arr_indexer(self, keyarr):\n try:\n return self._data._validate_listlike(keyarr, allow_object=True)\n except (ValueError, TypeError):\n return com.asarray_tuplesafe(keyarr)\n\n\nclass DatetimeTimedeltaMixin(DatetimeIndexOpsMixin):\n \"\"\"\n Mixin class for methods shared by DatetimeIndex and TimedeltaIndex,\n but not PeriodIndex\n \"\"\"\n\n # Compat for frequency inference, see GH#23789\n _is_monotonic_increasing = Index.is_monotonic_increasing\n _is_monotonic_decreasing = Index.is_monotonic_decreasing\n _is_unique = Index.is_unique\n\n def _with_freq(self, freq):\n arr = self._data._with_freq(freq)\n return type(self)._simple_new(arr, name=self.name)\n\n @property\n def _has_complex_internals(self) -> bool:\n # used to avoid libreduction code paths, which raise or require conversion\n return False\n\n def is_type_compatible(self, kind: str) -> bool:\n return kind in self._data._infer_matches\n\n # --------------------------------------------------------------------\n # Set Operation Methods\n\n def _difference(self, other, sort=None):\n new_idx = super()._difference(other, sort=sort)._with_freq(None)\n return new_idx\n\n def _intersection(self, other: Index, sort=False) -> Index:\n \"\"\"\n intersection specialized to the case with matching dtypes.\n \"\"\"\n other = cast(\"DatetimeTimedeltaMixin\", other)\n if len(self) == 0:\n return self.copy()._get_reconciled_name_object(other)\n if len(other) == 0:\n return other.copy()._get_reconciled_name_object(self)\n\n elif not self._can_fast_intersect(other):\n result = Index._intersection(self, other, sort=sort)\n # We need to invalidate the freq because Index._intersection\n # uses _shallow_copy on a view of self._data, which will preserve\n # self.freq if we're not careful.\n # At this point we should have result.dtype == self.dtype\n # and type(result) is type(self._data)\n result = self._wrap_setop_result(other, result)\n return result._with_freq(None)._with_freq(\"infer\")\n\n # to make our life easier, \"sort\" the two ranges\n if self[0] <= other[0]:\n left, right = self, other\n else:\n left, right = other, self\n\n # after sorting, the intersection always starts with the right index\n # and ends with the index of which the last elements is smallest\n end = min(left[-1], right[-1])\n start = right[0]\n\n if end < start:\n result = self[:0]\n 
else:\n lslice = slice(*left.slice_locs(start, end))\n result = left._values[lslice]\n\n return result\n\n def _can_fast_intersect(self: _T, other: _T) -> bool:\n # Note: we only get here with len(self) > 0 and len(other) > 0\n if self.freq is None:\n return False\n\n elif other.freq != self.freq:\n return False\n\n elif not self.is_monotonic_increasing:\n # Because freq is not None, we must then be monotonic decreasing\n return False\n\n elif self.freq.is_anchored():\n # this along with matching freqs ensure that we \"line up\",\n # so intersection will preserve freq\n return True\n\n elif isinstance(self.freq, Tick):\n # We \"line up\" if and only if the difference between two of our points\n # is a multiple of our freq\n diff = self[0] - other[0]\n remainder = diff % self.freq.delta\n return remainder == Timedelta(0)\n\n return True\n\n def _can_fast_union(self: _T, other: _T) -> bool:\n # Assumes that type(self) == type(other), as per the annotation\n # The ability to fast_union also implies that `freq` should be\n # retained on union.\n freq = self.freq\n\n if freq is None or freq != other.freq:\n return False\n\n if not self.is_monotonic_increasing:\n # Because freq is not None, we must then be monotonic decreasing\n # TODO: do union on the reversed indexes?\n return False\n\n if len(self) == 0 or len(other) == 0:\n return True\n\n # to make our life easier, \"sort\" the two ranges\n if self[0] <= other[0]:\n left, right = self, other\n else:\n left, right = other, self\n\n right_start = right[0]\n left_end = left[-1]\n\n # Only need to \"adjoin\", not overlap\n return (right_start == left_end + freq) or right_start in left\n\n def _fast_union(self: _T, other: _T, sort=None) -> _T:\n if len(other) == 0:\n return self.view(type(self))\n\n if len(self) == 0:\n return other.view(type(self))\n\n # to make our life easier, \"sort\" the two ranges\n if self[0] <= other[0]:\n left, right = self, other\n elif sort is False:\n # TDIs are not in the \"correct\" order and we don't want\n # to sort but want to remove overlaps\n left, right = self, other\n left_start = left[0]\n loc = right.searchsorted(left_start, side=\"left\")\n right_chunk = right._values[:loc]\n dates = concat_compat((left._values, right_chunk))\n # With sort being False, we can't infer that result.freq == self.freq\n # TODO: no tests rely on the _with_freq(\"infer\"); needed?\n result = type(self)._simple_new(dates, name=self.name)\n result = result._with_freq(\"infer\")\n return result\n else:\n left, right = other, self\n\n left_end = left[-1]\n right_end = right[-1]\n\n # concatenate\n if left_end < right_end:\n loc = right.searchsorted(left_end, side=\"right\")\n right_chunk = right._values[loc:]\n dates = concat_compat([left._values, right_chunk])\n # The can_fast_union check ensures that the result.freq\n # should match self.freq\n dates = type(self._data)(dates, freq=self.freq)\n result = type(self)._simple_new(dates)\n return result\n else:\n return left\n\n def _union(self, other, sort):\n # We are called by `union`, which is responsible for this validation\n assert isinstance(other, type(self))\n assert self.dtype == other.dtype\n\n if self._can_fast_union(other):\n result = self._fast_union(other, sort=sort)\n if sort is None:\n # In the case where sort is None, _can_fast_union\n # implies that result.freq should match self.freq\n assert result.freq == self.freq, (result.freq, self.freq)\n elif result.freq is None:\n # TODO: no tests rely on this; needed?\n result = result._with_freq(\"infer\")\n return result\n 
else:\n i8self = Int64Index._simple_new(self.asi8)\n i8other = Int64Index._simple_new(other.asi8)\n i8result = i8self._union(i8other, sort=sort)\n result = type(self)(i8result, dtype=self.dtype, freq=\"infer\")\n return result\n\n # --------------------------------------------------------------------\n # Join Methods\n _join_precedence = 10\n\n def join(\n self, other, how: str = \"left\", level=None, return_indexers=False, sort=False\n ):\n \"\"\"\n See Index.join\n \"\"\"\n pself, pother = self._maybe_promote(other)\n if pself is not self or pother is not other:\n return pself.join(\n pother, how=how, level=level, return_indexers=return_indexers, sort=sort\n )\n\n self._maybe_utc_convert(other) # raises if we dont have tzawareness compat\n return Index.join(\n self,\n other,\n how=how,\n level=level,\n return_indexers=return_indexers,\n sort=sort,\n )\n\n def _maybe_utc_convert(self: _T, other: Index) -> Tuple[_T, Index]:\n # Overridden by DatetimeIndex\n return self, other\n",
"import numpy as np\nimport pytest\n\nfrom pandas.core.dtypes.common import is_integer\n\nimport pandas as pd\nfrom pandas import (\n Series,\n Timestamp,\n date_range,\n isna,\n)\nimport pandas._testing as tm\n\n\ndef test_where_unsafe_int(sint_dtype):\n s = Series(np.arange(10), dtype=sint_dtype)\n mask = s < 5\n\n s[mask] = range(2, 7)\n expected = Series(list(range(2, 7)) + list(range(5, 10)), dtype=sint_dtype)\n\n tm.assert_series_equal(s, expected)\n\n\ndef test_where_unsafe_float(float_dtype):\n s = Series(np.arange(10), dtype=float_dtype)\n mask = s < 5\n\n s[mask] = range(2, 7)\n data = list(range(2, 7)) + list(range(5, 10))\n expected = Series(data, dtype=float_dtype)\n\n tm.assert_series_equal(s, expected)\n\n\n@pytest.mark.parametrize(\n \"dtype,expected_dtype\",\n [\n (np.int8, np.float64),\n (np.int16, np.float64),\n (np.int32, np.float64),\n (np.int64, np.float64),\n (np.float32, np.float32),\n (np.float64, np.float64),\n ],\n)\ndef test_where_unsafe_upcast(dtype, expected_dtype):\n # see gh-9743\n s = Series(np.arange(10), dtype=dtype)\n values = [2.5, 3.5, 4.5, 5.5, 6.5]\n mask = s < 5\n expected = Series(values + list(range(5, 10)), dtype=expected_dtype)\n s[mask] = values\n tm.assert_series_equal(s, expected)\n\n\ndef test_where_unsafe():\n # see gh-9731\n s = Series(np.arange(10), dtype=\"int64\")\n values = [2.5, 3.5, 4.5, 5.5]\n\n mask = s > 5\n expected = Series(list(range(6)) + values, dtype=\"float64\")\n\n s[mask] = values\n tm.assert_series_equal(s, expected)\n\n # see gh-3235\n s = Series(np.arange(10), dtype=\"int64\")\n mask = s < 5\n s[mask] = range(2, 7)\n expected = Series(list(range(2, 7)) + list(range(5, 10)), dtype=\"int64\")\n tm.assert_series_equal(s, expected)\n assert s.dtype == expected.dtype\n\n s = Series(np.arange(10), dtype=\"int64\")\n mask = s > 5\n s[mask] = [0] * 4\n expected = Series([0, 1, 2, 3, 4, 5] + [0] * 4, dtype=\"int64\")\n tm.assert_series_equal(s, expected)\n\n s = Series(np.arange(10))\n mask = s > 5\n\n msg = \"cannot assign mismatch length to masked array\"\n with pytest.raises(ValueError, match=msg):\n s[mask] = [5, 4, 3, 2, 1]\n\n with pytest.raises(ValueError, match=msg):\n s[mask] = [0] * 5\n\n # dtype changes\n s = Series([1, 2, 3, 4])\n result = s.where(s > 2, np.nan)\n expected = Series([np.nan, np.nan, 3, 4])\n tm.assert_series_equal(result, expected)\n\n # GH 4667\n # setting with None changes dtype\n s = Series(range(10)).astype(float)\n s[8] = None\n result = s[8]\n assert isna(result)\n\n s = Series(range(10)).astype(float)\n s[s > 8] = None\n result = s[isna(s)]\n expected = Series(np.nan, index=[9])\n tm.assert_series_equal(result, expected)\n\n\ndef test_where():\n s = Series(np.random.randn(5))\n cond = s > 0\n\n rs = s.where(cond).dropna()\n rs2 = s[cond]\n tm.assert_series_equal(rs, rs2)\n\n rs = s.where(cond, -s)\n tm.assert_series_equal(rs, s.abs())\n\n rs = s.where(cond)\n assert s.shape == rs.shape\n assert rs is not s\n\n # test alignment\n cond = Series([True, False, False, True, False], index=s.index)\n s2 = -(s.abs())\n\n expected = s2[cond].reindex(s2.index[:3]).reindex(s2.index)\n rs = s2.where(cond[:3])\n tm.assert_series_equal(rs, expected)\n\n expected = s2.abs()\n expected.iloc[0] = s2[0]\n rs = s2.where(cond[:3], -s2)\n tm.assert_series_equal(rs, expected)\n\n\ndef test_where_error():\n s = Series(np.random.randn(5))\n cond = s > 0\n\n msg = \"Array conditional must be same shape as self\"\n with pytest.raises(ValueError, match=msg):\n s.where(1)\n with pytest.raises(ValueError, match=msg):\n 
s.where(cond[:3].values, -s)\n\n # GH 2745\n s = Series([1, 2])\n s[[True, False]] = [0, 1]\n expected = Series([0, 2])\n tm.assert_series_equal(s, expected)\n\n # failures\n msg = \"cannot assign mismatch length to masked array\"\n with pytest.raises(ValueError, match=msg):\n s[[True, False]] = [0, 2, 3]\n msg = (\n \"NumPy boolean array indexing assignment cannot assign 0 input \"\n \"values to the 1 output values where the mask is true\"\n )\n with pytest.raises(ValueError, match=msg):\n s[[True, False]] = []\n\n\n@pytest.mark.parametrize(\"klass\", [list, tuple, np.array, Series])\ndef test_where_array_like(klass):\n # see gh-15414\n s = Series([1, 2, 3])\n cond = [False, True, True]\n expected = Series([np.nan, 2, 3])\n\n result = s.where(klass(cond))\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n \"cond\",\n [\n [1, 0, 1],\n Series([2, 5, 7]),\n [\"True\", \"False\", \"True\"],\n [Timestamp(\"2017-01-01\"), pd.NaT, Timestamp(\"2017-01-02\")],\n ],\n)\ndef test_where_invalid_input(cond):\n # see gh-15414: only boolean arrays accepted\n s = Series([1, 2, 3])\n msg = \"Boolean array expected for the condition\"\n\n with pytest.raises(ValueError, match=msg):\n s.where(cond)\n\n msg = \"Array conditional must be same shape as self\"\n with pytest.raises(ValueError, match=msg):\n s.where([True])\n\n\ndef test_where_ndframe_align():\n msg = \"Array conditional must be same shape as self\"\n s = Series([1, 2, 3])\n\n cond = [True]\n with pytest.raises(ValueError, match=msg):\n s.where(cond)\n\n expected = Series([1, np.nan, np.nan])\n\n out = s.where(Series(cond))\n tm.assert_series_equal(out, expected)\n\n cond = np.array([False, True, False, True])\n with pytest.raises(ValueError, match=msg):\n s.where(cond)\n\n expected = Series([np.nan, 2, np.nan])\n\n out = s.where(Series(cond))\n tm.assert_series_equal(out, expected)\n\n\ndef test_where_setitem_invalid():\n # GH 2702\n # make sure correct exceptions are raised on invalid list assignment\n\n msg = (\n lambda x: f\"cannot set using a {x} indexer with a \"\n \"different length than the value\"\n )\n # slice\n s = Series(list(\"abc\"))\n\n with pytest.raises(ValueError, match=msg(\"slice\")):\n s[0:3] = list(range(27))\n\n s[0:3] = list(range(3))\n expected = Series([0, 1, 2])\n tm.assert_series_equal(s.astype(np.int64), expected)\n\n # slice with step\n s = Series(list(\"abcdef\"))\n\n with pytest.raises(ValueError, match=msg(\"slice\")):\n s[0:4:2] = list(range(27))\n\n s = Series(list(\"abcdef\"))\n s[0:4:2] = list(range(2))\n expected = Series([0, \"b\", 1, \"d\", \"e\", \"f\"])\n tm.assert_series_equal(s, expected)\n\n # neg slices\n s = Series(list(\"abcdef\"))\n\n with pytest.raises(ValueError, match=msg(\"slice\")):\n s[:-1] = list(range(27))\n\n s[-3:-1] = list(range(2))\n expected = Series([\"a\", \"b\", \"c\", 0, 1, \"f\"])\n tm.assert_series_equal(s, expected)\n\n # list\n s = Series(list(\"abc\"))\n\n with pytest.raises(ValueError, match=msg(\"list-like\")):\n s[[0, 1, 2]] = list(range(27))\n\n s = Series(list(\"abc\"))\n\n with pytest.raises(ValueError, match=msg(\"list-like\")):\n s[[0, 1, 2]] = list(range(2))\n\n # scalar\n s = Series(list(\"abc\"))\n s[0] = list(range(10))\n expected = Series([list(range(10)), \"b\", \"c\"])\n tm.assert_series_equal(s, expected)\n\n\n@pytest.mark.parametrize(\"size\", range(2, 6))\n@pytest.mark.parametrize(\n \"mask\", [[True, False, False, False, False], [True, False], [False]]\n)\n@pytest.mark.parametrize(\n \"item\", [2.0, np.nan, np.finfo(float).max, 
np.finfo(float).min]\n)\n# Test numpy arrays, lists and tuples as the input to be\n# broadcast\n@pytest.mark.parametrize(\n \"box\", [lambda x: np.array([x]), lambda x: [x], lambda x: (x,)]\n)\ndef test_broadcast(size, mask, item, box):\n selection = np.resize(mask, size)\n\n data = np.arange(size, dtype=float)\n\n # Construct the expected series by taking the source\n # data or item based on the selection\n expected = Series(\n [item if use_item else data[i] for i, use_item in enumerate(selection)]\n )\n\n s = Series(data)\n s[selection] = box(item)\n tm.assert_series_equal(s, expected)\n\n s = Series(data)\n result = s.where(~selection, box(item))\n tm.assert_series_equal(result, expected)\n\n s = Series(data)\n result = s.mask(selection, box(item))\n tm.assert_series_equal(result, expected)\n\n\ndef test_where_inplace():\n s = Series(np.random.randn(5))\n cond = s > 0\n\n rs = s.copy()\n\n rs.where(cond, inplace=True)\n tm.assert_series_equal(rs.dropna(), s[cond])\n tm.assert_series_equal(rs, s.where(cond))\n\n rs = s.copy()\n rs.where(cond, -s, inplace=True)\n tm.assert_series_equal(rs, s.where(cond, -s))\n\n\ndef test_where_dups():\n # GH 4550\n # where crashes with dups in index\n s1 = Series(list(range(3)))\n s2 = Series(list(range(3)))\n comb = pd.concat([s1, s2])\n result = comb.where(comb < 2)\n expected = Series([0, 1, np.nan, 0, 1, np.nan], index=[0, 1, 2, 0, 1, 2])\n tm.assert_series_equal(result, expected)\n\n # GH 4548\n # inplace updating not working with dups\n comb[comb < 1] = 5\n expected = Series([5, 1, 2, 5, 1, 2], index=[0, 1, 2, 0, 1, 2])\n tm.assert_series_equal(comb, expected)\n\n comb[comb < 2] += 10\n expected = Series([5, 11, 2, 5, 11, 2], index=[0, 1, 2, 0, 1, 2])\n tm.assert_series_equal(comb, expected)\n\n\ndef test_where_numeric_with_string():\n # GH 9280\n s = Series([1, 2, 3])\n w = s.where(s > 1, \"X\")\n\n assert not is_integer(w[0])\n assert is_integer(w[1])\n assert is_integer(w[2])\n assert isinstance(w[0], str)\n assert w.dtype == \"object\"\n\n w = s.where(s > 1, [\"X\", \"Y\", \"Z\"])\n assert not is_integer(w[0])\n assert is_integer(w[1])\n assert is_integer(w[2])\n assert isinstance(w[0], str)\n assert w.dtype == \"object\"\n\n w = s.where(s > 1, np.array([\"X\", \"Y\", \"Z\"]))\n assert not is_integer(w[0])\n assert is_integer(w[1])\n assert is_integer(w[2])\n assert isinstance(w[0], str)\n assert w.dtype == \"object\"\n\n\ndef test_where_timedelta_coerce():\n s = Series([1, 2], dtype=\"timedelta64[ns]\")\n expected = Series([10, 10])\n mask = np.array([False, False])\n\n rs = s.where(mask, [10, 10])\n tm.assert_series_equal(rs, expected)\n\n rs = s.where(mask, 10)\n tm.assert_series_equal(rs, expected)\n\n rs = s.where(mask, 10.0)\n tm.assert_series_equal(rs, expected)\n\n rs = s.where(mask, [10.0, 10.0])\n tm.assert_series_equal(rs, expected)\n\n rs = s.where(mask, [10.0, np.nan])\n expected = Series([10, None], dtype=\"object\")\n tm.assert_series_equal(rs, expected)\n\n\ndef test_where_datetime_conversion():\n s = Series(date_range(\"20130102\", periods=2))\n expected = Series([10, 10])\n mask = np.array([False, False])\n\n rs = s.where(mask, [10, 10])\n tm.assert_series_equal(rs, expected)\n\n rs = s.where(mask, 10)\n tm.assert_series_equal(rs, expected)\n\n rs = s.where(mask, 10.0)\n tm.assert_series_equal(rs, expected)\n\n rs = s.where(mask, [10.0, 10.0])\n tm.assert_series_equal(rs, expected)\n\n rs = s.where(mask, [10.0, np.nan])\n expected = Series([10, None], dtype=\"object\")\n tm.assert_series_equal(rs, expected)\n\n # GH 15701\n 
timestamps = [\"2016-12-31 12:00:04+00:00\", \"2016-12-31 12:00:04.010000+00:00\"]\n s = Series([Timestamp(t) for t in timestamps])\n rs = s.where(Series([False, True]))\n expected = Series([pd.NaT, s[1]])\n tm.assert_series_equal(rs, expected)\n\n\ndef test_where_dt_tz_values(tz_naive_fixture):\n ser1 = Series(\n pd.DatetimeIndex([\"20150101\", \"20150102\", \"20150103\"], tz=tz_naive_fixture)\n )\n ser2 = Series(\n pd.DatetimeIndex([\"20160514\", \"20160515\", \"20160516\"], tz=tz_naive_fixture)\n )\n mask = Series([True, True, False])\n result = ser1.where(mask, ser2)\n exp = Series(\n pd.DatetimeIndex([\"20150101\", \"20150102\", \"20160516\"], tz=tz_naive_fixture)\n )\n tm.assert_series_equal(exp, result)\n\n\ndef test_where_sparse():\n # GH#17198 make sure we dont get an AttributeError for sp_index\n ser = Series(pd.arrays.SparseArray([1, 2]))\n result = ser.where(ser >= 2, 0)\n expected = Series(pd.arrays.SparseArray([0, 2]))\n tm.assert_series_equal(result, expected)\n\n\ndef test_where_empty_series_and_empty_cond_having_non_bool_dtypes():\n # https://github.com/pandas-dev/pandas/issues/34592\n ser = Series([], dtype=float)\n result = ser.where([])\n tm.assert_series_equal(result, ser)\n\n\n@pytest.mark.parametrize(\"klass\", [Series, pd.DataFrame])\ndef test_where_categorical(klass):\n # https://github.com/pandas-dev/pandas/issues/18888\n exp = klass(\n pd.Categorical([\"A\", \"A\", \"B\", \"B\", np.nan], categories=[\"A\", \"B\", \"C\"]),\n dtype=\"category\",\n )\n df = klass([\"A\", \"A\", \"B\", \"B\", \"C\"], dtype=\"category\")\n res = df.where(df != \"C\")\n tm.assert_equal(exp, res)\n\n\ndef test_where_datetimelike_categorical(tz_naive_fixture):\n # GH#37682\n tz = tz_naive_fixture\n\n dr = date_range(\"2001-01-01\", periods=3, tz=tz)._with_freq(None)\n lvals = pd.DatetimeIndex([dr[0], dr[1], pd.NaT])\n rvals = pd.Categorical([dr[0], pd.NaT, dr[2]])\n\n mask = np.array([True, True, False])\n\n # DatetimeIndex.where\n res = lvals.where(mask, rvals)\n tm.assert_index_equal(res, dr)\n\n # DatetimeArray.where\n res = lvals._data.where(mask, rvals)\n tm.assert_datetime_array_equal(res, dr._data)\n\n # Series.where\n res = Series(lvals).where(mask, rvals)\n tm.assert_series_equal(res, Series(dr))\n\n # DataFrame.where\n res = pd.DataFrame(lvals).where(mask[:, None], pd.DataFrame(rvals))\n\n tm.assert_frame_equal(res, pd.DataFrame(dr))\n",
"from datetime import (\n date,\n datetime,\n)\nfrom io import StringIO\n\nimport numpy as np\nimport pytest\n\nimport pandas.util._test_decorators as td\n\nimport pandas as pd\nfrom pandas import (\n DataFrame,\n Index,\n MultiIndex,\n Series,\n bdate_range,\n)\nimport pandas._testing as tm\n\n\ndef test_apply_issues():\n # GH 5788\n\n s = \"\"\"2011.05.16,00:00,1.40893\n2011.05.16,01:00,1.40760\n2011.05.16,02:00,1.40750\n2011.05.16,03:00,1.40649\n2011.05.17,02:00,1.40893\n2011.05.17,03:00,1.40760\n2011.05.17,04:00,1.40750\n2011.05.17,05:00,1.40649\n2011.05.18,02:00,1.40893\n2011.05.18,03:00,1.40760\n2011.05.18,04:00,1.40750\n2011.05.18,05:00,1.40649\"\"\"\n\n df = pd.read_csv(\n StringIO(s),\n header=None,\n names=[\"date\", \"time\", \"value\"],\n parse_dates=[[\"date\", \"time\"]],\n )\n df = df.set_index(\"date_time\")\n\n expected = df.groupby(df.index.date).idxmax()\n result = df.groupby(df.index.date).apply(lambda x: x.idxmax())\n tm.assert_frame_equal(result, expected)\n\n # GH 5789\n # don't auto coerce dates\n df = pd.read_csv(StringIO(s), header=None, names=[\"date\", \"time\", \"value\"])\n exp_idx = Index(\n [\"2011.05.16\", \"2011.05.17\", \"2011.05.18\"], dtype=object, name=\"date\"\n )\n expected = Series([\"00:00\", \"02:00\", \"02:00\"], index=exp_idx)\n result = df.groupby(\"date\").apply(lambda x: x[\"time\"][x[\"value\"].idxmax()])\n tm.assert_series_equal(result, expected)\n\n\ndef test_apply_trivial():\n # GH 20066\n # trivial apply: ignore input and return a constant dataframe.\n df = DataFrame(\n {\"key\": [\"a\", \"a\", \"b\", \"b\", \"a\"], \"data\": [1.0, 2.0, 3.0, 4.0, 5.0]},\n columns=[\"key\", \"data\"],\n )\n expected = pd.concat([df.iloc[1:], df.iloc[1:]], axis=1, keys=[\"float64\", \"object\"])\n result = df.groupby([str(x) for x in df.dtypes], axis=1).apply(\n lambda x: df.iloc[1:]\n )\n\n tm.assert_frame_equal(result, expected)\n\n\ndef test_apply_trivial_fail():\n # GH 20066\n df = DataFrame(\n {\"key\": [\"a\", \"a\", \"b\", \"b\", \"a\"], \"data\": [1.0, 2.0, 3.0, 4.0, 5.0]},\n columns=[\"key\", \"data\"],\n )\n expected = pd.concat([df, df], axis=1, keys=[\"float64\", \"object\"])\n result = df.groupby([str(x) for x in df.dtypes], axis=1).apply(lambda x: df)\n\n tm.assert_frame_equal(result, expected)\n\n\n@td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) fast_apply not used\ndef test_fast_apply():\n # make sure that fast apply is correctly called\n # rather than raising any kind of error\n # otherwise the python path will be callsed\n # which slows things down\n N = 1000\n labels = np.random.randint(0, 2000, size=N)\n labels2 = np.random.randint(0, 3, size=N)\n df = DataFrame(\n {\n \"key\": labels,\n \"key2\": labels2,\n \"value1\": np.random.randn(N),\n \"value2\": [\"foo\", \"bar\", \"baz\", \"qux\"] * (N // 4),\n }\n )\n\n def f(g):\n return 1\n\n g = df.groupby([\"key\", \"key2\"])\n\n grouper = g.grouper\n\n splitter = grouper._get_splitter(g._selected_obj, axis=g.axis)\n group_keys = grouper._get_group_keys()\n sdata = splitter.sorted_data\n\n values, mutated = splitter.fast_apply(f, sdata, group_keys)\n\n assert not mutated\n\n\n@pytest.mark.parametrize(\n \"df, group_names\",\n [\n (DataFrame({\"a\": [1, 1, 1, 2, 3], \"b\": [\"a\", \"a\", \"a\", \"b\", \"c\"]}), [1, 2, 3]),\n (DataFrame({\"a\": [0, 0, 1, 1], \"b\": [0, 1, 0, 1]}), [0, 1]),\n (DataFrame({\"a\": [1]}), [1]),\n (DataFrame({\"a\": [1, 1, 1, 2, 2, 1, 1, 2], \"b\": range(8)}), [1, 2]),\n (DataFrame({\"a\": [1, 2, 3, 1, 2, 3], \"two\": [4, 5, 6, 7, 8, 9]}), [1, 2, 
3]),\n (\n DataFrame(\n {\n \"a\": list(\"aaabbbcccc\"),\n \"B\": [3, 4, 3, 6, 5, 2, 1, 9, 5, 4],\n \"C\": [4, 0, 2, 2, 2, 7, 8, 6, 2, 8],\n }\n ),\n [\"a\", \"b\", \"c\"],\n ),\n (DataFrame([[1, 2, 3], [2, 2, 3]], columns=[\"a\", \"b\", \"c\"]), [1, 2]),\n ],\n ids=[\n \"GH2936\",\n \"GH7739 & GH10519\",\n \"GH10519\",\n \"GH2656\",\n \"GH12155\",\n \"GH20084\",\n \"GH21417\",\n ],\n)\ndef test_group_apply_once_per_group(df, group_names):\n # GH2936, GH7739, GH10519, GH2656, GH12155, GH20084, GH21417\n\n # This test should ensure that a function is only evaluated\n # once per group. Previously the function has been evaluated twice\n # on the first group to check if the Cython index slider is safe to use\n # This test ensures that the side effect (append to list) is only triggered\n # once per group\n\n names = []\n # cannot parameterize over the functions since they need external\n # `names` to detect side effects\n\n def f_copy(group):\n # this takes the fast apply path\n names.append(group.name)\n return group.copy()\n\n def f_nocopy(group):\n # this takes the slow apply path\n names.append(group.name)\n return group\n\n def f_scalar(group):\n # GH7739, GH2656\n names.append(group.name)\n return 0\n\n def f_none(group):\n # GH10519, GH12155, GH21417\n names.append(group.name)\n return None\n\n def f_constant_df(group):\n # GH2936, GH20084\n names.append(group.name)\n return DataFrame({\"a\": [1], \"b\": [1]})\n\n for func in [f_copy, f_nocopy, f_scalar, f_none, f_constant_df]:\n del names[:]\n\n df.groupby(\"a\").apply(func)\n assert names == group_names\n\n\ndef test_group_apply_once_per_group2(capsys):\n # GH: 31111\n # groupby-apply need to execute len(set(group_by_columns)) times\n\n expected = 2 # Number of times `apply` should call a function for the current test\n\n df = DataFrame(\n {\n \"group_by_column\": [0, 0, 0, 0, 1, 1, 1, 1],\n \"test_column\": [\"0\", \"2\", \"4\", \"6\", \"8\", \"10\", \"12\", \"14\"],\n },\n index=[\"0\", \"2\", \"4\", \"6\", \"8\", \"10\", \"12\", \"14\"],\n )\n\n df.groupby(\"group_by_column\").apply(lambda df: print(\"function_called\"))\n\n result = capsys.readouterr().out.count(\"function_called\")\n # If `groupby` behaves unexpectedly, this test will break\n assert result == expected\n\n\n@td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) fast_apply not used\n@pytest.mark.xfail(reason=\"GH-34998\")\ndef test_apply_fast_slow_identical():\n # GH 31613\n\n df = DataFrame({\"A\": [0, 0, 1], \"b\": range(3)})\n\n # For simple index structures we check for fast/slow apply using\n # an identity check on in/output\n def slow(group):\n return group\n\n def fast(group):\n return group.copy()\n\n fast_df = df.groupby(\"A\").apply(fast)\n slow_df = df.groupby(\"A\").apply(slow)\n\n tm.assert_frame_equal(fast_df, slow_df)\n\n\n@td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) fast_apply not used\n@pytest.mark.parametrize(\n \"func\",\n [\n lambda x: x,\n pytest.param(lambda x: x[:], marks=pytest.mark.xfail(reason=\"GH-34998\")),\n lambda x: x.copy(deep=False),\n pytest.param(\n lambda x: x.copy(deep=True), marks=pytest.mark.xfail(reason=\"GH-34998\")\n ),\n ],\n)\ndef test_groupby_apply_identity_maybecopy_index_identical(func):\n # GH 14927\n # Whether the function returns a copy of the input data or not should not\n # have an impact on the index structure of the result since this is not\n # transparent to the user\n\n df = DataFrame({\"g\": [1, 2, 2, 2], \"a\": [1, 2, 3, 4], \"b\": [5, 6, 7, 8]})\n\n result = 
df.groupby(\"g\").apply(func)\n tm.assert_frame_equal(result, df)\n\n\ndef test_apply_with_mixed_dtype():\n # GH3480, apply with mixed dtype on axis=1 breaks in 0.11\n df = DataFrame(\n {\n \"foo1\": np.random.randn(6),\n \"foo2\": [\"one\", \"two\", \"two\", \"three\", \"one\", \"two\"],\n }\n )\n result = df.apply(lambda x: x, axis=1).dtypes\n expected = df.dtypes\n tm.assert_series_equal(result, expected)\n\n # GH 3610 incorrect dtype conversion with as_index=False\n df = DataFrame({\"c1\": [1, 2, 6, 6, 8]})\n df[\"c2\"] = df.c1 / 2.0\n result1 = df.groupby(\"c2\").mean().reset_index().c2\n result2 = df.groupby(\"c2\", as_index=False).mean().c2\n tm.assert_series_equal(result1, result2)\n\n\ndef test_groupby_as_index_apply(df):\n # GH #4648 and #3417\n df = DataFrame(\n {\n \"item_id\": [\"b\", \"b\", \"a\", \"c\", \"a\", \"b\"],\n \"user_id\": [1, 2, 1, 1, 3, 1],\n \"time\": range(6),\n }\n )\n\n g_as = df.groupby(\"user_id\", as_index=True)\n g_not_as = df.groupby(\"user_id\", as_index=False)\n\n res_as = g_as.head(2).index\n res_not_as = g_not_as.head(2).index\n exp = Index([0, 1, 2, 4])\n tm.assert_index_equal(res_as, exp)\n tm.assert_index_equal(res_not_as, exp)\n\n res_as_apply = g_as.apply(lambda x: x.head(2)).index\n res_not_as_apply = g_not_as.apply(lambda x: x.head(2)).index\n\n # apply doesn't maintain the original ordering\n # changed in GH5610 as the as_index=False returns a MI here\n exp_not_as_apply = MultiIndex.from_tuples([(0, 0), (0, 2), (1, 1), (2, 4)])\n tp = [(1, 0), (1, 2), (2, 1), (3, 4)]\n exp_as_apply = MultiIndex.from_tuples(tp, names=[\"user_id\", None])\n\n tm.assert_index_equal(res_as_apply, exp_as_apply)\n tm.assert_index_equal(res_not_as_apply, exp_not_as_apply)\n\n ind = Index(list(\"abcde\"))\n df = DataFrame([[1, 2], [2, 3], [1, 4], [1, 5], [2, 6]], index=ind)\n res = df.groupby(0, as_index=False).apply(lambda x: x).index\n tm.assert_index_equal(res, ind)\n\n\n@td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) quantile\ndef test_apply_concat_preserve_names(three_group):\n grouped = three_group.groupby([\"A\", \"B\"])\n\n def desc(group):\n result = group.describe()\n result.index.name = \"stat\"\n return result\n\n def desc2(group):\n result = group.describe()\n result.index.name = \"stat\"\n result = result[: len(group)]\n # weirdo\n return result\n\n def desc3(group):\n result = group.describe()\n\n # names are different\n result.index.name = f\"stat_{len(group):d}\"\n\n result = result[: len(group)]\n # weirdo\n return result\n\n result = grouped.apply(desc)\n assert result.index.names == (\"A\", \"B\", \"stat\")\n\n result2 = grouped.apply(desc2)\n assert result2.index.names == (\"A\", \"B\", \"stat\")\n\n result3 = grouped.apply(desc3)\n assert result3.index.names == (\"A\", \"B\", None)\n\n\ndef test_apply_series_to_frame():\n def f(piece):\n with np.errstate(invalid=\"ignore\"):\n logged = np.log(piece)\n return DataFrame(\n {\"value\": piece, \"demeaned\": piece - piece.mean(), \"logged\": logged}\n )\n\n dr = bdate_range(\"1/1/2000\", periods=100)\n ts = Series(np.random.randn(100), index=dr)\n\n grouped = ts.groupby(lambda x: x.month)\n result = grouped.apply(f)\n\n assert isinstance(result, DataFrame)\n tm.assert_index_equal(result.index, ts.index)\n\n\ndef test_apply_series_yield_constant(df):\n result = df.groupby([\"A\", \"B\"])[\"C\"].apply(len)\n assert result.index.names[:2] == (\"A\", \"B\")\n\n\ndef test_apply_frame_yield_constant(df):\n # GH13568\n result = df.groupby([\"A\", \"B\"]).apply(len)\n assert 
isinstance(result, Series)\n assert result.name is None\n\n result = df.groupby([\"A\", \"B\"])[[\"C\", \"D\"]].apply(len)\n assert isinstance(result, Series)\n assert result.name is None\n\n\ndef test_apply_frame_to_series(df):\n grouped = df.groupby([\"A\", \"B\"])\n result = grouped.apply(len)\n expected = grouped.count()[\"C\"]\n tm.assert_index_equal(result.index, expected.index)\n tm.assert_numpy_array_equal(result.values, expected.values)\n\n\ndef test_apply_frame_not_as_index_column_name(df):\n # GH 35964 - path within _wrap_applied_output not hit by a test\n grouped = df.groupby([\"A\", \"B\"], as_index=False)\n result = grouped.apply(len)\n expected = grouped.count().rename(columns={\"C\": np.nan}).drop(columns=\"D\")\n # TODO: Use assert_frame_equal when column name is not np.nan (GH 36306)\n tm.assert_index_equal(result.index, expected.index)\n tm.assert_numpy_array_equal(result.values, expected.values)\n\n\ndef test_apply_frame_concat_series():\n def trans(group):\n return group.groupby(\"B\")[\"C\"].sum().sort_values()[:2]\n\n def trans2(group):\n grouped = group.groupby(df.reindex(group.index)[\"B\"])\n return grouped.sum().sort_values()[:2]\n\n df = DataFrame(\n {\n \"A\": np.random.randint(0, 5, 1000),\n \"B\": np.random.randint(0, 5, 1000),\n \"C\": np.random.randn(1000),\n }\n )\n\n result = df.groupby(\"A\").apply(trans)\n exp = df.groupby(\"A\")[\"C\"].apply(trans2)\n tm.assert_series_equal(result, exp, check_names=False)\n assert result.name == \"C\"\n\n\ndef test_apply_transform(ts):\n grouped = ts.groupby(lambda x: x.month)\n result = grouped.apply(lambda x: x * 2)\n expected = grouped.transform(lambda x: x * 2)\n tm.assert_series_equal(result, expected)\n\n\ndef test_apply_multikey_corner(tsframe):\n grouped = tsframe.groupby([lambda x: x.year, lambda x: x.month])\n\n def f(group):\n return group.sort_values(\"A\")[-5:]\n\n result = grouped.apply(f)\n for key, group in grouped:\n tm.assert_frame_equal(result.loc[key], f(group))\n\n\ndef test_apply_chunk_view():\n # Low level tinkering could be unsafe, make sure not\n df = DataFrame({\"key\": [1, 1, 1, 2, 2, 2, 3, 3, 3], \"value\": range(9)})\n\n result = df.groupby(\"key\", group_keys=False).apply(lambda x: x[:2])\n expected = df.take([0, 1, 3, 4, 6, 7])\n tm.assert_frame_equal(result, expected)\n\n\ndef test_apply_no_name_column_conflict():\n df = DataFrame(\n {\n \"name\": [1, 1, 1, 1, 1, 1, 2, 2, 2, 2],\n \"name2\": [0, 0, 0, 1, 1, 1, 0, 0, 1, 1],\n \"value\": range(9, -1, -1),\n }\n )\n\n # it works! 
#2605\n grouped = df.groupby([\"name\", \"name2\"])\n grouped.apply(lambda x: x.sort_values(\"value\", inplace=True))\n\n\ndef test_apply_typecast_fail():\n df = DataFrame(\n {\n \"d\": [1.0, 1.0, 1.0, 2.0, 2.0, 2.0],\n \"c\": np.tile([\"a\", \"b\", \"c\"], 2),\n \"v\": np.arange(1.0, 7.0),\n }\n )\n\n def f(group):\n v = group[\"v\"]\n group[\"v2\"] = (v - v.min()) / (v.max() - v.min())\n return group\n\n result = df.groupby(\"d\").apply(f)\n\n expected = df.copy()\n expected[\"v2\"] = np.tile([0.0, 0.5, 1], 2)\n\n tm.assert_frame_equal(result, expected)\n\n\ndef test_apply_multiindex_fail():\n index = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [1, 2, 3, 1, 2, 3]])\n df = DataFrame(\n {\n \"d\": [1.0, 1.0, 1.0, 2.0, 2.0, 2.0],\n \"c\": np.tile([\"a\", \"b\", \"c\"], 2),\n \"v\": np.arange(1.0, 7.0),\n },\n index=index,\n )\n\n def f(group):\n v = group[\"v\"]\n group[\"v2\"] = (v - v.min()) / (v.max() - v.min())\n return group\n\n result = df.groupby(\"d\").apply(f)\n\n expected = df.copy()\n expected[\"v2\"] = np.tile([0.0, 0.5, 1], 2)\n\n tm.assert_frame_equal(result, expected)\n\n\ndef test_apply_corner(tsframe):\n result = tsframe.groupby(lambda x: x.year).apply(lambda x: x * 2)\n expected = tsframe * 2\n tm.assert_frame_equal(result, expected)\n\n\ndef test_apply_without_copy():\n # GH 5545\n # returning a non-copy in an applied function fails\n\n data = DataFrame(\n {\n \"id_field\": [100, 100, 200, 300],\n \"category\": [\"a\", \"b\", \"c\", \"c\"],\n \"value\": [1, 2, 3, 4],\n }\n )\n\n def filt1(x):\n if x.shape[0] == 1:\n return x.copy()\n else:\n return x[x.category == \"c\"]\n\n def filt2(x):\n if x.shape[0] == 1:\n return x\n else:\n return x[x.category == \"c\"]\n\n expected = data.groupby(\"id_field\").apply(filt1)\n result = data.groupby(\"id_field\").apply(filt2)\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\"test_series\", [True, False])\ndef test_apply_with_duplicated_non_sorted_axis(test_series):\n # GH 30667\n df = DataFrame(\n [[\"x\", \"p\"], [\"x\", \"p\"], [\"x\", \"o\"]], columns=[\"X\", \"Y\"], index=[1, 2, 2]\n )\n if test_series:\n ser = df.set_index(\"Y\")[\"X\"]\n result = ser.groupby(level=0).apply(lambda x: x)\n\n # not expecting the order to remain the same for duplicated axis\n result = result.sort_index()\n expected = ser.sort_index()\n tm.assert_series_equal(result, expected)\n else:\n result = df.groupby(\"Y\").apply(lambda x: x)\n\n # not expecting the order to remain the same for duplicated axis\n result = result.sort_values(\"Y\")\n expected = df.sort_values(\"Y\")\n tm.assert_frame_equal(result, expected)\n\n\ndef test_apply_reindex_values():\n # GH: 26209\n # reindexing from a single column of a groupby object with duplicate indices caused\n # a ValueError (cannot reindex from duplicate axis) in 0.24.2, the problem was\n # solved in #30679\n values = [1, 2, 3, 4]\n indices = [1, 1, 2, 2]\n df = DataFrame({\"group\": [\"Group1\", \"Group2\"] * 2, \"value\": values}, index=indices)\n expected = Series(values, index=indices, name=\"value\")\n\n def reindex_helper(x):\n return x.reindex(np.arange(x.index.min(), x.index.max() + 1))\n\n # the following group by raised a ValueError\n result = df.groupby(\"group\").value.apply(reindex_helper)\n tm.assert_series_equal(expected, result)\n\n\ndef test_apply_corner_cases():\n # #535, can't use sliding iterator\n\n N = 1000\n labels = np.random.randint(0, 100, size=N)\n df = DataFrame(\n {\n \"key\": labels,\n \"value1\": np.random.randn(N),\n \"value2\": [\"foo\", \"bar\", 
\"baz\", \"qux\"] * (N // 4),\n }\n )\n\n grouped = df.groupby(\"key\")\n\n def f(g):\n g[\"value3\"] = g[\"value1\"] * 2\n return g\n\n result = grouped.apply(f)\n assert \"value3\" in result\n\n\ndef test_apply_numeric_coercion_when_datetime():\n # In the past, group-by/apply operations have been over-eager\n # in converting dtypes to numeric, in the presence of datetime\n # columns. Various GH issues were filed, the reproductions\n # for which are here.\n\n # GH 15670\n df = DataFrame(\n {\"Number\": [1, 2], \"Date\": [\"2017-03-02\"] * 2, \"Str\": [\"foo\", \"inf\"]}\n )\n expected = df.groupby([\"Number\"]).apply(lambda x: x.iloc[0])\n df.Date = pd.to_datetime(df.Date)\n result = df.groupby([\"Number\"]).apply(lambda x: x.iloc[0])\n tm.assert_series_equal(result[\"Str\"], expected[\"Str\"])\n\n # GH 15421\n df = DataFrame(\n {\"A\": [10, 20, 30], \"B\": [\"foo\", \"3\", \"4\"], \"T\": [pd.Timestamp(\"12:31:22\")] * 3}\n )\n\n def get_B(g):\n return g.iloc[0][[\"B\"]]\n\n result = df.groupby(\"A\").apply(get_B)[\"B\"]\n expected = df.B\n expected.index = df.A\n tm.assert_series_equal(result, expected)\n\n # GH 14423\n def predictions(tool):\n out = Series(index=[\"p1\", \"p2\", \"useTime\"], dtype=object)\n if \"step1\" in list(tool.State):\n out[\"p1\"] = str(tool[tool.State == \"step1\"].Machine.values[0])\n if \"step2\" in list(tool.State):\n out[\"p2\"] = str(tool[tool.State == \"step2\"].Machine.values[0])\n out[\"useTime\"] = str(tool[tool.State == \"step2\"].oTime.values[0])\n return out\n\n df1 = DataFrame(\n {\n \"Key\": [\"B\", \"B\", \"A\", \"A\"],\n \"State\": [\"step1\", \"step2\", \"step1\", \"step2\"],\n \"oTime\": [\"\", \"2016-09-19 05:24:33\", \"\", \"2016-09-19 23:59:04\"],\n \"Machine\": [\"23\", \"36L\", \"36R\", \"36R\"],\n }\n )\n df2 = df1.copy()\n df2.oTime = pd.to_datetime(df2.oTime)\n expected = df1.groupby(\"Key\").apply(predictions).p1\n result = df2.groupby(\"Key\").apply(predictions).p1\n tm.assert_series_equal(expected, result)\n\n\ndef test_apply_aggregating_timedelta_and_datetime():\n # Regression test for GH 15562\n # The following groupby caused ValueErrors and IndexErrors pre 0.20.0\n\n df = DataFrame(\n {\n \"clientid\": [\"A\", \"B\", \"C\"],\n \"datetime\": [np.datetime64(\"2017-02-01 00:00:00\")] * 3,\n }\n )\n df[\"time_delta_zero\"] = df.datetime - df.datetime\n result = df.groupby(\"clientid\").apply(\n lambda ddf: Series(\n {\"clientid_age\": ddf.time_delta_zero.min(), \"date\": ddf.datetime.min()}\n )\n )\n expected = DataFrame(\n {\n \"clientid\": [\"A\", \"B\", \"C\"],\n \"clientid_age\": [np.timedelta64(0, \"D\")] * 3,\n \"date\": [np.datetime64(\"2017-02-01 00:00:00\")] * 3,\n }\n ).set_index(\"clientid\")\n\n tm.assert_frame_equal(result, expected)\n\n\ndef test_apply_groupby_datetimeindex():\n # GH 26182\n # groupby apply failed on dataframe with DatetimeIndex\n\n data = [[\"A\", 10], [\"B\", 20], [\"B\", 30], [\"C\", 40], [\"C\", 50]]\n df = DataFrame(\n data, columns=[\"Name\", \"Value\"], index=pd.date_range(\"2020-09-01\", \"2020-09-05\")\n )\n\n result = df.groupby(\"Name\").sum()\n\n expected = DataFrame({\"Name\": [\"A\", \"B\", \"C\"], \"Value\": [10, 50, 90]})\n expected.set_index(\"Name\", inplace=True)\n\n tm.assert_frame_equal(result, expected)\n\n\ndef test_time_field_bug():\n # Test a fix for the following error related to GH issue 11324 When\n # non-key fields in a group-by dataframe contained time-based fields\n # that were not returned by the apply function, an exception would be\n # raised.\n\n df = DataFrame({\"a\": 
1, \"b\": [datetime.now() for nn in range(10)]})\n\n def func_with_no_date(batch):\n return Series({\"c\": 2})\n\n def func_with_date(batch):\n return Series({\"b\": datetime(2015, 1, 1), \"c\": 2})\n\n dfg_no_conversion = df.groupby(by=[\"a\"]).apply(func_with_no_date)\n dfg_no_conversion_expected = DataFrame({\"c\": 2}, index=[1])\n dfg_no_conversion_expected.index.name = \"a\"\n\n dfg_conversion = df.groupby(by=[\"a\"]).apply(func_with_date)\n dfg_conversion_expected = DataFrame({\"b\": datetime(2015, 1, 1), \"c\": 2}, index=[1])\n dfg_conversion_expected.index.name = \"a\"\n\n tm.assert_frame_equal(dfg_no_conversion, dfg_no_conversion_expected)\n tm.assert_frame_equal(dfg_conversion, dfg_conversion_expected)\n\n\ndef test_gb_apply_list_of_unequal_len_arrays():\n\n # GH1738\n df = DataFrame(\n {\n \"group1\": [\"a\", \"a\", \"a\", \"b\", \"b\", \"b\", \"a\", \"a\", \"a\", \"b\", \"b\", \"b\"],\n \"group2\": [\"c\", \"c\", \"d\", \"d\", \"d\", \"e\", \"c\", \"c\", \"d\", \"d\", \"d\", \"e\"],\n \"weight\": [1.1, 2, 3, 4, 5, 6, 2, 4, 6, 8, 1, 2],\n \"value\": [7.1, 8, 9, 10, 11, 12, 8, 7, 6, 5, 4, 3],\n }\n )\n df = df.set_index([\"group1\", \"group2\"])\n df_grouped = df.groupby(level=[\"group1\", \"group2\"], sort=True)\n\n def noddy(value, weight):\n out = np.array(value * weight).repeat(3)\n return out\n\n # the kernel function returns arrays of unequal length\n # pandas sniffs the first one, sees it's an array and not\n # a list, and assumed the rest are of equal length\n # and so tries a vstack\n\n # don't die\n df_grouped.apply(lambda x: noddy(x.value, x.weight))\n\n\ndef test_groupby_apply_all_none():\n # Tests to make sure no errors if apply function returns all None\n # values. Issue 9684.\n test_df = DataFrame({\"groups\": [0, 0, 1, 1], \"random_vars\": [8, 7, 4, 5]})\n\n def test_func(x):\n pass\n\n result = test_df.groupby(\"groups\").apply(test_func)\n expected = DataFrame()\n tm.assert_frame_equal(result, expected)\n\n\ndef test_groupby_apply_none_first():\n # GH 12824. 
Tests if apply returns None first.\n test_df1 = DataFrame({\"groups\": [1, 1, 1, 2], \"vars\": [0, 1, 2, 3]})\n test_df2 = DataFrame({\"groups\": [1, 2, 2, 2], \"vars\": [0, 1, 2, 3]})\n\n def test_func(x):\n if x.shape[0] < 2:\n return None\n return x.iloc[[0, -1]]\n\n result1 = test_df1.groupby(\"groups\").apply(test_func)\n result2 = test_df2.groupby(\"groups\").apply(test_func)\n index1 = MultiIndex.from_arrays([[1, 1], [0, 2]], names=[\"groups\", None])\n index2 = MultiIndex.from_arrays([[2, 2], [1, 3]], names=[\"groups\", None])\n expected1 = DataFrame({\"groups\": [1, 1], \"vars\": [0, 2]}, index=index1)\n expected2 = DataFrame({\"groups\": [2, 2], \"vars\": [1, 3]}, index=index2)\n tm.assert_frame_equal(result1, expected1)\n tm.assert_frame_equal(result2, expected2)\n\n\ndef test_groupby_apply_return_empty_chunk():\n # GH 22221: apply filter which returns some empty groups\n df = DataFrame({\"value\": [0, 1], \"group\": [\"filled\", \"empty\"]})\n groups = df.groupby(\"group\")\n result = groups.apply(lambda group: group[group.value != 1][\"value\"])\n expected = Series(\n [0],\n name=\"value\",\n index=MultiIndex.from_product(\n [[\"empty\", \"filled\"], [0]], names=[\"group\", None]\n ).drop(\"empty\"),\n )\n tm.assert_series_equal(result, expected)\n\n\ndef test_apply_with_mixed_types():\n # gh-20949\n df = DataFrame({\"A\": \"a a b\".split(), \"B\": [1, 2, 3], \"C\": [4, 6, 5]})\n g = df.groupby(\"A\")\n\n result = g.transform(lambda x: x / x.sum())\n expected = DataFrame({\"B\": [1 / 3.0, 2 / 3.0, 1], \"C\": [0.4, 0.6, 1.0]})\n tm.assert_frame_equal(result, expected)\n\n result = g.apply(lambda x: x / x.sum())\n tm.assert_frame_equal(result, expected)\n\n\ndef test_func_returns_object():\n # GH 28652\n df = DataFrame({\"a\": [1, 2]}, index=pd.Int64Index([1, 2]))\n result = df.groupby(\"a\").apply(lambda g: g.index)\n expected = Series(\n [pd.Int64Index([1]), pd.Int64Index([2])], index=pd.Int64Index([1, 2], name=\"a\")\n )\n\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n \"group_column_dtlike\",\n [datetime.today(), datetime.today().date(), datetime.today().time()],\n)\ndef test_apply_datetime_issue(group_column_dtlike):\n # GH-28247\n # groupby-apply throws an error if one of the columns in the DataFrame\n # is a datetime object and the column labels are different from\n # standard int values in range(len(num_columns))\n\n df = DataFrame({\"a\": [\"foo\"], \"b\": [group_column_dtlike]})\n result = df.groupby(\"a\").apply(lambda x: Series([\"spam\"], index=[42]))\n\n expected = DataFrame(\n [\"spam\"], Index([\"foo\"], dtype=\"object\", name=\"a\"), columns=[42]\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_apply_series_return_dataframe_groups():\n # GH 10078\n tdf = DataFrame(\n {\n \"day\": {\n 0: pd.Timestamp(\"2015-02-24 00:00:00\"),\n 1: pd.Timestamp(\"2015-02-24 00:00:00\"),\n 2: pd.Timestamp(\"2015-02-24 00:00:00\"),\n 3: pd.Timestamp(\"2015-02-24 00:00:00\"),\n 4: pd.Timestamp(\"2015-02-24 00:00:00\"),\n },\n \"userAgent\": {\n 0: \"some UA string\",\n 1: \"some UA string\",\n 2: \"some UA string\",\n 3: \"another UA string\",\n 4: \"some UA string\",\n },\n \"userId\": {\n 0: \"17661101\",\n 1: \"17661101\",\n 2: \"17661101\",\n 3: \"17661101\",\n 4: \"17661101\",\n },\n }\n )\n\n def most_common_values(df):\n return Series({c: s.value_counts().index[0] for c, s in df.iteritems()})\n\n result = tdf.groupby(\"day\").apply(most_common_values)[\"userId\"]\n expected = Series(\n [\"17661101\"], 
index=pd.DatetimeIndex([\"2015-02-24\"], name=\"day\"), name=\"userId\"\n )\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\"category\", [False, True])\ndef test_apply_multi_level_name(category):\n # https://github.com/pandas-dev/pandas/issues/31068\n b = [1, 2] * 5\n if category:\n b = pd.Categorical(b, categories=[1, 2, 3])\n expected_index = pd.CategoricalIndex([1, 2], categories=[1, 2, 3], name=\"B\")\n else:\n expected_index = Index([1, 2], name=\"B\")\n df = DataFrame(\n {\"A\": np.arange(10), \"B\": b, \"C\": list(range(10)), \"D\": list(range(10))}\n ).set_index([\"A\", \"B\"])\n result = df.groupby(\"B\").apply(lambda x: x.sum())\n expected = DataFrame({\"C\": [20, 25], \"D\": [20, 25]}, index=expected_index)\n tm.assert_frame_equal(result, expected)\n assert df.index.names == [\"A\", \"B\"]\n\n\ndef test_groupby_apply_datetime_result_dtypes():\n # GH 14849\n data = DataFrame.from_records(\n [\n (pd.Timestamp(2016, 1, 1), \"red\", \"dark\", 1, \"8\"),\n (pd.Timestamp(2015, 1, 1), \"green\", \"stormy\", 2, \"9\"),\n (pd.Timestamp(2014, 1, 1), \"blue\", \"bright\", 3, \"10\"),\n (pd.Timestamp(2013, 1, 1), \"blue\", \"calm\", 4, \"potato\"),\n ],\n columns=[\"observation\", \"color\", \"mood\", \"intensity\", \"score\"],\n )\n result = data.groupby(\"color\").apply(lambda g: g.iloc[0]).dtypes\n expected = Series(\n [np.dtype(\"datetime64[ns]\"), object, object, np.int64, object],\n index=[\"observation\", \"color\", \"mood\", \"intensity\", \"score\"],\n )\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n \"index\",\n [\n pd.CategoricalIndex(list(\"abc\")),\n pd.interval_range(0, 3),\n pd.period_range(\"2020\", periods=3, freq=\"D\"),\n MultiIndex.from_tuples([(\"a\", 0), (\"a\", 1), (\"b\", 0)]),\n ],\n)\ndef test_apply_index_has_complex_internals(index):\n # GH 31248\n df = DataFrame({\"group\": [1, 1, 2], \"value\": [0, 1, 0]}, index=index)\n result = df.groupby(\"group\").apply(lambda x: x)\n tm.assert_frame_equal(result, df)\n\n\n@pytest.mark.parametrize(\n \"function, expected_values\",\n [\n (lambda x: x.index.to_list(), [[0, 1], [2, 3]]),\n (lambda x: set(x.index.to_list()), [{0, 1}, {2, 3}]),\n (lambda x: tuple(x.index.to_list()), [(0, 1), (2, 3)]),\n (\n lambda x: {n: i for (n, i) in enumerate(x.index.to_list())},\n [{0: 0, 1: 1}, {0: 2, 1: 3}],\n ),\n (\n lambda x: [{n: i} for (n, i) in enumerate(x.index.to_list())],\n [[{0: 0}, {1: 1}], [{0: 2}, {1: 3}]],\n ),\n ],\n)\ndef test_apply_function_returns_non_pandas_non_scalar(function, expected_values):\n # GH 31441\n df = DataFrame([\"A\", \"A\", \"B\", \"B\"], columns=[\"groups\"])\n result = df.groupby(\"groups\").apply(function)\n expected = Series(expected_values, index=Index([\"A\", \"B\"], name=\"groups\"))\n tm.assert_series_equal(result, expected)\n\n\ndef test_apply_function_returns_numpy_array():\n # GH 31605\n def fct(group):\n return group[\"B\"].values.flatten()\n\n df = DataFrame({\"A\": [\"a\", \"a\", \"b\", \"none\"], \"B\": [1, 2, 3, np.nan]})\n\n result = df.groupby(\"A\").apply(fct)\n expected = Series(\n [[1.0, 2.0], [3.0], [np.nan]], index=Index([\"a\", \"b\", \"none\"], name=\"A\")\n )\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\"function\", [lambda gr: gr.index, lambda gr: gr.index + 1 - 1])\ndef test_apply_function_index_return(function):\n # GH: 22541\n df = DataFrame([1, 2, 2, 2, 1, 2, 3, 1, 3, 1], columns=[\"id\"])\n result = df.groupby(\"id\").apply(function)\n expected = Series(\n [Index([0, 4, 7, 9]), Index([1, 2, 
3, 5]), Index([6, 8])],\n index=Index([1, 2, 3], name=\"id\"),\n )\n tm.assert_series_equal(result, expected)\n\n\ndef test_apply_function_with_indexing_return_column():\n # GH: 7002\n df = DataFrame(\n {\n \"foo1\": [\"one\", \"two\", \"two\", \"three\", \"one\", \"two\"],\n \"foo2\": [1, 2, 4, 4, 5, 6],\n }\n )\n result = df.groupby(\"foo1\", as_index=False).apply(lambda x: x.mean())\n expected = DataFrame({\"foo1\": [\"one\", \"three\", \"two\"], \"foo2\": [3.0, 4.0, 4.0]})\n tm.assert_frame_equal(result, expected)\n\n\ndef test_apply_with_timezones_aware(using_array_manager, request):\n # GH: 27212\n if not using_array_manager:\n request.node.add_marker(pytest.mark.xfail(reason=\"GH-34998\"))\n\n dates = [\"2001-01-01\"] * 2 + [\"2001-01-02\"] * 2 + [\"2001-01-03\"] * 2\n index_no_tz = pd.DatetimeIndex(dates)\n index_tz = pd.DatetimeIndex(dates, tz=\"UTC\")\n df1 = DataFrame({\"x\": list(range(2)) * 3, \"y\": range(6), \"t\": index_no_tz})\n df2 = DataFrame({\"x\": list(range(2)) * 3, \"y\": range(6), \"t\": index_tz})\n\n result1 = df1.groupby(\"x\", group_keys=False).apply(lambda df: df[[\"x\", \"y\"]].copy())\n result2 = df2.groupby(\"x\", group_keys=False).apply(lambda df: df[[\"x\", \"y\"]].copy())\n\n tm.assert_frame_equal(result1, result2)\n\n\ndef test_apply_is_unchanged_when_other_methods_are_called_first(reduction_func):\n # GH #34656\n # GH #34271\n df = DataFrame(\n {\n \"a\": [99, 99, 99, 88, 88, 88],\n \"b\": [1, 2, 3, 4, 5, 6],\n \"c\": [10, 20, 30, 40, 50, 60],\n }\n )\n\n expected = DataFrame(\n {\"a\": [264, 297], \"b\": [15, 6], \"c\": [150, 60]},\n index=Index([88, 99], name=\"a\"),\n )\n\n # Check output when no other methods are called before .apply()\n grp = df.groupby(by=\"a\")\n result = grp.apply(sum)\n tm.assert_frame_equal(result, expected)\n\n # Check output when another method is called before .apply()\n grp = df.groupby(by=\"a\")\n args = {\"nth\": [0], \"corrwith\": [df]}.get(reduction_func, [])\n _ = getattr(grp, reduction_func)(*args)\n result = grp.apply(sum)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_apply_with_date_in_multiindex_does_not_convert_to_timestamp():\n # GH 29617\n\n df = DataFrame(\n {\n \"A\": [\"a\", \"a\", \"a\", \"b\"],\n \"B\": [\n date(2020, 1, 10),\n date(2020, 1, 10),\n date(2020, 2, 10),\n date(2020, 2, 10),\n ],\n \"C\": [1, 2, 3, 4],\n },\n index=Index([100, 101, 102, 103], name=\"idx\"),\n )\n\n grp = df.groupby([\"A\", \"B\"])\n result = grp.apply(lambda x: x.head(1))\n\n expected = df.iloc[[0, 2, 3]]\n expected = expected.reset_index()\n expected.index = MultiIndex.from_frame(expected[[\"A\", \"B\", \"idx\"]])\n expected = expected.drop(columns=\"idx\")\n\n tm.assert_frame_equal(result, expected)\n for val in result.index.levels[1]:\n assert type(val) is date\n\n\ndef test_apply_by_cols_equals_apply_by_rows_transposed():\n # GH 16646\n # Operating on the columns, or transposing and operating on the rows\n # should give the same result. 
There was previously a bug where the\n # by_rows operation would work fine, but by_cols would throw a ValueError\n\n df = DataFrame(\n np.random.random([6, 4]),\n columns=MultiIndex.from_product([[\"A\", \"B\"], [1, 2]]),\n )\n\n by_rows = df.T.groupby(axis=0, level=0).apply(\n lambda x: x.droplevel(axis=0, level=0)\n )\n by_cols = df.groupby(axis=1, level=0).apply(lambda x: x.droplevel(axis=1, level=0))\n\n tm.assert_frame_equal(by_cols, by_rows.T)\n tm.assert_frame_equal(by_cols, df)\n\n\ndef test_apply_dropna_with_indexed_same():\n # GH 38227\n\n df = DataFrame(\n {\n \"col\": [1, 2, 3, 4, 5],\n \"group\": [\"a\", np.nan, np.nan, \"b\", \"b\"],\n },\n index=list(\"xxyxz\"),\n )\n result = df.groupby(\"group\").apply(lambda x: x)\n expected = DataFrame(\n {\n \"col\": [1, 4, 5],\n \"group\": [\"a\", \"b\", \"b\"],\n },\n index=list(\"xxz\"),\n )\n\n tm.assert_frame_equal(result, expected)\n"
] |
[
[
"pandas.core.indexes.extension.make_wrapped_arith_op",
"numpy.array_equal",
"pandas.core.indexes.extension.NDArrayBackedExtensionIndex.take",
"pandas.compat.numpy.function.validate_max",
"pandas.core.common.asarray_tuplesafe",
"pandas.compat.numpy.function.validate_argmin",
"pandas._libs.Timedelta",
"pandas._libs.lib.is_scalar",
"pandas.core.tools.timedeltas.to_timedelta",
"pandas.core.indexes.extension.inherit_names",
"pandas.core.indexes.base.Index._intersection",
"pandas.compat.numpy.function.validate_minmax_axis",
"pandas.compat.numpy.function.validate_argmax",
"pandas._libs.lib.item_from_zerodim",
"pandas.util._decorators.Appender",
"pandas.core.dtypes.common.is_dtype_equal",
"pandas.core.indexes.base.Index.join",
"pandas.util._decorators.cache_readonly",
"pandas.core.dtypes.common.is_period_dtype",
"pandas.core.dtypes.common.is_scalar",
"pandas.util._decorators.doc",
"pandas.core.dtypes.common.is_integer",
"pandas.compat.numpy.function.validate_take",
"pandas.core.dtypes.concat.concat_compat",
"pandas.core.dtypes.common.is_list_like",
"numpy.iinfo",
"pandas.compat.numpy.function.validate_min",
"numpy.asarray",
"pandas.core.indexes.numeric.Int64Index._simple_new",
"pandas.core.indexes.base.pprint_thing",
"pandas.core.dtypes.common.is_categorical_dtype",
"pandas.core.dtypes.common.is_bool_dtype"
],
[
"pandas.DatetimeIndex",
"numpy.resize",
"pandas.Timestamp",
"numpy.finfo",
"pandas.concat",
"pandas._testing.assert_series_equal",
"pandas.DataFrame",
"pandas.arrays.SparseArray",
"numpy.arange",
"numpy.array",
"pandas.core.dtypes.common.is_integer",
"numpy.random.randn",
"pandas._testing.assert_equal",
"pandas._testing.assert_index_equal",
"pandas.isna",
"pandas.date_range",
"pandas.Categorical",
"pandas.Series",
"pandas._testing.assert_datetime_array_equal"
],
[
"pandas.DatetimeIndex",
"pandas.CategoricalIndex",
"numpy.tile",
"pandas.Timestamp",
"pandas.concat",
"pandas.MultiIndex.from_frame",
"pandas._testing.assert_series_equal",
"pandas.period_range",
"numpy.random.random",
"numpy.dtype",
"numpy.log",
"pandas.DataFrame",
"numpy.random.randint",
"numpy.arange",
"pandas.bdate_range",
"pandas.to_datetime",
"numpy.array",
"pandas.MultiIndex.from_tuples",
"numpy.random.randn",
"pandas._testing.assert_frame_equal",
"pandas.MultiIndex.from_arrays",
"pandas.MultiIndex.from_product",
"numpy.timedelta64",
"pandas._testing.assert_index_equal",
"numpy.datetime64",
"pandas.Index",
"numpy.errstate",
"pandas.Int64Index",
"pandas.date_range",
"pandas.Categorical",
"pandas._testing.assert_numpy_array_equal",
"pandas.interval_range",
"pandas.Series"
]
] |
gusamarante/QuantFin
|
[
"ba8ae75095692b3e6a922220ef8cefc1bea7c35e"
] |
[
"cqf_final_project/chart_value.py"
] |
[
"import pandas as pd\nfrom matplotlib import rcParams\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\n\nfile_path = r'/Users/gustavoamarante/Dropbox/CQF/Final Project/' # Mac\n# file_path = r'/Users/gusamarante/Dropbox/CQF/Final Project/' # Macbook\n\n# Read Bloomberg Tickers for renaming\ndf_tickers = pd.read_excel(file_path + r'Data - BBG Data Values.xlsx',\n index_col=0, sheet_name='Tickers')\n\nppp_dict = df_tickers['PPP'].to_dict()\nppp_dict = {v: k for k, v in ppp_dict.items()}\n\nspot_dict = df_tickers['Spot'].to_dict()\nspot_dict = {v: k for k, v in spot_dict.items()}\n\n# Read PPP\ndf_ppp = pd.read_excel(file_path + r'Data - BBG Data Values.xlsx',\n index_col=0, sheet_name='PPP')\ndf_ppp = df_ppp.rename(ppp_dict, axis=1)\n\n# Read spot price\ndf_spot = pd.read_excel(file_path + r'Data - BBG Data Values.xlsx',\n index_col=0, sheet_name='Spot')\ndf_spot = df_spot.rename(spot_dict, axis=1)\n\n# compute the value\ndf_value = 1 - df_ppp / df_spot\ndf_value = df_value.dropna(axis=1, how='all')\n\n# Chart\nMyFont = {'fontname': 'Century Gothic'}\nrcParams['font.family'] = 'sans-serif'\nrcParams['font.sans-serif'] = ['Century Gothic']\n\ndf_value = df_value.dropna(how='all')\n\nplt.figure(figsize=(11, 6))\nplt.plot(df_value, linewidth=2)\nplt.tick_params(axis='y', which='both', right=False, left=False, labelleft=True)\nplt.tick_params(axis='x', which='both', top=False, bottom=False, labelbottom=True)\nplt.xticks(rotation=45)\nplt.legend(df_value.columns, loc='lower right', ncol=3, frameon=True)\nax = plt.gca()\nax.yaxis.grid(color='grey', linestyle='-', linewidth=0.5, alpha=0.5)\nax.xaxis.grid(color='grey', linestyle='-', linewidth=0.5, alpha=0.5)\nax.set_xlim((df_value.index[0], df_value.index[-1]))\nlocators = mdates.YearLocator()\nax.xaxis.set_major_locator(locators)\nax.xaxis.set_major_formatter(mdates.DateFormatter('%Y'))\nplt.tight_layout()\nplt.savefig(file_path + r'figures/Value.pdf', pad_inches=0)\nplt.show()\n"
] |
[
[
"matplotlib.dates.YearLocator",
"matplotlib.pyplot.savefig",
"pandas.read_excel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.dates.DateFormatter",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tick_params",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.show",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.xticks"
]
] |
InduManimaran/pennylane
|
[
"375d25acc7bd2e6d5243b5273958b26513c33189"
] |
[
"pennylane/beta/qnodes/qubit.py"
] |
[
"# Copyright 2019 Xanadu Quantum Technologies Inc.\r\n\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\"\"\"\r\nQubit parameter shift quantum node.\r\n\r\nProvides analytic differentiation for all one-parameter gates where the generator\r\nonly has two unique eigenvalues; this includes one-parameter single-qubit gates.\r\n\"\"\"\r\nimport itertools\r\nimport copy\r\n\r\nimport numpy as np\r\nfrom scipy import linalg\r\n\r\nimport pennylane as qml\r\nfrom pennylane.measure import var\r\nfrom pennylane.utils import expand\r\n\r\nfrom pennylane.operation import Observable, ObservableReturnTypes\r\n\r\nfrom .base import QuantumFunctionError\r\nfrom .jacobian import JacobianQNode\r\n\r\n\r\nclass QubitQNode(JacobianQNode):\r\n \"\"\"Quantum node for qubit parameter shift analytic differentiation\"\"\"\r\n\r\n def _best_method(self, idx):\r\n \"\"\"Determine the correct partial derivative computation method for a free parameter.\r\n\r\n Use the parameter-shift analytic method iff every gate that depends on the parameter supports it.\r\n If not, use the finite difference method only.\r\n\r\n Note that if even one dependent Operation does not support differentiation,\r\n we cannot differentiate with respect to this parameter at all.\r\n\r\n Args:\r\n idx (int): free parameter index\r\n\r\n Returns:\r\n str: partial derivative method to be used\r\n \"\"\"\r\n # operations that depend on this free parameter\r\n ops = [d.op for d in self.variable_deps[idx]]\r\n\r\n # Observables in the circuit\r\n # (the topological order is the queue order)\r\n observables = self.circuit.observables_in_order\r\n\r\n # an empty list to store the 'best' partial derivative method\r\n # for each operator/observable pair\r\n best = np.empty((len(ops), len(observables)), dtype=object)\r\n\r\n # find the best supported partial derivative method for each operator\r\n for k_op, op in enumerate(ops):\r\n if op.grad_method is None:\r\n # one nondifferentiable item makes the whole nondifferentiable\r\n op.use_method = None\r\n continue\r\n\r\n # loop over all observables\r\n for k_ob, ob in enumerate(observables):\r\n # get the set of operations betweens the\r\n # operation and the observable\r\n S = self.circuit.nodes_between(op, ob)\r\n\r\n # If there is no path between them, p.d. 
is zero\r\n # Otherwise, use finite differences\r\n best[k_op, k_ob] = \"0\" if not S else op.grad_method\r\n\r\n if all(k == \"0\" for k in best[k_op, :]):\r\n # one nondifferentiable item makes the whole nondifferentiable\r\n op.use_method = \"0\"\r\n elif \"F\" in best[k_op, :]:\r\n # one non-analytic item makes the whole numeric\r\n op.use_method = \"F\"\r\n else:\r\n op.use_method = \"A\"\r\n\r\n # if all ops that depend on the free parameter have a best method\r\n # of \"0\", then we can skip the partial derivative altogether\r\n if all(o.use_method == \"0\" for o in ops):\r\n return \"0\"\r\n\r\n # one nondifferentiable item makes the whole nondifferentiable\r\n if any(o.use_method is None for o in ops):\r\n return None\r\n\r\n # one non-analytic item makes the whole numeric\r\n if any(o.use_method == \"F\" for o in ops):\r\n return \"F\"\r\n\r\n return \"A\"\r\n\r\n def _pd_analytic(self, idx, args, kwargs, **options):\r\n \"\"\"Partial derivative of the node using the analytic parameter shift method.\r\n Args:\r\n idx (int): flattened index of the parameter wrt. which the p.d. is computed\r\n args (array[float]): flattened positional arguments at which to evaluate the p.d.\r\n kwargs (dict[str, Any]): auxiliary arguments\r\n\r\n Returns:\r\n array[float]: partial derivative of the node\r\n \"\"\"\r\n n = self.num_variables\r\n pd = 0.0\r\n # find the Operators in which the free parameter appears, use the product rule\r\n for op, p_idx in self.variable_deps[idx]:\r\n\r\n # We temporarily edit the Operator such that parameter p_idx is replaced by a new one,\r\n # which we can modify without affecting other Operators depending on the original.\r\n orig = op.params[p_idx]\r\n assert orig.idx == idx\r\n assert orig.name is None\r\n\r\n # reference to a new, temporary parameter with index n, otherwise identical with orig\r\n temp_var = copy.copy(orig)\r\n temp_var.idx = n\r\n op.params[p_idx] = temp_var\r\n\r\n multiplier, shift = op.get_parameter_shift(p_idx)\r\n\r\n # shifted parameter values\r\n shift_p1 = np.r_[args, args[idx] + shift]\r\n shift_p2 = np.r_[args, args[idx] - shift]\r\n\r\n # evaluate the circuit at two points with shifted parameter values\r\n y2 = np.asarray(self.evaluate(shift_p1, kwargs))\r\n y1 = np.asarray(self.evaluate(shift_p2, kwargs))\r\n pd += (y2 - y1) * multiplier\r\n\r\n # restore the original parameter\r\n op.params[p_idx] = orig\r\n\r\n return pd\r\n\r\n def _pd_analytic_var(self, idx, args, kwargs, **options):\r\n \"\"\"Partial derivative of the variance of an observable using the parameter-shift method.\r\n\r\n Args:\r\n idx (int): flattened index of the parameter wrt. which the p.d. 
is computed\r\n args (array[float]): flattened positional arguments at which to evaluate the p.d.\r\n kwargs (dict[str, Any]): auxiliary arguments\r\n\r\n Returns:\r\n array[float]: partial derivative of the node\r\n \"\"\"\r\n # boolean mask: elements are True where the return type is a variance, False for expectations\r\n where_var = [\r\n e.return_type is ObservableReturnTypes.Variance for e in self.circuit.observables\r\n ]\r\n var_observables = [\r\n e for e in self.circuit.observables if e.return_type == ObservableReturnTypes.Variance\r\n ]\r\n\r\n # first, replace each var(A) with <A^2>\r\n new_observables = []\r\n for e in var_observables:\r\n # need to calculate d<A^2>/dp\r\n w = e.wires\r\n\r\n if e.name == \"Hermitian\":\r\n # since arbitrary Hermitian observables\r\n # are not guaranteed to be involutory, need to take them into\r\n # account separately to calculate d<A^2>/dp\r\n\r\n A = e.params[0] # Hermitian matrix\r\n # if not np.allclose(A @ A, np.identity(A.shape[0])):\r\n new = qml.expval(qml.Hermitian(A @ A, w, do_queue=False))\r\n else:\r\n # involutory, A^2 = I\r\n # For involutory observables (A^2 = I) we have d<A^2>/dp = 0\r\n new = qml.expval(qml.Hermitian(np.identity(2 ** len(w)), w, do_queue=False))\r\n\r\n # replace the var(A) observable with <A^2>\r\n self.circuit.update_node(e, new)\r\n new_observables.append(new)\r\n\r\n # calculate the analytic derivatives of the <A^2> observables\r\n pdA2 = self._pd_analytic(idx, args, kwargs)\r\n\r\n # restore the original observables, but convert their return types to expectation\r\n for e, new in zip(var_observables, new_observables):\r\n self.circuit.update_node(new, e)\r\n e.return_type = ObservableReturnTypes.Expectation\r\n\r\n # evaluate <A>\r\n evA = np.asarray(self.evaluate(args, kwargs))\r\n\r\n # evaluate the analytic derivative of <A>\r\n pdA = self._pd_analytic(idx, args, kwargs)\r\n\r\n # restore return types\r\n for e in var_observables:\r\n e.return_type = ObservableReturnTypes.Variance\r\n\r\n # return d(var(A))/dp = d<A^2>/dp -2 * <A> * d<A>/dp for the variances,\r\n # d<A>/dp for plain expectations\r\n return np.where(where_var, pdA2 - 2 * evA * pdA, pdA)\r\n\r\n def _construct_metric_tensor(self, *, diag_approx=False):\r\n \"\"\"Construct metric tensor subcircuits for qubit circuits.\r\n\r\n Constructs a set of quantum circuits for computing a block-diagonal approximation of the\r\n Fubini-Study metric tensor on the parameter space of the variational circuit represented\r\n by the QNode, using the Quantum Geometric Tensor.\r\n\r\n If the parameter appears in a gate :math:`G`, the subcircuit contains\r\n all gates which precede :math:`G`, and :math:`G` is replaced by the variance\r\n value of its generator.\r\n\r\n Args:\r\n diag_approx (bool): iff True, use the diagonal approximation\r\n\r\n Raises:\r\n QuantumFunctionError: if a metric tensor cannot be generated because no generator\r\n was defined\r\n\r\n \"\"\"\r\n # pylint: disable=too-many-statements, too-many-branches\r\n\r\n self._metric_tensor_subcircuits = {}\r\n for queue, curr_ops, param_idx, _ in self.circuit.iterate_layers():\r\n obs = []\r\n scale = []\r\n\r\n Ki_matrices = []\r\n KiKj_matrices = []\r\n Ki_ev = []\r\n KiKj_ev = []\r\n V = None\r\n\r\n # for each operation in the layer, get the generator and convert it to a variance\r\n for n, op in enumerate(curr_ops):\r\n gen, s = op.generator\r\n w = op.wires\r\n\r\n if gen is None:\r\n raise QuantumFunctionError(\r\n \"Can't generate metric tensor, operation {}\"\r\n \"has no defined 
generator\".format(op)\r\n )\r\n\r\n # get the observable corresponding to the generator of the current operation\r\n if isinstance(gen, np.ndarray):\r\n # generator is a Hermitian matrix\r\n variance = var(qml.Hermitian(gen, w, do_queue=False))\r\n\r\n if not diag_approx:\r\n Ki_matrices.append((n, expand(gen, w, self.num_wires)))\r\n\r\n elif issubclass(gen, Observable):\r\n # generator is an existing PennyLane operation\r\n variance = var(gen(w, do_queue=False))\r\n\r\n if not diag_approx:\r\n if issubclass(gen, qml.PauliX):\r\n mat = np.array([[0, 1], [1, 0]])\r\n elif issubclass(gen, qml.PauliY):\r\n mat = np.array([[0, -1j], [1j, 0]])\r\n elif issubclass(gen, qml.PauliZ):\r\n mat = np.array([[1, 0], [0, -1]])\r\n\r\n Ki_matrices.append((n, expand(mat, w, self.num_wires)))\r\n\r\n else:\r\n raise QuantumFunctionError(\r\n \"Can't generate metric tensor, generator {}\"\r\n \"has no corresponding observable\".format(gen)\r\n )\r\n\r\n obs.append(variance)\r\n scale.append(s)\r\n\r\n if not diag_approx:\r\n # In order to compute the block diagonal portion of the metric tensor,\r\n # we need to compute 'second order' <psi|K_i K_j|psi> terms.\r\n\r\n for i, j in itertools.product(range(len(Ki_matrices)), repeat=2):\r\n # compute the matrices representing all K_i K_j terms\r\n obs1 = Ki_matrices[i]\r\n obs2 = Ki_matrices[j]\r\n KiKj_matrices.append(((obs1[0], obs2[0]), obs1[1] @ obs2[1]))\r\n\r\n V = np.identity(2 ** self.num_wires, dtype=np.complex128)\r\n\r\n # generate the unitary operation to rotate to\r\n # the shared eigenbasis of all observables\r\n for _, term in Ki_matrices:\r\n _, S = linalg.eigh(V.conj().T @ term @ V)\r\n V = np.round(V @ S, 15)\r\n\r\n V = V.conj().T\r\n\r\n # calculate the eigenvalues for\r\n # each observable in the shared eigenbasis\r\n for idx, term in Ki_matrices:\r\n eigs = np.diag(V @ term @ V.conj().T).real\r\n Ki_ev.append((idx, eigs))\r\n\r\n for idx, term in KiKj_matrices:\r\n eigs = np.diag(V @ term @ V.conj().T).real\r\n KiKj_ev.append((idx, eigs))\r\n\r\n self._metric_tensor_subcircuits[param_idx] = {\r\n \"queue\": queue,\r\n \"observable\": obs,\r\n \"Ki_expectations\": Ki_ev,\r\n \"KiKj_expectations\": KiKj_ev,\r\n \"eigenbasis_matrix\": V,\r\n \"result\": None,\r\n \"scale\": scale,\r\n }\r\n\r\n def metric_tensor(self, args, kwargs=None, *, diag_approx=False, only_construct=False):\r\n \"\"\"Evaluate the value of the metric tensor.\r\n\r\n Args:\r\n args (tuple[Any]): positional (differentiable) arguments\r\n kwargs (dict[str, Any]): auxiliary arguments\r\n diag_approx (bool): iff True, use the diagonal approximation\r\n only_construct (bool): Iff True, construct the circuits used for computing\r\n the metric tensor but do not execute them, and return None.\r\n\r\n Returns:\r\n array[float]: metric tensor\r\n \"\"\"\r\n kwargs = kwargs or {}\r\n kwargs = self._default_args(kwargs)\r\n\r\n if self.circuit is None or self.mutable:\r\n # construct the circuit\r\n self._construct(args, kwargs)\r\n\r\n if self._metric_tensor_subcircuits is None:\r\n self._construct_metric_tensor(diag_approx=diag_approx)\r\n\r\n if only_construct:\r\n return None\r\n\r\n # temporarily store the parameter values in the Variable class\r\n self._set_variables(args, kwargs)\r\n\r\n tensor = np.zeros([self.num_variables, self.num_variables])\r\n\r\n # execute constructed metric tensor subcircuits\r\n for params, circuit in self._metric_tensor_subcircuits.items():\r\n self.device.reset()\r\n\r\n s = np.array(circuit[\"scale\"])\r\n V = 
circuit[\"eigenbasis_matrix\"]\r\n\r\n if not diag_approx:\r\n # block diagonal approximation\r\n\r\n unitary_op = qml.QubitUnitary(V, wires=list(range(self.num_wires)), do_queue=False)\r\n self.device.execute(circuit[\"queue\"] + [unitary_op], circuit[\"observable\"])\r\n probs = list(self.device.probability().values())\r\n\r\n first_order_ev = np.zeros([len(params)])\r\n second_order_ev = np.zeros([len(params), len(params)])\r\n\r\n for idx, ev in circuit[\"Ki_expectations\"]:\r\n first_order_ev[idx] = ev @ probs\r\n\r\n for idx, ev in circuit[\"KiKj_expectations\"]:\r\n # idx is a 2-tuple (i, j), representing\r\n # generators K_i, K_j\r\n second_order_ev[idx] = ev @ probs\r\n\r\n # since K_i and K_j are assumed to commute,\r\n # <psi|K_j K_i|psi> = <psi|K_i K_j|psi>,\r\n # and thus the matrix of second-order expectations\r\n # is symmetric\r\n second_order_ev[idx[1], idx[0]] = second_order_ev[idx]\r\n\r\n g = np.zeros([len(params), len(params)])\r\n\r\n for i, j in itertools.product(range(len(params)), repeat=2):\r\n g[i, j] = (\r\n s[i]\r\n * s[j]\r\n * (second_order_ev[i, j] - first_order_ev[i] * first_order_ev[j])\r\n )\r\n\r\n row = np.array(params).reshape(-1, 1)\r\n col = np.array(params).reshape(1, -1)\r\n circuit[\"result\"] = np.diag(g)\r\n tensor[row, col] = g\r\n\r\n else:\r\n # diagonal approximation\r\n circuit[\"result\"] = s ** 2 * self.device.execute(\r\n circuit[\"queue\"], circuit[\"observable\"]\r\n )\r\n tensor[np.array(params), np.array(params)] = circuit[\"result\"]\r\n\r\n return tensor\r\n"
] |
[
[
"numpy.array",
"numpy.zeros",
"numpy.round",
"numpy.identity",
"numpy.where",
"numpy.diag"
]
] |
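The `_pd_analytic` method in the row above evaluates the circuit twice, at parameter values shifted by +s and -s, and scales the difference by a gate-dependent multiplier. Below is a self-contained numeric sketch of that two-term parameter-shift rule, with f(theta) = cos(theta) standing in for the circuit evaluation (this is the <Z> expectation of a qubit rotated by RX(theta)); the multiplier 1/2 and shift pi/2 are the standard values for such a one-parameter gate, assumed here rather than taken from the file.

    import numpy as np

    def f(theta):
        # stand-in for the QNode evaluation: <Z> after RX(theta) applied to |0>
        return np.cos(theta)

    theta = 0.37
    multiplier, shift = 0.5, np.pi / 2

    # two shifted evaluations, combined exactly as in the analytic p.d. loop
    pd_shift = multiplier * (f(theta + shift) - f(theta - shift))
    exact = -np.sin(theta)
    print(pd_shift, exact)  # the shift-rule estimate matches the exact derivative

Unlike finite differences, the result is exact (up to floating point), which is why the "A" method is preferred whenever every dependent gate supports it.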
jgueldenstein/urdf2webots
|
[
"39850f249baa8461d014e0ca9c4b4d05d19cf7ab"
] |
[
"urdf2webots/writeProto.py"
] |
[
"\"\"\"Import modules.\"\"\"\n\nimport math\nimport numpy as np\n\nfrom urdf2webots.math_utils import rotateVector, matrixFromRotation, multiplyMatrix, rotationFromMatrix\n\ntoolSlot = None\nstaticBase = False\nenableMultiFile = False\nmeshFilesPath = None\nrobotNameMain = ''\ninitPos = None\n\n\nclass RGB():\n \"\"\"RGB color object.\"\"\"\n\n def __init__(self):\n \"\"\"Initialization.\"\"\"\n self.red = 0.5\n self.green = 0.5\n self.blue = 0.5\n\n\n# ref: https://marcodiiga.github.io/rgba-to-rgb-conversion\ndef RGBA2RGB(RGBA_color, RGB_background=RGB()):\n \"\"\"Convert RGBA to RGB expression.\"\"\"\n alpha = RGBA_color.alpha\n\n new_color = RGB()\n new_color.red = (1 - alpha) * RGB_background.red + alpha * RGBA_color.red\n new_color.green = (1 - alpha) * RGB_background.green + alpha * RGBA_color.green\n new_color.blue = (1 - alpha) * RGB_background.blue + alpha * RGBA_color.blue\n\n return new_color\n\n\ndef header(proto, srcFile=None, robotName='', tags=[]):\n \"\"\"Specify VRML file header.\"\"\"\n if srcFile:\n header.sourceFile = srcFile\n proto.write('#VRML_SIM R2020b utf8\\n')\n proto.write('# license: Apache License 2.0\\n')\n proto.write('# license url: http://www.apache.org/licenses/LICENSE-2.0\\n')\n if tags:\n proto.write('# tags: %s\\n' % ','.join(tags))\n if robotName:\n proto.write('# This is a proto file for Webots for the ' + robotName + '\\n')\n if header.sourceFile is not None:\n proto.write('# Extracted from: ' + header.sourceFile + '\\n\\n')\n\n\nheader.sourceFile = None\n\n\ndef declaration(proto, robotName, initRotation):\n \"\"\"Prototype declaration.\"\"\"\n spaces = ' ' * max(1, len(robotName) - 2)\n proto.write('PROTO ' + robotName + ' [\\n')\n proto.write(' field SFVec3f translation 0 0 0\\n')\n proto.write(' field SFRotation rotation ' + initRotation + '\\n')\n proto.write(' field SFString name \"' + robotName + '\" # Is `Robot.name`.\\n')\n proto.write(' field SFString controller \"void\"' + spaces + '# Is `Robot.controller`.\\n')\n proto.write(' field MFString controllerArgs [] ' + spaces + '# Is `Robot.controllerArgs`.\\n')\n proto.write(' field SFString customData \"\" ' + spaces + '# Is `Robot.customData`.\\n')\n proto.write(' field SFBool supervisor FALSE ' + spaces + '# Is `Robot.supervisor`.\\n')\n proto.write(' field SFBool synchronization TRUE ' + spaces + '# Is `Robot.synchronization`.\\n')\n proto.write(' field SFBool selfCollision FALSE ' + spaces + '# Is `Robot.selfCollision`.\\n')\n if staticBase:\n proto.write(' field SFBool staticBase TRUE ' + spaces + '# Defines if the robot base should ' +\n 'be pinned to the static environment.\\n')\n if toolSlot:\n proto.write(' field MFNode toolSlot [] ' + spaces +\n '# Extend the robot with new nodes at the end of the arm.\\n')\n proto.write(']\\n')\n proto.write('{\\n')\n\n\ndef URDFLink(proto, link, level, parentList, childList, linkList, jointList, sensorList,\n jointPosition=[0.0, 0.0, 0.0], jointRotation=[1.0, 0.0, 0.0, 0.0],\n boxCollision=False, normal=False, dummy=False, robot=False, endpoint=False):\n \"\"\"Write a link iteratively.\"\"\"\n indent = ' '\n haveChild = False\n if robot:\n proto.write(level * indent + 'Robot {\\n')\n proto.write((level + 1) * indent + 'translation IS translation\\n')\n proto.write((level + 1) * indent + 'rotation IS rotation\\n')\n proto.write((level + 1) * indent + 'controller IS controller\\n')\n proto.write((level + 1) * indent + 'controllerArgs IS controllerArgs\\n')\n proto.write((level + 1) * indent + 'customData IS customData\\n')\n proto.write((level + 1) 
* indent + 'supervisor IS supervisor\\n')\n proto.write((level + 1) * indent + 'synchronization IS synchronization\\n')\n proto.write((level + 1) * indent + 'selfCollision IS selfCollision\\n')\n else:\n if link.forceSensor:\n proto.write((' ' if endpoint else level * indent) + 'TouchSensor {\\n')\n proto.write((level + 1) * indent + 'type \"force-3d\"\\n')\n else:\n proto.write((' ' if endpoint else level * indent) + 'Solid {\\n')\n proto.write((level + 1) * indent + 'translation %lf %lf %lf\\n' % (jointPosition[0],\n jointPosition[1],\n jointPosition[2]))\n proto.write((level + 1) * indent + 'rotation %lf %lf %lf %lf\\n' % (jointRotation[0],\n jointRotation[1],\n jointRotation[2],\n jointRotation[3]))\n if not dummy: # dummy: case when link not defined but referenced (e.g. Atlas robot)\n # 1: export Shapes\n if link.visual:\n if not haveChild:\n haveChild = True\n proto.write((level + 1) * indent + 'children [\\n')\n URDFShape(proto, link, level + 2, normal)\n # 2: export Sensors\n for sensor in sensorList:\n if sensor.parentLink == link.name:\n if not haveChild:\n haveChild = True\n proto.write((level + 1) * indent + 'children [\\n')\n sensor.export(proto, level + 2)\n # 3: export Joints\n for joint in jointList:\n if joint.parent == link.name:\n if not haveChild:\n haveChild = True\n proto.write((level + 1) * indent + 'children [\\n')\n URDFJoint(proto, joint, level + 2, parentList, childList,\n linkList, jointList, sensorList, boxCollision, normal)\n # 4: export ToolSlot if specified\n if link.name == toolSlot:\n if not haveChild:\n proto.write((level + 1) * indent + 'children [\\n')\n proto.write((level + 2) * indent + 'Group {\\n')\n proto.write((level + 3) * indent + 'children IS toolSlot\\n')\n proto.write((level + 2) * indent + '}\\n')\n proto.write((level + 1) * indent + ']\\n')\n # add dummy physics and bounding object, so tools don't fall off\n if link.inertia.mass is None:\n proto.write((level + 1) * indent + 'physics Physics {\\n')\n proto.write((level + 1) * indent + '}\\n')\n proto.write((level + 1) * indent + 'boundingObject Box {\\n')\n proto.write((level + 2) * indent + 'size 0.01 0.01 0.01\\n')\n proto.write((level + 1) * indent + '}\\n')\n elif haveChild:\n proto.write((level + 1) * indent + ']\\n')\n if level == 1:\n proto.write((level + 1) * indent + 'name IS name \\n')\n else:\n proto.write((level + 1) * indent + 'name \"' + link.name + '\"\\n')\n\n if link.collision:\n URDFBoundingObject(proto, link, level + 1, boxCollision)\n if link.inertia.mass is not None:\n if level == 1 and staticBase:\n proto.write((level + 1) * indent + '%{ if fields.staticBase.value == false then }%\\n')\n proto.write((level + 1) * indent + 'physics Physics {\\n')\n proto.write((level + 2) * indent + 'density -1\\n')\n proto.write((level + 2) * indent + 'mass %lf\\n' % link.inertia.mass)\n if link.inertia.position != [0.0, 0.0, 0.0]:\n proto.write((level + 2) * indent + 'centerOfMass [ %lf %lf %lf ]\\n' % (link.inertia.position[0],\n link.inertia.position[1],\n link.inertia.position[2]))\n if link.inertia.ixx > 0.0 and link.inertia.iyy > 0.0 and link.inertia.izz > 0.0:\n i = link.inertia\n inertiaMatrix = [i.ixx, i.ixy, i.ixz, i.ixy, i.iyy, i.iyz, i.ixz, i.iyz, i.izz]\n if link.inertia.rotation[-1] != 0.0:\n rotationMatrix = matrixFromRotation(link.inertia.rotation)\n I_mat = np.array(inertiaMatrix).reshape(3, 3)\n R = np.array(rotationMatrix).reshape(3, 3)\n R_t = np.transpose(R)\n # calculate the rotated inertiaMatrix with R_t * I * R. 
For reference, check the link below\n # https://www.euclideanspace.com/physics/dynamics/inertia/rotation/index.htm\n inertiaMatrix = np.dot(np.dot(R_t, I_mat), R).reshape(9)\n if (inertiaMatrix[0] != 1.0 or inertiaMatrix[4] != 1.0 or inertiaMatrix[8] != 1.0 or\n inertiaMatrix[1] != 0.0 or inertiaMatrix[2] != 0.0 or inertiaMatrix[5] != 0.0):\n proto.write((level + 2) * indent + 'inertiaMatrix [\\n')\n # principals moments of inertia (diagonal)\n proto.write((level + 3) * indent + '%e %e %e\\n' % (inertiaMatrix[0], inertiaMatrix[4], inertiaMatrix[8]))\n # products of inertia\n proto.write((level + 3) * indent + '%e %e %e\\n' % (inertiaMatrix[1], inertiaMatrix[2], inertiaMatrix[5]))\n proto.write((level + 2) * indent + ']\\n')\n proto.write((level + 1) * indent + '}\\n')\n if level == 1 and staticBase:\n proto.write((level + 1) * indent + '%{ end }%\\n')\n elif link.collision:\n if level == 1 and staticBase:\n proto.write((level + 1) * indent + '%{ if fields.staticBase.value == false then }%\\n')\n proto.write((level + 1) * indent + 'physics Physics {\\n')\n proto.write((level + 1) * indent + '}\\n')\n if level == 1 and staticBase:\n proto.write((level + 1) * indent + '%{ end }%\\n')\n proto.write(level * indent + '}\\n')\n\n\ndef URDFBoundingObject(proto, link, level, boxCollision):\n \"\"\"Write an boundingObject.\"\"\"\n indent = ' '\n boundingLevel = level\n proto.write(level * indent + 'boundingObject ')\n hasGroup = len(link.collision) > 1\n if hasGroup:\n proto.write('Group {\\n')\n proto.write((level + 1) * indent + 'children [\\n')\n boundingLevel = level + 2\n\n for boundingObject in link.collision:\n initialIndent = boundingLevel * indent if hasGroup else ''\n if not boxCollision and boundingObject.position != [0.0, 0.0, 0.0] or boundingObject.rotation[3] != 0.0:\n proto.write(initialIndent + 'Transform {\\n')\n proto.write((boundingLevel + 1) * indent + 'translation %lf %lf %lf\\n' % (boundingObject.position[0],\n boundingObject.position[1],\n boundingObject.position[2]))\n proto.write((boundingLevel + 1) * indent + 'rotation %lf %lf %lf %lf\\n' % (boundingObject.rotation[0],\n boundingObject.rotation[1],\n boundingObject.rotation[2],\n boundingObject.rotation[3]))\n proto.write((boundingLevel + 1) * indent + 'children [\\n')\n boundingLevel = boundingLevel + 2\n hasGroup = True\n initialIndent = boundingLevel * indent\n\n if boundingObject.geometry.box.x != 0:\n proto.write(initialIndent + 'Box {\\n')\n proto.write((boundingLevel + 1) * indent + ' size %lf %lf %lf\\n' % (boundingObject.geometry.box.x,\n boundingObject.geometry.box.y,\n boundingObject.geometry.box.z))\n proto.write(boundingLevel * indent + '}\\n')\n\n elif boundingObject.geometry.cylinder.radius != 0 and boundingObject.geometry.cylinder.length != 0:\n proto.write(initialIndent + 'Cylinder {\\n')\n proto.write((boundingLevel + 1) * indent + 'radius ' + str(boundingObject.geometry.cylinder.radius) + '\\n')\n proto.write((boundingLevel + 1) * indent + 'height ' + str(boundingObject.geometry.cylinder.length) + '\\n')\n proto.write(boundingLevel * indent + '}\\n')\n\n elif boundingObject.geometry.sphere.radius != 0:\n proto.write(initialIndent + 'Sphere {\\n')\n proto.write((boundingLevel + 1) * indent + 'radius ' + str(boundingObject.geometry.sphere.radius) + '\\n')\n proto.write(boundingLevel * indent + '}\\n')\n\n elif boundingObject.geometry.trimesh.coord and boxCollision:\n aabb = {\n 'minimum': {'x': float('inf'),\n 'y': float('inf'),\n 'z': float('inf')},\n 'maximum': {'x': float('-inf'),\n 'y': float('-inf'),\n 
'z': float('-inf')}\n }\n for value in boundingObject.geometry.trimesh.coord:\n x = value[0] * boundingObject.geometry.scale[0]\n y = value[1] * boundingObject.geometry.scale[1]\n z = value[2] * boundingObject.geometry.scale[2]\n aabb['minimum']['x'] = min(aabb['minimum']['x'], x)\n aabb['maximum']['x'] = max(aabb['maximum']['x'], x)\n aabb['minimum']['y'] = min(aabb['minimum']['y'], y)\n aabb['maximum']['y'] = max(aabb['maximum']['y'], y)\n aabb['minimum']['z'] = min(aabb['minimum']['z'], z)\n aabb['maximum']['z'] = max(aabb['maximum']['z'], z)\n\n proto.write(initialIndent + 'Transform {\\n')\n proto.write((boundingLevel + 1) * indent + 'translation %f %f %f\\n' % (\n 0.5 * (aabb['maximum']['x'] + aabb['minimum']['x']) + boundingObject.position[0],\n 0.5 * (aabb['maximum']['y'] + aabb['minimum']['y']) + boundingObject.position[1],\n 0.5 * (aabb['maximum']['z'] + aabb['minimum']['z']) + boundingObject.position[2],))\n proto.write((boundingLevel + 1) * indent + 'rotation %lf %lf %lf %lf\\n' % (boundingObject.rotation[0],\n boundingObject.rotation[1],\n boundingObject.rotation[2],\n boundingObject.rotation[3]))\n proto.write((boundingLevel + 1) * indent + 'children [\\n')\n proto.write((boundingLevel + 2) * indent + 'Box {\\n')\n proto.write((boundingLevel + 3) * indent + 'size %f %f %f\\n' % (\n aabb['maximum']['x'] - aabb['minimum']['x'],\n aabb['maximum']['y'] - aabb['minimum']['y'],\n aabb['maximum']['z'] - aabb['minimum']['z'],))\n proto.write((boundingLevel + 2) * indent + '}\\n')\n proto.write((boundingLevel + 1) * indent + ']\\n')\n proto.write(boundingLevel * indent + '}\\n')\n\n elif boundingObject.geometry.trimesh.coord:\n if boundingObject.geometry.defName is not None:\n proto.write(initialIndent + 'USE %s\\n' % boundingObject.geometry.defName)\n else:\n if boundingObject.geometry.name is not None:\n boundingObject.geometry.defName = computeDefName(boundingObject.geometry.name)\n proto.write(initialIndent + 'DEF %s IndexedFaceSet {\\n' % boundingObject.geometry.defName)\n else:\n proto.write(initialIndent + 'IndexedFaceSet {\\n')\n\n proto.write((boundingLevel + 1) * indent + 'coord Coordinate {\\n')\n proto.write((boundingLevel + 2) * indent + 'point [\\n' + (boundingLevel + 3) * indent)\n for value in boundingObject.geometry.trimesh.coord:\n proto.write('%lf %lf %lf, ' % (value[0] * boundingObject.geometry.scale[0],\n value[1] * boundingObject.geometry.scale[1],\n value[2] * boundingObject.geometry.scale[2]))\n proto.write('\\n' + (boundingLevel + 2) * indent + ']\\n')\n proto.write((boundingLevel + 1) * indent + '}\\n')\n\n proto.write((boundingLevel + 1) * indent + 'coordIndex [\\n' + (boundingLevel + 2) * indent)\n if isinstance(boundingObject.geometry.trimesh.coordIndex[0], np.ndarray) \\\n or type(boundingObject.geometry.trimesh.coordIndex[0]) == list:\n for value in boundingObject.geometry.trimesh.coordIndex:\n if len(value) == 3:\n proto.write('%d %d %d -1 ' % (value[0], value[1], value[2]))\n elif isinstance(boundingObject.geometry.trimesh.coordIndex[0], np.int32):\n for i in range(len(boundingObject.geometry.trimesh.coordIndex) / 3):\n proto.write('%d %d %d -1 ' % (boundingObject.geometry.trimesh.coordIndex[3 * i + 0],\n boundingObject.geometry.trimesh.coordIndex[3 * i + 1],\n boundingObject.geometry.trimesh.coordIndex[3 * i + 2]))\n else:\n print('Unsupported \"%s\" coordinate type' % type(boundingObject.geometry.trimesh.coordIndex[0]))\n proto.write('\\n' + (boundingLevel + 1) * indent + ']\\n')\n proto.write(boundingLevel * indent + '}\\n')\n\n else:\n 
proto.write(initialIndent + 'Box{\\n')\n proto.write((boundingLevel + 1) * indent + ' size 0.01 0.01 0.01\\n')\n proto.write(boundingLevel * indent + '}\\n')\n\n if boundingLevel == level + 4:\n proto.write((level + 3) * indent + ']\\n')\n proto.write((level + 2) * indent + '}\\n')\n boundingLevel = level + 2\n if boundingLevel == level + 2:\n proto.write((level + 1) * indent + ']\\n')\n proto.write(level * indent + '}\\n')\n\n\ndef computeDefName(name):\n \"\"\"Compute a VRML compliant DEF name from an arbitrary string.\"\"\"\n defName = name.replace(' ', '_').replace('.', '_')\n if not defName: # empty string\n return None\n return name.replace(' ', '_').replace('.', '_')\n\n\ndef URDFVisual(proto, visualNode, level, normal=False):\n \"\"\"Write a Visual.\"\"\"\n indent = ' '\n shapeLevel = level\n\n proto.write(shapeLevel * indent + 'Shape {\\n')\n if visualNode.material.defName is not None:\n proto.write((shapeLevel + 1) * indent + 'appearance USE %s\\n' % visualNode.material.defName)\n else:\n if visualNode.material.name is not None:\n visualNode.material.defName = computeDefName(visualNode.material.name)\n if visualNode.material.defName is not None:\n proto.write((shapeLevel + 1) * indent + 'appearance DEF %s PBRAppearance {\\n' % visualNode.material.defName)\n else:\n proto.write((shapeLevel + 1) * indent + 'appearance PBRAppearance {\\n')\n ambientColor = RGBA2RGB(visualNode.material.ambient)\n diffuseColor = RGBA2RGB(visualNode.material.diffuse, RGB_background=ambientColor)\n emissiveColor = RGBA2RGB(visualNode.material.emission, RGB_background=ambientColor)\n roughness = 1.0 - visualNode.material.specular.alpha * (visualNode.material.specular.red +\n visualNode.material.specular.green +\n visualNode.material.specular.blue) / 3.0\n if visualNode.material.shininess:\n roughness *= (1.0 - 0.5 * visualNode.material.shininess)\n proto.write((shapeLevel + 2) * indent + 'baseColor %lf %lf %lf\\n' % (diffuseColor.red,\n diffuseColor.green,\n diffuseColor.blue))\n proto.write((shapeLevel + 2) * indent + 'transparency %lf\\n' % (1.0 - visualNode.material.diffuse.alpha))\n proto.write((shapeLevel + 2) * indent + 'roughness %lf\\n' % roughness)\n proto.write((shapeLevel + 2) * indent + 'metalness 0\\n')\n proto.write((shapeLevel + 2) * indent + 'emissiveColor %lf %lf %lf\\n' % (emissiveColor.red,\n emissiveColor.green,\n emissiveColor.blue))\n if visualNode.material.texture != \"\":\n proto.write((shapeLevel + 2) * indent + 'baseColorMap ImageTexture {\\n')\n proto.write((shapeLevel + 3) * indent + 'url [ \"' + visualNode.material.texture + '\" ]\\n')\n proto.write((shapeLevel + 2) * indent + '}\\n')\n proto.write((shapeLevel + 1) * indent + '}\\n')\n\n if visualNode.geometry.box.x != 0:\n proto.write((shapeLevel + 1) * indent + 'geometry Box {\\n')\n proto.write((shapeLevel + 2) * indent + ' size ' +\n str(visualNode.geometry.box.x) + ' ' +\n str(visualNode.geometry.box.y) + ' ' +\n str(visualNode.geometry.box.z) + '\\n')\n proto.write((shapeLevel + 1) * indent + '}\\n')\n\n elif visualNode.geometry.cylinder.radius != 0:\n proto.write((shapeLevel + 1) * indent + 'geometry Cylinder {\\n')\n proto.write((shapeLevel + 2) * indent + 'radius ' + str(visualNode.geometry.cylinder.radius) + '\\n')\n proto.write((shapeLevel + 2) * indent + 'height ' + str(visualNode.geometry.cylinder.length) + '\\n')\n proto.write((shapeLevel + 1) * indent + '}\\n')\n\n elif visualNode.geometry.sphere.radius != 0:\n proto.write((shapeLevel + 1) * indent + 'geometry Sphere {\\n')\n proto.write((shapeLevel + 2) * 
indent + 'radius ' + str(visualNode.geometry.sphere.radius) + '\\n')\n proto.write((shapeLevel + 1) * indent + '}\\n')\n\n elif visualNode.geometry.trimesh.coord:\n meshType = 'IndexedLineSet' if visualNode.geometry.lineset else 'IndexedFaceSet'\n if visualNode.geometry.defName is not None:\n proto.write((shapeLevel + 1) * indent + 'geometry USE %s\\n' % visualNode.geometry.defName)\n else:\n if visualNode.geometry.name is not None:\n visualNode.geometry.defName = computeDefName(visualNode.geometry.name)\n if visualNode.geometry.defName is not None:\n proto.write((shapeLevel + 1) * indent + 'geometry DEF %s %s {\\n' % (visualNode.geometry.defName, meshType))\n else:\n proto.write((shapeLevel + 1) * indent + 'geometry %s {\\n' % meshType)\n proto.write((shapeLevel + 2) * indent + 'coord Coordinate {\\n')\n proto.write((shapeLevel + 3) * indent + 'point [\\n' + (shapeLevel + 4) * indent)\n for value in visualNode.geometry.trimesh.coord:\n proto.write('%lf %lf %lf, ' % (value[0] * visualNode.geometry.scale[0],\n value[1] * visualNode.geometry.scale[1],\n value[2] * visualNode.geometry.scale[2]))\n proto.write('\\n' + (shapeLevel + 3) * indent + ']\\n')\n proto.write((shapeLevel + 2) * indent + '}\\n')\n\n proto.write((shapeLevel + 2) * indent + 'coordIndex [\\n' + (shapeLevel + 3) * indent)\n if (isinstance(visualNode.geometry.trimesh.coordIndex[0], np.ndarray) or\n type(visualNode.geometry.trimesh.coordIndex[0]) == list):\n for value in visualNode.geometry.trimesh.coordIndex:\n if len(value) == 3:\n proto.write('%d %d %d -1 ' % (value[0], value[1], value[2]))\n elif len(value) == 2:\n assert visualNode.geometry.lineset\n proto.write('%d %d -1 ' % (value[0], value[1]))\n elif isinstance(visualNode.geometry.trimesh.coordIndex[0], np.int32):\n for i in range(int(len(visualNode.geometry.trimesh.coordIndex) / 3)):\n proto.write('%d %d %d -1 ' % (visualNode.geometry.trimesh.coordIndex[3 * i + 0],\n visualNode.geometry.trimesh.coordIndex[3 * i + 1],\n visualNode.geometry.trimesh.coordIndex[3 * i + 2]))\n else:\n print('Unsupported \"%s\" coordinate type' % type(visualNode.geometry.trimesh.coordIndex[0]))\n proto.write('\\n' + (shapeLevel + 2) * indent + ']\\n')\n\n if normal and visualNode.geometry.trimesh.normal and visualNode.geometry.trimesh.normalIndex:\n proto.write((shapeLevel + 2) * indent + 'normal Normal {\\n')\n proto.write((shapeLevel + 3) * indent + 'vector [\\n' + (shapeLevel + 4) * indent)\n for value in visualNode.geometry.trimesh.normal:\n proto.write('%lf %lf %lf, ' % (value[0], value[1], value[2]))\n proto.write('\\n' + (shapeLevel + 3) * indent + ']\\n')\n proto.write((shapeLevel + 2) * indent + '}\\n')\n\n proto.write((shapeLevel + 2) * indent + 'normalIndex [\\n' + (shapeLevel + 3) * indent)\n if (isinstance(visualNode.geometry.trimesh.normalIndex[0], np.ndarray) or\n type(visualNode.geometry.trimesh.normalIndex[0]) == list):\n for value in visualNode.geometry.trimesh.normalIndex:\n if len(value) == 3:\n proto.write('%d %d %d -1 ' % (value[0], value[1], value[2]))\n elif isinstance(visualNode.geometry.trimesh.normalIndex[0], np.int32):\n for i in range(len(visualNode.geometry.trimesh.normalIndex) / 3):\n proto.write('%d %d %d -1 ' % (visualNode.geometry.trimesh.normalIndex[3 * i + 0],\n visualNode.geometry.trimesh.normalIndex[3 * i + 1],\n visualNode.geometry.trimesh.normalIndex[3 * i + 2]))\n else:\n print('Unsupported \"%s\" normal type' % type(visualNode.geometry.trimesh.normalIndex[0]))\n proto.write('\\n' + (shapeLevel + 2) * indent + ']\\n')\n\n if 
visualNode.geometry.trimesh.texCoord:\n proto.write((shapeLevel + 2) * indent + 'texCoord TextureCoordinate {\\n')\n proto.write((shapeLevel + 3) * indent + 'point [\\n' + (shapeLevel + 4) * indent)\n for value in visualNode.geometry.trimesh.texCoord:\n proto.write('%lf %lf, ' % (value[0], value[1]))\n proto.write('\\n' + (shapeLevel + 3) * indent + ']\\n')\n proto.write((shapeLevel + 2) * indent + '}\\n')\n\n proto.write((shapeLevel + 2) * indent + 'texCoordIndex [\\n' + (shapeLevel + 3) * indent)\n if (isinstance(visualNode.geometry.trimesh.texCoordIndex[0], np.ndarray) or\n type(visualNode.geometry.trimesh.texCoordIndex[0]) == list):\n for value in visualNode.geometry.trimesh.texCoordIndex:\n if len(value) == 3:\n proto.write('%d %d %d -1 ' % (value[0], value[1], value[2]))\n elif isinstance(visualNode.geometry.trimesh.texCoordIndex[0], np.int32):\n for i in range(len(visualNode.geometry.trimesh.texCoordIndex) / 3):\n proto.write('%d %d %d -1 ' % (visualNode.geometry.trimesh.texCoordIndex[3 * i + 0],\n visualNode.geometry.trimesh.texCoordIndex[3 * i + 1],\n visualNode.geometry.trimesh.texCoordIndex[3 * i + 2]))\n else:\n print('Unsupported \"%s\" coordinate type' % type(visualNode.geometry.trimesh.texCoordIndex[0]))\n proto.write('\\n' + (shapeLevel + 2) * indent + ']\\n')\n\n if not visualNode.geometry.lineset:\n proto.write((shapeLevel + 2) * indent + 'creaseAngle 1\\n')\n proto.write((shapeLevel + 1) * indent + '}\\n')\n proto.write(shapeLevel * indent + '}\\n')\n\n\ndef URDFShape(proto, link, level, normal=False):\n \"\"\"Write a Shape.\"\"\"\n indent = ' '\n shapeLevel = level\n transform = False\n\n for visualNode in link.visual:\n if visualNode.position != [0.0, 0.0, 0.0] or visualNode.rotation[3] != 0:\n proto.write(shapeLevel * indent + 'Transform {\\n')\n proto.write((shapeLevel + 1) * indent + 'translation %lf %lf %lf\\n' % (visualNode.position[0],\n visualNode.position[1],\n visualNode.position[2]))\n proto.write((shapeLevel + 1) * indent + 'rotation %lf %lf %lf %lf\\n' % (visualNode.rotation[0],\n visualNode.rotation[1],\n visualNode.rotation[2],\n visualNode.rotation[3]))\n proto.write((shapeLevel + 1) * indent + 'children [\\n')\n shapeLevel += 2\n transform = True\n if enableMultiFile and visualNode.geometry.trimesh.coord:\n name = visualNode.geometry.defName\n if name is None:\n if visualNode.geometry.name is not None:\n name = computeDefName(visualNode.geometry.name)\n name = robotNameMain + '_' + name if robotNameMain else name\n if visualNode.geometry.defName is None:\n print('Create meshFile: %sMesh.proto' % name)\n filepath = '%s/%sMesh.proto' % (meshFilesPath, name)\n meshProtoFile = open(filepath, 'w')\n header(meshProtoFile, tags=['hidden'])\n meshProtoFile.write('PROTO %sMesh [\\n]\\n{\\n' % name)\n visualNode.material.defName = None\n URDFVisual(meshProtoFile, visualNode, 1, normal)\n meshProtoFile.write('}\\n')\n meshProtoFile.close()\n proto.write(shapeLevel * indent + '%sMesh {\\n' % name + shapeLevel * indent + '}\\n')\n else:\n URDFVisual(proto, visualNode, shapeLevel, normal)\n if transform:\n proto.write((shapeLevel - 1) * indent + ']\\n')\n proto.write((shapeLevel - 2) * indent + '}\\n')\n shapeLevel -= 2\n\n\ndef URDFJoint(proto, joint, level, parentList, childList, linkList, jointList,\n sensorList, boxCollision, normal):\n \"\"\"Write a Joint iteratively.\"\"\"\n indent = ' '\n if not joint.axis:\n joint.axis = [1, 0, 0]\n axis = joint.axis\n endpointRotation = joint.rotation\n endpointPosition = joint.position\n if joint.rotation[3] != 0.0 and 
axis:\n axis = rotateVector(axis, joint.rotation)\n if joint.type == 'revolute' or joint.type == 'continuous':\n proto.write(level * indent + 'HingeJoint {\\n')\n proto.write((level + 1) * indent + 'jointParameters HingeJointParameters {\\n')\n position = None\n if joint.limit.lower > 0.0:\n # if 0 is not in the range, set the position to be the middle of the range\n position = joint.limit.lower\n if joint.limit.upper >= joint.limit.lower:\n position = (joint.limit.upper - joint.limit.lower) / 2.0 + joint.limit.lower\n if initPos is not None:\n if len(initPos) > 0:\n position = initPos[0]\n del initPos[0]\n if position is not None:\n proto.write((level + 2) * indent + 'position %lf \\n' % position)\n mat1 = matrixFromRotation(endpointRotation)\n mat2 = matrixFromRotation([axis[0], axis[1], axis[2], position])\n mat3 = multiplyMatrix(mat2, mat1)\n endpointRotation = rotationFromMatrix(mat3)\n proto.write((level + 2) * indent + 'axis %lf %lf %lf\\n' % (axis[0], axis[1], axis[2]))\n proto.write((level + 2) * indent + 'anchor %lf %lf %lf\\n' % (joint.position[0], joint.position[1], joint.position[2]))\n proto.write((level + 2) * indent + 'dampingConstant ' + str(joint.dynamics.damping) + '\\n')\n proto.write((level + 2) * indent + 'staticFriction ' + str(joint.dynamics.friction) + '\\n')\n proto.write((level + 1) * indent + '}\\n')\n proto.write((level + 1) * indent + 'device [\\n')\n proto.write((level + 2) * indent + 'RotationalMotor {\\n')\n elif joint.type == 'prismatic':\n proto.write(level * indent + 'SliderJoint {\\n')\n proto.write((level + 1) * indent + 'jointParameters JointParameters {\\n')\n if joint.limit.lower > 0.0:\n # if 0 is not in the range, set the position to be the middle of the range\n position = joint.limit.lower\n if joint.limit.upper >= joint.limit.lower:\n position = (joint.limit.upper - joint.limit.lower) / 2.0 + joint.limit.lower\n proto.write((level + 2) * indent + 'position %lf \\n' % position)\n length = math.sqrt(axis[0] * axis[0] + axis[1] * axis[1] + axis[2] * axis[2])\n if length > 0:\n endpointPosition[0] += axis[0] / length * position\n endpointPosition[0] += axis[1] / length * position\n endpointPosition[0] += axis[2] / length * position\n proto.write((level + 2) * indent + 'axis %lf %lf %lf\\n' % (axis[0], axis[1], axis[2]))\n proto.write((level + 2) * indent + 'dampingConstant ' + str(joint.dynamics.damping) + '\\n')\n proto.write((level + 2) * indent + 'staticFriction ' + str(joint.dynamics.friction) + '\\n')\n proto.write((level + 1) * indent + '}\\n')\n proto.write((level + 1) * indent + 'device [\\n')\n proto.write((level + 2) * indent + 'LinearMotor {\\n')\n elif joint.type == 'fixed':\n for childLink in linkList:\n if childLink.name == joint.child:\n URDFLink(proto, childLink, level, parentList, childList,\n linkList, jointList, sensorList, joint.position, joint.rotation,\n boxCollision, normal)\n return\n\n elif joint.type == 'floating' or joint.type == 'planar':\n print(joint.type + ' is not a supported joint type in Webots')\n return\n\n proto.write((level + 3) * indent + 'name \"' + joint.name + '\"\\n')\n if joint.limit.velocity != 0.0:\n proto.write((level + 3) * indent + 'maxVelocity ' + str(joint.limit.velocity) + '\\n')\n if joint.limit.lower != 0.0:\n proto.write((level + 3) * indent + 'minPosition ' + str(joint.limit.lower) + '\\n')\n if joint.limit.upper != 0.0:\n proto.write((level + 3) * indent + 'maxPosition ' + str(joint.limit.upper) + '\\n')\n if joint.limit.effort != 0.0:\n if joint.type == 'prismatic':\n proto.write((level + 3) 
* indent + 'maxForce ' + str(joint.limit.effort) + '\\n')\n else:\n proto.write((level + 3) * indent + 'maxTorque ' + str(joint.limit.effort) + '\\n')\n proto.write((level + 2) * indent + '}\\n')\n proto.write((level + 2) * indent + 'PositionSensor {\\n')\n proto.write((level + 3) * indent + 'name \"' + joint.name + '_sensor\"\\n')\n proto.write((level + 2) * indent + '}\\n')\n proto.write((level + 1) * indent + ']\\n')\n\n proto.write((level + 1) * indent + 'endPoint')\n found_link = False\n for childLink in linkList:\n if childLink.name == joint.child:\n URDFLink(proto, childLink, level + 1, parentList, childList,\n linkList, jointList, sensorList, endpointPosition, endpointRotation,\n boxCollision, normal, endpoint=True)\n assert(not found_link)\n found_link = True\n # case that non-existing link cited, set dummy flag\n if not found_link and joint.child:\n URDFLink(proto, joint.child, level + 1, parentList, childList,\n linkList, jointList, sensorList, endpointPosition, endpointRotation,\n boxCollision, normal, dummy=True)\n print('warning: link ' + joint.child + ' is dummy!')\n proto.write(level * indent + '}\\n')\n"
] |
[
[
"numpy.array",
"numpy.dot",
"numpy.transpose"
]
] |
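`URDFLink` in the row above rotates the URDF inertia matrix into the link frame with R_t @ I @ R (see the euclideanspace.com reference cited in its comment). Below is a small standalone check of that operation with a rotation about the z-axis; the inertia values and angle are arbitrary and only illustrate that the rotated matrix stays symmetric, as an inertia tensor must.

    import numpy as np

    def rotation_z(angle):
        c, s = np.cos(angle), np.sin(angle)
        return np.array([[c, -s, 0.0], [s, c, 0.0], [0.0, 0.0, 1.0]])

    # arbitrary symmetric inertia matrix (ixx, iyy, izz on the diagonal)
    I_mat = np.array([[0.40, 0.01, 0.00],
                      [0.01, 0.50, 0.02],
                      [0.00, 0.02, 0.30]])
    R = rotation_z(np.deg2rad(30))

    # same R^T I R product as the proto export performs before writing inertiaMatrix
    I_rot = R.T @ I_mat @ R
    print(np.allclose(I_rot, I_rot.T))  # True: symmetry is preserved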
Mishne-Lab/CIDAN
|
[
"30d1176773e3ad0f236ba342cba48c89492f4e63"
] |
[
"cidan/GUI/ImageView/ROIPaintImageViewModule.py"
] |
[
"import logging\n\nimport numpy as np\nfrom PySide2.QtWidgets import QErrorMessage\nfrom qtpy import QtCore\n\nfrom cidan.GUI.ImageView.ROIImageViewModule import ROIImageViewModule\n\nlogger1 = logging.getLogger(\"cidan.ImageView.ROIImageViewModule\")\n\n\nclass ROIPaintImageViewModule(ROIImageViewModule):\n def __init__(self, main_widget, tab, select_multiple=False,settings_tab=True):\n super(ROIPaintImageViewModule, self).__init__(main_widget, tab, settings_tab, select_multiple=select_multiple)\n # part about selecting pixels\n self.setMouseTracking(True)\n self.image_view.setMouseTracking(True)\n self.image_view.QMouseEvent = lambda x: self.mouse_move(x)\n\n self.select_pixel_on = False # whether you can currently select pixels in the image\n self.brush_size = 1\n self.current_selected_pixels_list = [] # list of currently selected pixels in their 1d number format\n self.current_selected_pixels_mask = np.zeros((self.data_handler.shape[0],\n self.data_handler.shape[\n 1]),\n dtype=bool) # mask 1 when selected 0 when not\n self.shape = self.data_handler.shape\n self.select_image_flat = np.zeros([self.shape[0] * self.shape[1], 3])\n self.halo_image_flat = np.zeros(\n [self.shape[0] * self.shape[1], 3]) # used for cursor halo\n self.select_pixel_color = [0, 255, 0]\n self.select_mode = \"add\" # possibilities add and subtract from current\n # selection\n\n def mouse_move(self, event):\n if event.type() == QtCore.Qt.MouseMove:\n print(event.pos().x())\n def roi_view_click(self, event):\n if event.button() == QtCore.Qt.RightButton:\n if self.image_item.raiseContextMenu(event):\n event.accept()\n try:\n if hasattr(self.main_widget.data_handler,\n \"pixel_with_rois_flat\") and self.main_widget.data_handler.pixel_with_rois_flat is not None:\n pos = event.pos()\n\n y = int(pos.x())\n x = int(pos.y())\n if self.select_mode == \"magic\":\n self.magic_wand(x, y)\n print(\"Done generating ROI\")\n elif self.select_pixel_on:\n event.accept()\n self.pixel_paint(x, y)\n\n else:\n super().roi_view_click(event)\n elif self.select_pixel_on or self.select_mode == \"magic\":\n error_dialog = QErrorMessage(self.main_widget.main_window)\n error_dialog.showMessage(\n \"Something has changed, please regenerate ROIs\")\n except ValueError as e:\n if \"shape\" in e.args[0]:\n self.reset_view()\n\n def roi_view_drag(self, event):\n # if event.button() == QtCore.Qt.RightButton:\n # if self.image_item.raiseContextMenu(event):\n # event.accept()\n pos = event.pos()\n\n y = int(pos.x())\n x = int(pos.y())\n if self.select_pixel_on and self.select_mode != \"magic\":\n if hasattr(self.main_widget.data_handler,\n \"pixel_with_rois_flat\") and self.main_widget.data_handler.pixel_with_rois_flat is not None:\n\n event.accept()\n self.pixel_paint(x, y)\n else:\n error_dialog = QErrorMessage(self.main_widget.main_window)\n error_dialog.showMessage(\n \"Something has changed, please regenerate ROIs\")\n\n def magic_wand(self, x, y):\n shape = self.data_handler.shape\n # self.clearPixelSelection(update_display=False)\n print(\"Generating ROI\")\n new_roi = self.data_handler.genRoiFromPoint((x, y))\n if len(new_roi) == 0:\n print(\n \"Please try again with a different point, we couldn't find an roi where\"\n \" you last selected\")\n self.main_widget.console.updateText(\"Please try again with a different \"\n \"point, we couldn't find an roi where \"\n \"you last selected\")\n\n return False\n\n for cord_1d in new_roi:\n x_new, y_new = cord_1d // shape[1], cord_1d - (\n cord_1d // shape[1]) * shape[1]\n 
self.image_item.image[x_new, y_new] += [0, 255, 0]\n self.current_selected_pixels_list.append(\n shape[1] * x_new + y_new)\n self.current_selected_pixels_mask[x_new, y_new] = True\n self.image_item.updateImage()\n self.main_widget.console.updateText(\n \"Successfully generated selection of size: %s\" % str(len(new_roi)))\n\n return True\n def pixel_paint(self, x, y):\n try:\n shape = self.main_widget.data_handler.shape\n if self.select_mode == \"add\":\n\n for x_dif in range(self.brush_size * 2 + 1):\n for y_dif in range(self.brush_size * 2 + 1):\n x_new = x - self.brush_size - 1 + x_dif\n y_new = y - self.brush_size - 1 + y_dif\n if shape[1] * x_new + y_new \\\n not in self.current_selected_pixels_list:\n self.image_item.image[x_new, y_new] += [0, 255, 0]\n self.current_selected_pixels_list.append(\n shape[1] * x_new + y_new)\n self.current_selected_pixels_mask[x_new, y_new] = True\n\n if self.select_mode == \"subtract\":\n for x_dif in range(self.brush_size * 2 + 1):\n for y_dif in range(self.brush_size * 2 + 1):\n x_new = x - self.brush_size - 1 + x_dif\n y_new = y - self.brush_size - 1 + y_dif\n if shape[1] * x_new + y_new \\\n in self.current_selected_pixels_list:\n self.image_item.image[x_new, y_new] -= [0, 255, 0]\n self.current_selected_pixels_list.remove(\n shape[1] * x_new + y_new)\n self.current_selected_pixels_mask[x_new, y_new] = False\n\n self.image_item.updateImage()\n except IndexError:\n pass\n except ValueError as e:\n if \"shape\" in e.args[0]:\n print(\"Error please try again\")\n self.reset_view()\n\n pass # TODO use slicing to update pixel based on current thing\n\n def updateImageDisplay(self, new=False, update=False):\n try:\n # new is to determine whether the zoom should be saved\n # TODO add in update with image paint layer\n shape = self.main_widget.data_handler.shape\n\n # range_list = self.main_widget.roi_image_view.image_view.view.viewRange()\n background_max = np.percentile(self.current_background, 98)\n background_min = np.percentile(self.current_background, 2)\n background_image_scaled = (self.current_foreground_intensity / 7 * (\n self.current_background - background_min) * 255 / (\n (background_max - background_min) if (\n background_max - background_min) != 0 else 1))\n background_image_scaled_3_channel = np.hstack(\n [background_image_scaled, background_image_scaled,\n background_image_scaled])\n if not hasattr(self.main_widget.data_handler,\n \"edge_roi_image_flat\"):\n self.image_item.image = background_image_scaled_3_channel.reshape(\n (shape[0], shape[1], 3))\n self.image_item.updateImage(autoLevels=False)\n self.image_item.setLevels((0, 255))\n elif new:\n # if self.add_image:\n combined = self.roi_image_flat * .45 + background_image_scaled_3_channel * .45 + self.select_image_flat * .45\n\n # else:\n # combined = background_image_scaled + self.select_image_flat\n # mask = np.any(self.roi_image_flat != [0, 0, 0], axis=1)\n # combined[mask] = self.roi_image_flat[mask]\n combined_reshaped = combined.reshape((shape[0], shape[1], 3))\n self.tab.image_view.setImage(combined_reshaped)\n\n self.clearPixelSelection(update_display=False)\n self.image_item.setLevels((0, 255))\n else:\n self.image_item.image = background_image_scaled_3_channel.reshape(\n (shape[0], shape[1], 3)) * .45\n self.image_item.setLevels((0, 255))\n\n # if self.add_image:\n combined = (\n self.roi_image_flat * .45 + self.select_image_flat * .45).reshape(\n (shape[0], shape[1], 3))\n self.image_item.image += combined\n self.image_item.image[\n self.current_selected_pixels_mask] += 
self.select_pixel_color\n\n # else:\n # combined = self.select_image_flat+self.roi_image_flat\n # combined_reshaped = combined.reshape((shape[1], shape[2], 3))\n # mask = np.any(combined != [0, 0, 0], axis=1).reshape((shape[1], shape[2]))\n #\n # self.image_item.image[mask] = combined_reshaped[mask]\n\n self.image_item.updateImage(autoLevels=False)\n\n # self.main_widget.roi_image_view.image_view.view.setRange(xRange=range_list[0],\n # yRange=range_list[1])\n # range_list = self.main_widget.roi_image_view.image_view.view.viewRange()\n # print(range_list)\n\n pass\n except AttributeError as e:\n logger1.error(e)\n except ValueError as e:\n if \"shape\" in e.args[0]:\n # print(\"Error please try again\")\n self.reset_view()\n\n def clearPixelSelection(self, update_display=True, display_update_text=False):\n shape = self.main_widget.data_handler.shape\n self.current_selected_pixels_mask = np.zeros([shape[0], shape[1]], dtype=bool)\n self.current_selected_pixels_list = []\n if update_display:\n self.updateImageDisplay()\n if display_update_text:\n self.main_widget.console.updateText(\"Clearing currently selected pixels\")\n\n def check_pos_in_image(self, x, y):\n pass\n # TODO add in way to check if in image\n\n def setBrushSize(self, size):\n \"\"\"\n Sets the brush size\n\n self.brush_size is the additional size on all dimensions in addition to middle\n point\n Parameters\n ----------\n size from option input\n\n Returns\n -------\n nothing\n \"\"\"\n self.brush_size = int((int(size) - 1) / 2)\n\n def setSelectorBrushType(self, type):\n\n if type == \"off\":\n self.select_pixel_on = False\n self.select_mode = type\n else:\n self.select_pixel_on = True\n self.select_mode = type\n\n def reset_view(self, new=True):\n if not any([x.isRunning() for x in\n self.main_widget.thread_list]) and not self.resetting_view:\n super().reset_view(updateDisplay=False)\n self.resetting_view = True\n self.select_image_flat = np.zeros(\n [self.data_handler.shape[0] * self.data_handler.shape[1], 3])\n\n self.clearPixelSelection()\n self.updateImageDisplay(new=new)\n self.resetting_view = False\n"
] |
[
[
"numpy.percentile",
"numpy.hstack",
"numpy.zeros"
]
] |
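The painting code in the row above keeps selected pixels both as a 2-D mask and as flattened 1-D indices, converting with `shape[1] * x + y` in one direction and integer division plus remainder in the other. A tiny sketch of that round trip follows, with an arbitrary shape; it is equivalent to numpy's ravel/unravel helpers, which the file does not use but which make the intent easy to verify.

    import numpy as np

    shape = (4, 6)             # (rows, cols), e.g. image height x width
    x, y = 2, 5                # a pixel coordinate

    flat = shape[1] * x + y    # 2-D -> 1-D, as in pixel_paint
    x_back = flat // shape[1]  # 1-D -> 2-D, as in magic_wand
    y_back = flat - x_back * shape[1]

    assert (x_back, y_back) == (x, y)
    assert flat == np.ravel_multi_index((x, y), shape)
    assert (x_back, y_back) == np.unravel_index(flat, shape)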
hadarohana/fairseq
|
[
"566341e3aae9271facf6b9181b3f51b5120a2774"
] |
[
"fairseq/modules/transformer_sentence_encoder.py"
] |
[
"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom typing import Optional, Tuple\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom fairseq.modules import (\n LayerNorm,\n MultiheadAttention,\n PositionalEmbedding,\n TransformerSentenceEncoderLayer,\n)\nimport random\n\n\ndef init_bert_params(module):\n \"\"\"\n Initialize the weights specific to the BERT Model.\n This overrides the default initializations depending on the specified arguments.\n 1. If normal_init_linear_weights is set then weights of linear\n layer will be initialized using the normal distribution and\n bais will be set to the specified value.\n 2. If normal_init_embed_weights is set then weights of embedding\n layer will be initialized using the normal distribution.\n 3. If normal_init_proj_weights is set then weights of\n in_project_weight for MultiHeadAttention initialized using\n the normal distribution (to be validated).\n \"\"\"\n\n if isinstance(module, nn.Linear):\n module.weight.data.normal_(mean=0.0, std=0.02)\n if module.bias is not None:\n module.bias.data.zero_()\n if isinstance(module, nn.Embedding):\n module.weight.data.normal_(mean=0.0, std=0.02)\n if module.padding_idx is not None:\n module.weight.data[module.padding_idx].zero_()\n if isinstance(module, MultiheadAttention):\n module.in_proj_weight.data.normal_(mean=0.0, std=0.02)\n\n\nclass TransformerSentenceEncoder(nn.Module):\n \"\"\"\n Implementation for a Bi-directional Transformer based Sentence Encoder used\n in BERT/XLM style pre-trained models.\n\n This first computes the token embedding using the token embedding matrix,\n position embeddings (if specified) and segment embeddings\n (if specified). 
After applying the specified number of\n TransformerEncoderLayers, it outputs all the internal states of the\n encoder as well as the final representation associated with the first\n token (usually CLS token).\n\n Input:\n - tokens: B x T matrix representing sentences\n - segment_labels: B x T matrix representing segment label for tokens\n\n Output:\n - a tuple of the following:\n - a list of internal model states used to compute the\n predictions where each tensor has shape B x T x C\n - sentence representation associated with first input token\n in format B x C.\n \"\"\"\n\n def __init__(\n self,\n padding_idx: int,\n vocab_size: int,\n num_encoder_layers: int = 6,\n embedding_dim: int = 768,\n ffn_embedding_dim: int = 3072,\n num_attention_heads: int = 8,\n dropout: float = 0.1,\n attention_dropout: float = 0.1,\n activation_dropout: float = 0.1,\n layerdrop : float = 0.0,\n max_seq_len: int = 256,\n num_segments: int = 2,\n use_position_embeddings: bool = True,\n offset_positions_by_padding: bool = True,\n encoder_normalize_before: bool = False,\n apply_bert_init: bool = False,\n activation_fn: str = \"relu\",\n learned_pos_embedding: bool = True,\n add_bias_kv: bool = False,\n add_zero_attn: bool = False,\n embed_scale: float = None,\n freeze_embeddings: bool = False,\n n_trans_layers_to_freeze: int = 0,\n export: bool = False,\n ) -> None:\n\n super().__init__()\n self.padding_idx = padding_idx\n self.vocab_size = vocab_size\n self.dropout = dropout\n self.layerdrop = layerdrop\n self.max_seq_len = max_seq_len\n self.embedding_dim = embedding_dim\n self.num_segments = num_segments\n self.use_position_embeddings = use_position_embeddings\n self.apply_bert_init = apply_bert_init\n self.learned_pos_embedding = learned_pos_embedding\n\n self.embed_tokens = nn.Embedding(\n self.vocab_size, self.embedding_dim, self.padding_idx\n )\n self.embed_scale = embed_scale\n\n self.segment_embeddings = (\n nn.Embedding(self.num_segments, self.embedding_dim, padding_idx=None)\n if self.num_segments > 0\n else None\n )\n\n self.embed_positions = (\n PositionalEmbedding(\n self.max_seq_len,\n self.embedding_dim,\n padding_idx=(self.padding_idx if offset_positions_by_padding else None),\n learned=self.learned_pos_embedding,\n )\n if self.use_position_embeddings\n else None\n )\n\n self.layers = nn.ModuleList(\n [\n TransformerSentenceEncoderLayer(\n embedding_dim=self.embedding_dim,\n ffn_embedding_dim=ffn_embedding_dim,\n num_attention_heads=num_attention_heads,\n dropout=self.dropout,\n attention_dropout=attention_dropout,\n activation_dropout=activation_dropout,\n activation_fn=activation_fn,\n add_bias_kv=add_bias_kv,\n add_zero_attn=add_zero_attn,\n export=export,\n )\n for _ in range(num_encoder_layers)\n ]\n )\n\n if encoder_normalize_before:\n self.emb_layer_norm = LayerNorm(self.embedding_dim, export=export)\n else:\n self.emb_layer_norm = None\n\n # Apply initialization of model params after building the model\n if self.apply_bert_init:\n self.apply(init_bert_params)\n\n def freeze_module_params(m):\n if m is not None:\n for p in m.parameters():\n p.requires_grad = False\n\n if freeze_embeddings:\n freeze_module_params(self.embed_tokens)\n freeze_module_params(self.segment_embeddings)\n freeze_module_params(self.embed_positions)\n freeze_module_params(self.emb_layer_norm)\n\n for layer in range(n_trans_layers_to_freeze):\n freeze_module_params(self.layers[layer])\n\n def forward(\n self,\n tokens: torch.Tensor,\n segment_labels: torch.Tensor = None,\n last_state_only: bool = False,\n 
positions: Optional[torch.Tensor] = None,\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n\n # compute padding mask. This is needed for multi-head attention\n padding_mask = tokens.eq(self.padding_idx)\n if not padding_mask.any():\n padding_mask = None\n\n x = self.embed_tokens(tokens)\n\n if self.embed_scale is not None:\n x *= self.embed_scale\n\n if self.embed_positions is not None:\n x += self.embed_positions(tokens, positions=positions)\n\n if self.segment_embeddings is not None and segment_labels is not None:\n x += self.segment_embeddings(segment_labels)\n\n if self.emb_layer_norm is not None:\n x = self.emb_layer_norm(x)\n\n x = F.dropout(x, p=self.dropout, training=self.training)\n\n # account for padding while computing the representation\n if padding_mask is not None:\n x *= 1 - padding_mask.unsqueeze(-1).type_as(x)\n\n # B x T x C -> T x B x C\n x = x.transpose(0, 1)\n\n inner_states = []\n if not last_state_only:\n inner_states.append(x)\n\n for layer in self.layers:\n # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)\n dropout_probability = random.uniform(0, 1)\n if not self.training or (dropout_probability > self.layerdrop):\n x, _ = layer(x, self_attn_padding_mask=padding_mask)\n if not last_state_only:\n inner_states.append(x)\n\n\n # T x B x C -> B x T x C\n x = x.transpose(0, 1)\n\n sentence_rep = x[:, 0, :]\n\n if last_state_only:\n inner_states = [x]\n\n return inner_states, sentence_rep\n"
] |
[
[
"torch.nn.functional.dropout",
"torch.nn.Embedding"
]
] |
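The record above ends with a TransformerSentenceEncoder whose docstring describes the B x T token input and whose `forward` applies embedding, dropout, and padding masking before the transformer layers. The snippet below is a minimal, self-contained sketch of just that embedding stage, built around the two APIs listed for this file (`torch.nn.Embedding`, `torch.nn.functional.dropout`); it is not taken from the file, and the vocabulary size, dimensions, and dropout rate are illustrative.

```python
# Minimal sketch of the encoder's embedding stage (sizes are illustrative).
import torch
import torch.nn.functional as F
from torch import nn

padding_idx, vocab_size, embedding_dim = 1, 1000, 768
embed_tokens = nn.Embedding(vocab_size, embedding_dim, padding_idx)

tokens = torch.randint(2, vocab_size, (2, 16))          # B x T batch of token ids
padding_mask = tokens.eq(padding_idx)                    # True where a position is padding
x = embed_tokens(tokens)                                 # B x T x C embeddings
x = F.dropout(x, p=0.1, training=True)
x = x * (1 - padding_mask.unsqueeze(-1).type_as(x))      # zero out embeddings of padded positions
x = x.transpose(0, 1)                                    # T x B x C, as the attention layers expect
print(x.shape)                                           # torch.Size([16, 2, 768])
```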
ishine/bolt
|
[
"ea734231f7085898ba5ca10da6d02da38058a705",
"ea734231f7085898ba5ca10da6d02da38058a705"
] |
[
"model_tools/tools/tensorflow2caffe/bert/albert/transform_albert.py",
"model_tools/tools/tensorflow2caffe/bert/tinybert/tinybert-infer.py"
] |
[
"#!/usr/local/bin/python\n# -*- coding: utf-8 -*-\n\nfrom tensorflow2caffe_albert import Tensorflow2CaffeALBert\nimport numpy as np\n\n\nif __name__ == '__main__':\n tensorflow_model_path = \"/data/bolt/model_zoo/tensorflow_models/albert_base_zh_additional_36k_steps/albert_model.ckpt\"\n encoder_layers = 12\n caffe_model_path_prefix = \"albert_base\"\n caffe_model_name = \"albert_base\"\n\n max_seq_length = 128\n embedding_dim = 768\n num_heads = 12\n\n albert_caffe = Tensorflow2CaffeALBert(tensorflow_model_path, caffe_model_path_prefix, caffe_model_name,\n max_seq_length, embedding_dim, encoder_layers, num_heads,\n False, True)\n data = {}\n data[\"bert_words\"] = np.array([[101,1045,2342,1037,14764,2005,2296,5353,3531,102]])\n bert_length = len(data[\"bert_words\"][0])\n data[\"bert_positions\"] = np.array([[i for i in range(bert_length)]])\n data[\"bert_token_type\"] = np.array([[0] * bert_length])\n albert_caffe.generate(data)\n",
"import os\nimport argparse\nimport time\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.python.platform import gfile\n\nfrom tokenization import BertTokenizer\nimport modeling\n\n\ndef get_labels(label_file):\n with open(label_file, 'r', encoding='utf-8') as f:\n labels = f.readlines()\n\n labels = [label.strip() for label in labels]\n return labels\n\n\ndef create_model(bert_config, input_ids, input_mask, segment_ids,\n num_intent_labels, num_slot_labels):\n model = modeling.BertModel(config=bert_config,\n is_training=False,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=segment_ids,\n use_one_hot_embeddings=False)\n\n output_layer = model.get_pooled_output()\n print('output layer :{}'.format(output_layer))\n sequence_output = model.get_sequence_output()\n print('output layer :{}'.format(sequence_output))\n\n hidden_size = output_layer.shape[-1].value\n\n intent_classifier_weight = tf.get_variable(\n \"intent_classifier_weight\", [num_intent_labels, hidden_size],\n initializer=tf.truncated_normal_initializer(stddev=0.02))\n\n intent_classifier_bias = tf.get_variable(\n \"intent_classifier_bias\", [num_intent_labels], initializer=tf.zeros_initializer())\n intent_logits = tf.matmul(output_layer, intent_classifier_weight, transpose_b=True)\n intent_confidence = tf.nn.softmax(tf.nn.bias_add(intent_logits, intent_classifier_bias), name='intent_confidence')\n intent = tf.argmax(intent_confidence, axis = -1, name='intent',output_type=tf.int32)\n slot_classifier_weight = tf.get_variable(\n \"slot_classifier_weight\", [num_slot_labels, hidden_size],\n initializer=tf.truncated_normal_initializer(stddev=0.02))\n\n slot_classifier_bias = tf.get_variable(\n \"slot_classifier_bias\", [num_slot_labels], initializer=tf.zeros_initializer())\n slot_logits = tf.tensordot(sequence_output, tf.transpose(slot_classifier_weight), axes = [[2], [0]])\n slot = tf.argmax(tf.nn.softmax(tf.nn.bias_add(slot_logits, slot_classifier_bias)), \n axis = -1, name='slot',output_type=tf.int32)\n return intent, intent_confidence, slot\n\n\ndef write_data(f, array):\n array = np.array(array)\n num = array.size\n array = np.reshape(array, [num])\n output = str(len(array)) + \" \"\n for x in array:\n output = output + str(x) + \" \";\n f.write(\"%s\\n\" % (output))\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--path\", default=None, required=True, type=str)\n parser.add_argument(\"--ckpt\", default=None, type=str)\n parser.add_argument(\"--use_pb\", default=None, action='store_true')\n parser.add_argument(\"--use_bolt\", default=False, type=bool)\n args = parser.parse_args()\n\n tokenizer = BertTokenizer.from_pretrained(args.path)\n intent_list = get_labels(os.path.join(args.path,'intention_labels.txt'))\n slot_list = get_labels(os.path.join(args.path,'slot_labels.txt'))\n with tf.Session() as sess:\n if args.use_pb:\n print(\"[INFO] use tensorflow .pb model\")\n with gfile.FastGFile(os.path.join(args.path,'model.pb'), 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n sess.graph.as_default()\n tf.import_graph_def(graph_def, name='')\n else:\n print(\"[INFO] use tensorflow .ckpt model\")\n bert_config = modeling.BertConfig.from_json_file(os.path.join(args.ckpt,'config.json'))\n input_ids = tf.placeholder(tf.int32, [None, None], name=\"input_ids\")\n input_mask = tf.placeholder(tf.int32, [None, None], name=\"input_mask\")\n segment_ids = tf.placeholder(tf.int32, [None, None], name=\"segment_ids\")\n tvars = 
tf.trainable_variables()\n initialized_variable_names = {}\n intent, intent_confidence, slot= create_model(\n bert_config, input_ids, input_mask, segment_ids, len(intent_list),len(slot_list))\n saver = tf.train.Saver()\n saver.restore(sess,os.path.join(args.ckpt, 'model.ckpt'))\n\n input_ids = sess.graph.get_tensor_by_name('input_ids:0')\n input_mask = sess.graph.get_tensor_by_name('input_mask:0')\n segment_ids = sess.graph.get_tensor_by_name('segment_ids:0')\n\n intent = sess.graph.get_tensor_by_name('intent:0')\n intent_confidence = sess.graph.get_tensor_by_name('intent_confidence:0')\n slot = sess.graph.get_tensor_by_name('slot:0')\n\n text = \"i need a reminder for every weekend please\"\n tokens = tokenizer.tokenize(text)\n tokens = [\"[CLS]\"] + tokens + [\"[SEP]\"]\n\n input_id = tokenizer.convert_tokens_to_ids(tokens)\n seq_length = len(tokens)\n position = []\n for i in range(seq_length):\n position.append(i)\n segment = [0] * seq_length\n\n if (args.use_bolt):\n print(\"[INFO] use bolt inference\")\n f = open(\"sequence.seq\", \"w\")\n write_data(f, input_id)\n write_data(f, position)\n write_data(f, segment)\n f.close()\n os.system(\"./adb_run.sh\")\n f = open(\"result.txt\", \"r\")\n lines = f.readlines();\n for line in lines:\n line = line.strip();\n if (line.startswith(\"intent:\")):\n array = line.split(\" \")\n intent_id = array[1]\n intent_prob = array[2]\n if (line.startswith(\"slot:\")):\n array = line.split(\" \")\n slot_ids = []\n for i in range(i):\n slot_ids.append(int(array[i+1]))\n if (line.startswith(\"avg_time:\")):\n array = line.split(\":\")\n time_use = (array[1].split(\"ms\"))[0]\n else:\n print(\"[INFO] use tensorflow inference\")\n time_start = time.time()\n ret1, ret2, ret3 = sess.run([intent,intent_confidence,slot], feed_dict={input_ids: [input_id], input_mask: [mask],segment_ids:[segment]})\n time_end = time.time()\n time_use = (time_end - time_start) * 1000.0\n intent_id = ret1[0]\n intent_prob = ret2[0][ret1[0]]\n slot_ids = ret3[0]\n\n print('\\t'.join(tokens))\n print('\\t'.join([slot_list[slot_id] for slot_id in slot_ids]))\n print(\"intention: {}\".format(intent_id))\n print(\"intent confidence: {}\".format(intent_prob))\n print(\"time: {} ms\".format(time_use))\n"
] |
[
[
"numpy.array"
],
[
"tensorflow.zeros_initializer",
"tensorflow.trainable_variables",
"numpy.array",
"numpy.reshape",
"tensorflow.argmax",
"tensorflow.GraphDef",
"tensorflow.matmul",
"tensorflow.Session",
"tensorflow.train.Saver",
"tensorflow.transpose",
"tensorflow.import_graph_def",
"tensorflow.placeholder",
"tensorflow.truncated_normal_initializer",
"tensorflow.nn.bias_add"
]
] |
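Both files in this record drive a TF1-style graph session. As a reference for the classifier-head pattern used in `create_model` (weights via `tf.get_variable`, `tf.matmul(..., transpose_b=True)`, `tf.nn.bias_add`, softmax, `tf.argmax`, then `sess.run`), here is a minimal sketch assuming TensorFlow 1.x (or `tf.compat.v1` under TF2); the placeholder shape, label count, and initializer values are illustrative only and not taken from the repository.

```python
# TF1-style sketch of an intent-classification head (shapes and stddev are illustrative).
import numpy as np
import tensorflow as tf  # assumes TensorFlow 1.x; under TF2 use tf.compat.v1

num_labels, hidden_size = 5, 768
pooled = tf.placeholder(tf.float32, [None, hidden_size], name="pooled_output")
w = tf.get_variable("w", [num_labels, hidden_size],
                    initializer=tf.truncated_normal_initializer(stddev=0.02))
b = tf.get_variable("b", [num_labels], initializer=tf.zeros_initializer())
logits = tf.matmul(pooled, w, transpose_b=True)          # [batch, num_labels]
probs = tf.nn.softmax(tf.nn.bias_add(logits, b))
pred = tf.argmax(probs, axis=-1, output_type=tf.int32)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(pred, feed_dict={pooled: np.zeros((1, hidden_size), np.float32)}))
```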
ultmaster/tianshou
|
[
"3ac67d9974b6bd3e3d7feac7738ca6de33b317c7",
"3ac67d9974b6bd3e3d7feac7738ca6de33b317c7"
] |
[
"test/discrete/test_a2c_with_il.py",
"examples/mujoco/mujoco_reinforce.py"
] |
[
"import os\nimport gym\nimport torch\nimport pprint\nimport argparse\nimport numpy as np\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom tianshou.utils import BasicLogger\nfrom tianshou.env import DummyVectorEnv\nfrom tianshou.utils.net.common import Net\nfrom tianshou.data import Collector, VectorReplayBuffer\nfrom tianshou.utils.net.discrete import Actor, Critic\nfrom tianshou.policy import A2CPolicy, ImitationPolicy\nfrom tianshou.trainer import onpolicy_trainer, offpolicy_trainer\n\n\ndef get_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--task', type=str, default='CartPole-v0')\n parser.add_argument('--seed', type=int, default=1)\n parser.add_argument('--buffer-size', type=int, default=20000)\n parser.add_argument('--lr', type=float, default=1e-3)\n parser.add_argument('--il-lr', type=float, default=1e-3)\n parser.add_argument('--gamma', type=float, default=0.9)\n parser.add_argument('--epoch', type=int, default=10)\n parser.add_argument('--step-per-epoch', type=int, default=50000)\n parser.add_argument('--il-step-per-epoch', type=int, default=1000)\n parser.add_argument('--episode-per-collect', type=int, default=16)\n parser.add_argument('--step-per-collect', type=int, default=16)\n parser.add_argument('--update-per-step', type=float, default=1 / 16)\n parser.add_argument('--repeat-per-collect', type=int, default=1)\n parser.add_argument('--batch-size', type=int, default=64)\n parser.add_argument('--hidden-sizes', type=int,\n nargs='*', default=[64, 64])\n parser.add_argument('--imitation-hidden-sizes', type=int,\n nargs='*', default=[128])\n parser.add_argument('--training-num', type=int, default=16)\n parser.add_argument('--test-num', type=int, default=100)\n parser.add_argument('--logdir', type=str, default='log')\n parser.add_argument('--render', type=float, default=0.)\n parser.add_argument(\n '--device', type=str,\n default='cuda' if torch.cuda.is_available() else 'cpu')\n # a2c special\n parser.add_argument('--vf-coef', type=float, default=0.5)\n parser.add_argument('--ent-coef', type=float, default=0.0)\n parser.add_argument('--max-grad-norm', type=float, default=None)\n parser.add_argument('--gae-lambda', type=float, default=1.)\n parser.add_argument('--rew-norm', action=\"store_true\", default=False)\n args = parser.parse_known_args()[0]\n return args\n\n\ndef test_a2c_with_il(args=get_args()):\n torch.set_num_threads(1) # for poor CPU\n env = gym.make(args.task)\n args.state_shape = env.observation_space.shape or env.observation_space.n\n args.action_shape = env.action_space.shape or env.action_space.n\n # you can also use tianshou.env.SubprocVectorEnv\n # train_envs = gym.make(args.task)\n train_envs = DummyVectorEnv(\n [lambda: gym.make(args.task) for _ in range(args.training_num)])\n # test_envs = gym.make(args.task)\n test_envs = DummyVectorEnv(\n [lambda: gym.make(args.task) for _ in range(args.test_num)])\n # seed\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n train_envs.seed(args.seed)\n test_envs.seed(args.seed)\n # model\n net = Net(args.state_shape, hidden_sizes=args.hidden_sizes,\n device=args.device)\n actor = Actor(net, args.action_shape, device=args.device).to(args.device)\n critic = Critic(net, device=args.device).to(args.device)\n optim = torch.optim.Adam(set(\n actor.parameters()).union(critic.parameters()), lr=args.lr)\n dist = torch.distributions.Categorical\n policy = A2CPolicy(\n actor, critic, optim, dist,\n discount_factor=args.gamma, gae_lambda=args.gae_lambda,\n vf_coef=args.vf_coef, 
ent_coef=args.ent_coef,\n max_grad_norm=args.max_grad_norm, reward_normalization=args.rew_norm,\n action_space=env.action_space)\n # collector\n train_collector = Collector(\n policy, train_envs,\n VectorReplayBuffer(args.buffer_size, len(train_envs)),\n exploration_noise=True)\n test_collector = Collector(policy, test_envs)\n # log\n log_path = os.path.join(args.logdir, args.task, 'a2c')\n writer = SummaryWriter(log_path)\n logger = BasicLogger(writer)\n\n def save_fn(policy):\n torch.save(policy.state_dict(), os.path.join(log_path, 'policy.pth'))\n\n def stop_fn(mean_rewards):\n return mean_rewards >= env.spec.reward_threshold\n\n # trainer\n result = onpolicy_trainer(\n policy, train_collector, test_collector, args.epoch,\n args.step_per_epoch, args.repeat_per_collect, args.test_num, args.batch_size,\n episode_per_collect=args.episode_per_collect, stop_fn=stop_fn, save_fn=save_fn,\n logger=logger)\n assert stop_fn(result['best_reward'])\n if __name__ == '__main__':\n pprint.pprint(result)\n # Let's watch its performance!\n env = gym.make(args.task)\n policy.eval()\n collector = Collector(policy, env)\n result = collector.collect(n_episode=1, render=args.render)\n rews, lens = result[\"rews\"], result[\"lens\"]\n print(f\"Final reward: {rews.mean()}, length: {lens.mean()}\")\n\n policy.eval()\n # here we define an imitation collector with a trivial policy\n if args.task == 'CartPole-v0':\n env.spec.reward_threshold = 190 # lower the goal\n net = Net(args.state_shape, hidden_sizes=args.hidden_sizes,\n device=args.device)\n net = Actor(net, args.action_shape, device=args.device).to(args.device)\n optim = torch.optim.Adam(net.parameters(), lr=args.il_lr)\n il_policy = ImitationPolicy(net, optim, mode='discrete')\n il_test_collector = Collector(\n il_policy,\n DummyVectorEnv([lambda: gym.make(args.task) for _ in range(args.test_num)])\n )\n train_collector.reset()\n result = offpolicy_trainer(\n il_policy, train_collector, il_test_collector, args.epoch,\n args.il_step_per_epoch, args.step_per_collect, args.test_num,\n args.batch_size, stop_fn=stop_fn, save_fn=save_fn, logger=logger)\n assert stop_fn(result['best_reward'])\n if __name__ == '__main__':\n pprint.pprint(result)\n # Let's watch its performance!\n env = gym.make(args.task)\n il_policy.eval()\n collector = Collector(il_policy, env)\n result = collector.collect(n_episode=1, render=args.render)\n rews, lens = result[\"rews\"], result[\"lens\"]\n print(f\"Final reward: {rews.mean()}, length: {lens.mean()}\")\n\n\nif __name__ == '__main__':\n test_a2c_with_il()\n",
"#!/usr/bin/env python3\n\nimport os\nimport gym\nimport torch\nimport datetime\nimport argparse\nimport numpy as np\nfrom torch import nn\nfrom torch.optim.lr_scheduler import LambdaLR\nfrom torch.utils.tensorboard import SummaryWriter\nfrom torch.distributions import Independent, Normal\n\nfrom tianshou.policy import PGPolicy\nfrom tianshou.utils import BasicLogger\nfrom tianshou.env import SubprocVectorEnv\nfrom tianshou.utils.net.common import Net\nfrom tianshou.trainer import onpolicy_trainer\nfrom tianshou.utils.net.continuous import ActorProb\nfrom tianshou.data import Collector, ReplayBuffer, VectorReplayBuffer\n\n\ndef get_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--task', type=str, default='HalfCheetah-v3')\n parser.add_argument('--seed', type=int, default=0)\n parser.add_argument('--buffer-size', type=int, default=4096)\n parser.add_argument('--hidden-sizes', type=int, nargs='*', default=[64, 64])\n parser.add_argument('--lr', type=float, default=1e-3)\n parser.add_argument('--gamma', type=float, default=0.99)\n parser.add_argument('--epoch', type=int, default=100)\n parser.add_argument('--step-per-epoch', type=int, default=30000)\n parser.add_argument('--step-per-collect', type=int, default=2048)\n parser.add_argument('--repeat-per-collect', type=int, default=1)\n # batch-size >> step-per-collect means caculating all data in one singe forward.\n parser.add_argument('--batch-size', type=int, default=99999)\n parser.add_argument('--training-num', type=int, default=64)\n parser.add_argument('--test-num', type=int, default=10)\n parser.add_argument('--logdir', type=str, default='log')\n parser.add_argument('--render', type=float, default=0.)\n parser.add_argument(\n '--device', type=str,\n default='cuda' if torch.cuda.is_available() else 'cpu')\n parser.add_argument('--resume-path', type=str, default=None)\n # reinforce special\n parser.add_argument('--rew-norm', type=int, default=True)\n # \"clip\" option also works well.\n parser.add_argument('--action-bound-method', type=str, default=\"tanh\")\n parser.add_argument('--lr-decay', type=int, default=True)\n return parser.parse_args()\n\n\ndef test_reinforce(args=get_args()):\n env = gym.make(args.task)\n args.state_shape = env.observation_space.shape or env.observation_space.n\n args.action_shape = env.action_space.shape or env.action_space.n\n args.max_action = env.action_space.high[0]\n print(\"Observations shape:\", args.state_shape)\n print(\"Actions shape:\", args.action_shape)\n print(\"Action range:\", np.min(env.action_space.low),\n np.max(env.action_space.high))\n # train_envs = gym.make(args.task)\n train_envs = SubprocVectorEnv(\n [lambda: gym.make(args.task) for _ in range(args.training_num)],\n norm_obs=True)\n # test_envs = gym.make(args.task)\n test_envs = SubprocVectorEnv(\n [lambda: gym.make(args.task) for _ in range(args.test_num)],\n norm_obs=True, obs_rms=train_envs.obs_rms, update_obs_rms=False)\n\n # seed\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n train_envs.seed(args.seed)\n test_envs.seed(args.seed)\n # model\n net_a = Net(args.state_shape, hidden_sizes=args.hidden_sizes,\n activation=nn.Tanh, device=args.device)\n actor = ActorProb(net_a, args.action_shape, max_action=args.max_action,\n unbounded=True, device=args.device).to(args.device)\n torch.nn.init.constant_(actor.sigma_param, -0.5)\n for m in actor.modules():\n if isinstance(m, torch.nn.Linear):\n # orthogonal initialization\n torch.nn.init.orthogonal_(m.weight, gain=np.sqrt(2))\n 
torch.nn.init.zeros_(m.bias)\n # do last policy layer scaling, this will make initial actions have (close to)\n # 0 mean and std, and will help boost performances,\n # see https://arxiv.org/abs/2006.05990, Fig.24 for details\n for m in actor.mu.modules():\n if isinstance(m, torch.nn.Linear):\n torch.nn.init.zeros_(m.bias)\n m.weight.data.copy_(0.01 * m.weight.data)\n\n optim = torch.optim.Adam(actor.parameters(), lr=args.lr)\n lr_scheduler = None\n if args.lr_decay:\n # decay learning rate to 0 linearly\n max_update_num = np.ceil(\n args.step_per_epoch / args.step_per_collect) * args.epoch\n\n lr_scheduler = LambdaLR(\n optim, lr_lambda=lambda epoch: 1 - epoch / max_update_num)\n\n def dist(*logits):\n return Independent(Normal(*logits), 1)\n\n policy = PGPolicy(actor, optim, dist, discount_factor=args.gamma,\n reward_normalization=args.rew_norm, action_scaling=True,\n action_bound_method=args.action_bound_method,\n lr_scheduler=lr_scheduler, action_space=env.action_space)\n\n # collector\n if args.training_num > 1:\n buffer = VectorReplayBuffer(args.buffer_size, len(train_envs))\n else:\n buffer = ReplayBuffer(args.buffer_size)\n train_collector = Collector(policy, train_envs, buffer, exploration_noise=True)\n test_collector = Collector(policy, test_envs)\n # log\n t0 = datetime.datetime.now().strftime(\"%m%d_%H%M%S\")\n log_file = f'seed_{args.seed}_{t0}-{args.task.replace(\"-\", \"_\")}_reinforce'\n log_path = os.path.join(args.logdir, args.task, 'reinforce', log_file)\n writer = SummaryWriter(log_path)\n writer.add_text(\"args\", str(args))\n logger = BasicLogger(writer, update_interval=10)\n\n def save_fn(policy):\n torch.save(policy.state_dict(), os.path.join(log_path, 'policy.pth'))\n\n # trainer\n result = onpolicy_trainer(\n policy, train_collector, test_collector, args.epoch, args.step_per_epoch,\n args.repeat_per_collect, args.test_num, args.batch_size,\n step_per_collect=args.step_per_collect, save_fn=save_fn, logger=logger,\n test_in_train=False)\n\n # Let's watch its performance!\n policy.eval()\n test_envs.seed(args.seed)\n test_collector.reset()\n result = test_collector.collect(n_episode=args.test_num, render=args.render)\n print(f'Final reward: {result[\"rews\"].mean()}, length: {result[\"lens\"].mean()}')\n\n\nif __name__ == '__main__':\n test_reinforce()\n"
] |
[
[
"numpy.random.seed",
"torch.manual_seed",
"torch.cuda.is_available",
"torch.utils.tensorboard.SummaryWriter",
"torch.set_num_threads"
],
[
"numpy.max",
"numpy.ceil",
"torch.nn.init.constant_",
"numpy.random.seed",
"torch.distributions.Normal",
"numpy.min",
"torch.manual_seed",
"torch.cuda.is_available",
"numpy.sqrt",
"torch.optim.lr_scheduler.LambdaLR",
"torch.nn.init.zeros_",
"torch.utils.tensorboard.SummaryWriter"
]
] |
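The two tianshou scripts above share the boilerplate that this API list reflects: seeding NumPy and torch for reproducibility, orthogonal/zero initialization of the policy layers, and a linearly decaying learning rate via `LambdaLR`. The sketch below isolates that boilerplate; the layer sizes, learning rate, and `max_update_num` are placeholder values, not the scripts' defaults.

```python
# Seeding, orthogonal init, and linear LR decay, condensed (all values are placeholders).
import numpy as np
import torch
from torch import nn
from torch.optim.lr_scheduler import LambdaLR

seed = 0
np.random.seed(seed)
torch.manual_seed(seed)

layer = nn.Linear(8, 2)                                  # stand-in for a policy layer
nn.init.orthogonal_(layer.weight, gain=np.sqrt(2))
nn.init.zeros_(layer.bias)

optim = torch.optim.Adam(layer.parameters(), lr=1e-3)
max_update_num = 100                                     # the scripts derive this from epochs/steps
scheduler = LambdaLR(optim, lr_lambda=lambda epoch: 1 - epoch / max_update_num)
```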
DwayneDuane/tensorflow
|
[
"c90698124aa164e7683e3a9d03b69e9aa8461244"
] |
[
"tensorflow/lite/python/lite_flex_test.py"
] |
[
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for lite.py functionality related to select TF op usage.\"\"\"\n\nimport os\n\nfrom absl.testing import parameterized\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow.core.framework import graph_pb2\nfrom tensorflow.lite.python import lite\nfrom tensorflow.lite.python import test_util as tflite_test_util\nfrom tensorflow.lite.python.convert import register_custom_opdefs\nfrom tensorflow.lite.python.interpreter import Interpreter\nfrom tensorflow.lite.python.testdata import double_op\nfrom tensorflow.python.client import session\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.framework.importer import import_graph_def\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import nn_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.saved_model import saved_model\nfrom tensorflow.python.trackable import autotrackable\n\n\nclass FromSessionTest(test_util.TensorFlowTestCase, parameterized.TestCase):\n\n @parameterized.named_parameters(\n ('EnableMlirConverter', True), # enable mlir\n ('DisableMlirConverter', False)) # disable mlir\n def testFlexMode(self, enable_mlir):\n with ops.Graph().as_default():\n in_tensor = array_ops.placeholder(shape=[1, 4], dtype=dtypes.float32)\n out_tensor = in_tensor + in_tensor\n sess = session.Session()\n\n # Convert model and ensure model is not None.\n converter = lite.TFLiteConverter.from_session(sess, [in_tensor],\n [out_tensor])\n converter.target_spec.supported_ops = set([lite.OpsSet.SELECT_TF_OPS])\n converter.experimental_new_converter = enable_mlir\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n # Check the model works with TensorFlow ops.\n interpreter = Interpreter(model_content=tflite_model)\n interpreter.allocate_tensors()\n input_details = interpreter.get_input_details()\n test_input = np.array([[1.0, 2.0, 3.0, 4.0]], dtype=np.float32)\n interpreter.set_tensor(input_details[0]['index'], test_input)\n interpreter.invoke()\n\n output_details = interpreter.get_output_details()\n expected_output = np.array([[2.0, 4.0, 6.0, 8.0]], dtype=np.float32)\n output_data = interpreter.get_tensor(output_details[0]['index'])\n self.assertTrue((expected_output == output_data).all())\n\n def testFlexWithAutomaticPassThrough(self):\n # Create a graph that has one L2Loss op.\n with ops.Graph().as_default():\n with session.Session() as sess:\n in_tensor = array_ops.placeholder(\n shape=[4], dtype=dtypes.float32, name='input')\n out_tensor = nn_ops.l2_loss(in_tensor)\n converter = lite.TFLiteConverter.from_session(sess, 
[in_tensor],\n [out_tensor])\n converter.target_spec.supported_ops = set([lite.OpsSet.SELECT_TF_OPS])\n converter._experimental_allow_all_select_tf_ops = True\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n self.assertIn('FlexL2Loss', tflite_test_util.get_ops_list(tflite_model))\n\n def testDeprecatedFlags(self):\n with ops.Graph().as_default():\n in_tensor = array_ops.placeholder(shape=[1, 4], dtype=dtypes.float32)\n out_tensor = in_tensor + in_tensor\n sess = session.Session()\n\n # Convert model and ensure model is not None.\n converter = lite.TFLiteConverter.from_session(sess, [in_tensor],\n [out_tensor])\n converter.target_ops = set([lite.OpsSet.SELECT_TF_OPS])\n\n # Ensure `target_ops` is set to the correct value after flag deprecation.\n self.assertEqual(converter.target_ops, set([lite.OpsSet.SELECT_TF_OPS]))\n self.assertEqual(converter.target_spec.supported_ops,\n set([lite.OpsSet.SELECT_TF_OPS]))\n\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n # Check the model works with TensorFlow ops.\n interpreter = Interpreter(model_content=tflite_model)\n interpreter.allocate_tensors()\n input_details = interpreter.get_input_details()\n test_input = np.array([[1.0, 2.0, 3.0, 4.0]], dtype=np.float32)\n interpreter.set_tensor(input_details[0]['index'], test_input)\n interpreter.invoke()\n\n output_details = interpreter.get_output_details()\n expected_output = np.array([[2.0, 4.0, 6.0, 8.0]], dtype=np.float32)\n output_data = interpreter.get_tensor(output_details[0]['index'])\n self.assertTrue((expected_output == output_data).all())\n\n\nclass FromConcreteFunctionTest(test_util.TensorFlowTestCase,\n parameterized.TestCase):\n\n @parameterized.named_parameters(\n ('EnableMlirConverter', True), # enable mlir\n ('DisableMlirConverter', False)) # disable mlir\n @test_util.run_v2_only\n def testFloat(self, enable_mlir):\n input_data = constant_op.constant(1., shape=[1])\n root = autotrackable.AutoTrackable()\n root.v1 = variables.Variable(3.)\n root.v2 = variables.Variable(2.)\n root.f = def_function.function(lambda x: root.v1 * root.v2 * x)\n concrete_func = root.f.get_concrete_function(input_data)\n\n # Convert model.\n converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],\n root)\n converter.target_spec.supported_ops = set([lite.OpsSet.SELECT_TF_OPS])\n converter.experimental_new_converter = enable_mlir\n tflite_model = converter.convert()\n\n # Check the model works with TensorFlow ops.\n interpreter = Interpreter(model_content=tflite_model)\n interpreter.allocate_tensors()\n input_details = interpreter.get_input_details()\n test_input = np.array([4.0], dtype=np.float32)\n interpreter.set_tensor(input_details[0]['index'], test_input)\n interpreter.invoke()\n\n output_details = interpreter.get_output_details()\n expected_output = np.array([24.0], dtype=np.float32)\n output_data = interpreter.get_tensor(output_details[0]['index'])\n self.assertTrue((expected_output == output_data).all())\n\n\nclass WithCustomOpTest(test_util.TensorFlowTestCase, parameterized.TestCase):\n\n def _createGraphWithCustomOp(self, opname='CustomAdd'):\n custom_opdefs_str = (\n 'name: \\'' + opname + '\\' input_arg: {name: \\'Input1\\' type: DT_FLOAT} '\n 'input_arg: {name: \\'Input2\\' type: DT_FLOAT} output_arg: {name: '\n '\\'Output\\' type: DT_FLOAT}')\n\n # Create a graph that has one add op.\n new_graph = graph_pb2.GraphDef()\n with ops.Graph().as_default():\n with session.Session() as sess:\n in_tensor = array_ops.placeholder(\n shape=[1, 16, 
16, 3], dtype=dtypes.float32, name='input')\n out_tensor = in_tensor + in_tensor\n inputs = {'x': in_tensor}\n outputs = {'z': out_tensor}\n\n new_graph.CopyFrom(sess.graph_def)\n\n # Rename Add op name to opname.\n for node in new_graph.node:\n if node.op.startswith('Add'):\n node.op = opname\n del node.attr['T']\n\n # Register custom op defs to import modified graph def.\n register_custom_opdefs([custom_opdefs_str])\n\n return (new_graph, inputs, outputs)\n\n def testFlexWithCustomOp(self):\n new_graph, inputs, outputs = self._createGraphWithCustomOp(\n opname='CustomAdd4')\n\n # Import to load the custom opdef.\n saved_model_dir = os.path.join(self.get_temp_dir(), 'model')\n with ops.Graph().as_default():\n with session.Session() as sess:\n import_graph_def(new_graph, name='')\n saved_model.simple_save(sess, saved_model_dir, inputs, outputs)\n\n converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir)\n converter.target_spec.supported_ops = set([lite.OpsSet.SELECT_TF_OPS])\n converter.target_spec.experimental_select_user_tf_ops = ['CustomAdd4']\n tflite_model = converter.convert()\n\n self.assertIn('FlexCustomAdd4', tflite_test_util.get_ops_list(tflite_model))\n\n def testFlexWithDoubleOp(self):\n # Create a graph that has one double op.\n saved_model_dir = os.path.join(self.get_temp_dir(), 'model2')\n with ops.Graph().as_default():\n with session.Session() as sess:\n in_tensor = array_ops.placeholder(\n shape=[1, 4], dtype=dtypes.int32, name='input')\n out_tensor = double_op.double(in_tensor)\n inputs = {'x': in_tensor}\n outputs = {'z': out_tensor}\n saved_model.simple_save(sess, saved_model_dir, inputs, outputs)\n\n converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir)\n converter.target_spec.supported_ops = set([lite.OpsSet.SELECT_TF_OPS])\n converter.target_spec.experimental_select_user_tf_ops = ['Double']\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n self.assertIn('FlexDouble', tflite_test_util.get_ops_list(tflite_model))\n\n # Check the model works with TensorFlow ops.\n interpreter = Interpreter(model_content=tflite_model)\n interpreter.allocate_tensors()\n input_details = interpreter.get_input_details()\n test_input = np.array([[1.0, 2.0, 3.0, 4.0]], dtype=np.int32)\n interpreter.set_tensor(input_details[0]['index'], test_input)\n interpreter.invoke()\n\n output_details = interpreter.get_output_details()\n expected_output = np.array([[2.0, 4.0, 6.0, 8.0]], dtype=np.int32)\n output_data = interpreter.get_tensor(output_details[0]['index'])\n self.assertTrue((expected_output == output_data).all())\n\n\nclass FromSavedModelTest(test_util.TensorFlowTestCase):\n\n @test_util.run_v2_only\n def testFlexResourceVariables(self):\n\n class Model(tf.Module):\n\n def __init__(self):\n self.v = tf.Variable([[0.0, 0.0, 0.0, 0.0]])\n\n @tf.function(\n input_signature=[tf.TensorSpec(shape=[1, 4], dtype=tf.float32)])\n def eval(self, x):\n # Control flow is needed to generate \"FlexReadVariableOp\".\n if tf.reduce_mean(x) > 1.0:\n self.v.assign_add([[1.0, 1.0, 1.0, 1.0]])\n return self.v + x\n\n m = Model()\n to_save = m.eval.get_concrete_function()\n save_dir = os.path.join(self.get_temp_dir(), 'saved_model')\n tf.saved_model.save(m, save_dir, to_save)\n converter = tf.lite.TFLiteConverter.from_saved_model(save_dir)\n\n converter.target_spec.supported_ops = [\n tf.lite.OpsSet.TFLITE_BUILTINS,\n tf.lite.OpsSet.SELECT_TF_OPS,\n ]\n converter.experimental_enable_resource_variables = True\n tflite_model = converter.convert()\n\n # Check the model 
works with TensorFlow ops.\n interpreter = Interpreter(model_content=tflite_model)\n signature_runner = interpreter.get_signature_runner()\n outputs = signature_runner(\n x=np.array([[1.0, 2.0, 3.0, 4.0]], dtype=np.float32))\n expected_output = np.array([[2.0, 3.0, 4.0, 5.0]], dtype=np.float32)\n self.assertTrue((expected_output == list(outputs.values())[0]).all)\n\n # Second run.\n outputs = signature_runner(\n x=np.array([[1.0, 2.0, 3.0, 4.0]], dtype=np.float32))\n expected_output = np.array([[3.0, 4.0, 5.0, 6.0]], dtype=np.float32)\n self.assertTrue((expected_output == list(outputs.values())[0]).all)\n\n\nclass TFQuantizationTest(test_util.TensorFlowTestCase, parameterized.TestCase):\n\n @parameterized.named_parameters(('DefaultMode', 'DEFAULT'),\n ('LegacyIntegerMode', 'LEGACY_INTEGER'))\n def testAddOp(self, tf_quantization_mode):\n root = autotrackable.AutoTrackable()\n root.add_func = def_function.function(lambda x: x + x)\n input_data = tf.reshape(tf.range(4, dtype=tf.float32), [1, 4])\n concrete_func = root.add_func.get_concrete_function(input_data)\n\n # Convert model and check if the op is not flex.\n converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],\n root)\n converter._experimental_tf_quantization_mode = tf_quantization_mode\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n if tf_quantization_mode == 'LEGACY_INTEGER':\n self.assertIn('ADD', tflite_test_util.get_ops_list(tflite_model))\n else:\n self.assertIn('FlexAddV2', tflite_test_util.get_ops_list(tflite_model))\n\n # Check the model works.\n interpreter = Interpreter(model_content=tflite_model)\n interpreter.allocate_tensors()\n input_details = interpreter.get_input_details()\n test_input = np.array([[1.0, 2.0, 3.0, 4.0]], dtype=np.float32)\n interpreter.set_tensor(input_details[0]['index'], test_input)\n interpreter.invoke()\n\n output_details = interpreter.get_output_details()\n expected_output = np.array([[2.0, 4.0, 6.0, 8.0]], dtype=np.float32)\n output_data = interpreter.get_tensor(output_details[0]['index'])\n self.assertTrue((expected_output == output_data).all())\n\n @parameterized.named_parameters(('DefaultMode', 'DEFAULT'),\n ('LegacyIntegerMode', 'LEGACY_INTEGER'))\n def testL2LossOp(self, tf_quantization_mode):\n root = autotrackable.AutoTrackable()\n root.l2_loss_func = def_function.function(lambda x: nn_ops.l2_loss(x)) # pylint: disable=unnecessary-lambda\n input_data = tf.range(4, dtype=tf.float32)\n concrete_func = root.l2_loss_func.get_concrete_function(input_data)\n\n converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],\n root)\n converter._experimental_tf_quantization_mode = tf_quantization_mode\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n self.assertIn('FlexL2Loss', tflite_test_util.get_ops_list(tflite_model))\n\n # Check the model works.\n interpreter = Interpreter(model_content=tflite_model)\n interpreter.allocate_tensors()\n input_details = interpreter.get_input_details()\n test_input = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)\n interpreter.set_tensor(input_details[0]['index'], test_input)\n interpreter.invoke()\n\n output_details = interpreter.get_output_details()\n expected_output = np.array([15.0], dtype=np.float32)\n output_data = interpreter.get_tensor(output_details[0]['index'])\n self.assertTrue((expected_output == output_data).all())\n\n @parameterized.named_parameters(('DefaultMode', 'DEFAULT'),\n ('LegacyIntegerMode', 'LEGACY_INTEGER'))\n def testConvOpWithBias(self, 
tf_quantization_mode):\n\n class ConvModel(autotrackable.AutoTrackable):\n\n @def_function.function\n def conv_func(self, in_tensor, filter_tensor):\n bias = constant_op.constant(3., shape=[1])\n conv_tensor = tf.nn.conv2d(\n in_tensor,\n filter_tensor,\n strides=[1, 1, 1, 1],\n dilations=[1, 1, 1, 1],\n padding='VALID',\n data_format='NHWC')\n conv_tensor = conv_tensor + bias\n return tf.nn.relu(conv_tensor)\n\n root = ConvModel()\n input_data = tf.reshape(tf.range(4, dtype=tf.float32), [1, 2, 2, 1])\n filter_data = tf.reshape(tf.range(2, dtype=tf.float32), [1, 2, 1, 1])\n concrete_func = root.conv_func.get_concrete_function(\n input_data, filter_data)\n\n converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],\n root)\n converter._experimental_tf_quantization_mode = tf_quantization_mode\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n self.assertCountEqual(['CONV_2D', 'RESHAPE'],\n tflite_test_util.get_ops_list(tflite_model))\n\n # Check the model works.\n interpreter = Interpreter(model_content=tflite_model)\n interpreter.allocate_tensors()\n input_details = interpreter.get_input_details()\n test_input = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32).reshape(\n (1, 2, 2, 1))\n interpreter.set_tensor(input_details[0]['index'], test_input)\n test_filter = np.array([1.0, 0.0], dtype=np.float32).reshape((1, 2, 1, 1))\n interpreter.set_tensor(input_details[1]['index'], test_filter)\n interpreter.invoke()\n\n output_details = interpreter.get_output_details()\n expected_output = np.array([[[[4.]], [[6.]]]], dtype=np.float32)\n output_data = interpreter.get_tensor(output_details[0]['index'])\n self.assertTrue((expected_output == output_data).all())\n\n\nif __name__ == '__main__':\n test.main()\n"
] |
[
[
"tensorflow.python.ops.variables.Variable",
"tensorflow.nn.conv2d",
"tensorflow.lite.TFLiteConverter.from_saved_model",
"tensorflow.python.ops.nn_ops.l2_loss",
"tensorflow.lite.python.test_util.get_ops_list",
"tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions",
"tensorflow.saved_model.save",
"tensorflow.python.platform.test.main",
"tensorflow.python.saved_model.saved_model.simple_save",
"tensorflow.python.client.session.Session",
"tensorflow.Variable",
"tensorflow.lite.python.interpreter.Interpreter",
"tensorflow.python.ops.array_ops.placeholder",
"tensorflow.lite.python.testdata.double_op.double",
"tensorflow.lite.python.convert.register_custom_opdefs",
"tensorflow.python.framework.importer.import_graph_def",
"numpy.array",
"tensorflow.range",
"tensorflow.python.framework.ops.Graph",
"tensorflow.nn.relu",
"tensorflow.lite.python.lite.TFLiteConverter.from_session",
"tensorflow.python.eager.def_function.function",
"tensorflow.core.framework.graph_pb2.GraphDef",
"tensorflow.lite.python.lite.TFLiteConverterV2.from_saved_model",
"tensorflow.TensorSpec",
"tensorflow.python.trackable.autotrackable.AutoTrackable",
"tensorflow.python.framework.constant_op.constant",
"tensorflow.reduce_mean"
]
] |
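The TensorFlow Lite tests in this record repeatedly follow the same convert-then-execute pattern: build a converter, enable `SELECT_TF_OPS` in `target_spec.supported_ops`, convert, then run the flatbuffer through `Interpreter`. The sketch below reproduces that pattern with a trivial `x + x` function (which converts as a builtin, so the flex op set is not strictly needed here); it assumes TensorFlow 2.x and is not taken from the test file.

```python
# Convert-then-run sketch for a trivial tf.function (assumes TensorFlow 2.x).
import numpy as np
import tensorflow as tf

@tf.function(input_signature=[tf.TensorSpec(shape=[1, 4], dtype=tf.float32)])
def double(x):
    return x + x

converter = tf.lite.TFLiteConverter.from_concrete_functions([double.get_concrete_function()])
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS,
                                       tf.lite.OpsSet.SELECT_TF_OPS]
tflite_model = converter.convert()

interpreter = tf.lite.Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
inp = interpreter.get_input_details()[0]
interpreter.set_tensor(inp['index'], np.array([[1.0, 2.0, 3.0, 4.0]], dtype=np.float32))
interpreter.invoke()
out = interpreter.get_output_details()[0]
print(interpreter.get_tensor(out['index']))              # [[2. 4. 6. 8.]]
```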
GLivshits/Transistors
|
[
"4452c17a910a706326de0b6d0c95e681fba030dd"
] |
[
"Probestation_via_classes.py"
] |
[
"import os\nimport serial\nimport pyvisa\nimport matplotlib as mpl\nfrom matplotlib import pyplot as plt\nimport pandas as pd\nimport numpy as np\nimport time\n\n\nclass Probestation_FETs(object):\n\n def __init__(self, measurement_type):\n\n self.measurement_type = measurement_type\n\n def turn_on_keysight(self):\n\n rm = pyvisa.ResourceManager(r'C:\\\\Windows\\\\System32\\\\visa64.dll')\n self.my_inst = rm.open_resource(r'GPIB0::17::INSTR')\n del self.my_inst.timeout\n\n self.my_inst.query('*IDN?')\n\n self.my_inst.write('*RST')\n\n self.my_inst.write('*SRE 59')\n\n self.my_inst.write('FMT 12,1')\n\n self.my_inst.write('CN 101') # enables channel 101\n self.my_inst.write('CN 201') # enables channel 201\n\n self.my_inst.write('MM 2,101,201') # enables staircase sweep for channels 101 and 201\n\n self.my_inst.write('CMM 101,1') # sets current measurement (1) for channel 101\n self.my_inst.write('CMM 201,1') # sets current measurement(1) for channel 201\n\n self.my_inst.write('RI 101,0') # sets auto-ranging (0) for channel 101\n self.my_inst.write('RI 201,0') # sets auto-ranging (0) for channel 201\n\n self.my_inst.write('AAD 101,1;AZ 0;AIT 1, 2, 10') # sets high resolution (1) for channel 101\n self.my_inst.write('AAD 201,1;AZ 0;AIT 1, 2, 10') # sets high resolution (1) for channel 201\n\n self.my_inst.write('AZ 0') # ADC zero function disabled (0)\n\n self.my_inst.write('AIT 1, 2, 10')\n\n self.my_inst.write('WAT 1,2,0.1')\n self.my_inst.write('WAT 2,6,0.1')\n\n self.my_inst.write('FL 1,101') # connect (1) to channel 101\n self.my_inst.write('FL 1,201') # connect (1) to channel 201\n\n self.my_inst.write('SSR 101,1') # turns on (1) resistor on channel 101\n self.my_inst.write('SSR 201,0') # turns off (0) resistor on channel 201\n\n self.my_inst.write('WT 1,0.1,0.1,0,0')\n self.error_check()\n\n def turn_on_drive(self):\n\n self.drive = serial.Serial('COM3', baudrate=9600, write_timeout=2, timeout=2, rtscts=True)\n\n def error_check(self):\n\n k = self.my_inst.query('ERRX?')\n if k.startswith('+0') == False:\n print(k)\n\n def measure(self, Vs1, Vs2, num_Vs, Vc, num_cycles):\n\n self.data = np.zeros((num_Vs * len(Vc) * 2, 2 + 2 * num_cycles))\n\n self.test_data = np.zeros((22*len(Vc)))\n\n self.num_cycles = num_cycles\n\n locmax = mpl.ticker.SymmetricalLogLocator(base=10.0, linthresh=1e-13, subs=(0.1, 1, 10, 100,))\n locmin = mpl.ticker.SymmetricalLogLocator(base=10.0, linthresh=1e-13, subs=(0.2, 0.4, 0.6, 0.8, 1,))\n\n fig, ax = plt.subplots(1, 2)\n\n\n for i in range(0, len(Vc)):\n\n status = ''\n\n self.my_inst.write('DV 201,0,{},0.1,0,0'.format(Vc[i]))\n self.error_check()\n\n self.my_inst.write('WV 101,3,0,{},{},11,0.1,0.1'.format(Vs1, Vs2))\n self.error_check()\n\n self.my_inst.write('WM 1,1')\n self.error_check()\n\n self.my_inst.write('BC')\n\n self.my_inst.write('XE')\n\n self.my_inst.query('*OPC?')\n\n self.my_inst.query('NUB?')\n\n self.error_check()\n\n data_raw = self.my_inst.read_ascii_values()\n\n for k in range(0, 22):\n self.test_data[2 * i * num_Vs + k, 0] = Vc[i]\n self.test_data[2 * i * num_Vs + k, 1] = float(data_raw[3 * k + 2])\n self.test_data[2 * i * num_Vs + k, 2] = float(data_raw[3 * k + 1])\n self.test_data[2 * i * num_Vs + k, 3] = float(data_raw[3 * k])\n\n if abs(np.mean(self.test_data[:, 2])) < 1e-11:\n\n status = 'Non-conductive'\n\n if abs(np.mean(self.test_data[:, 3])) > 1e-8:\n\n status = 'Gate leak'\n\n if abs(np.mean(self.test_data[:])) > 1e3:\n\n status = 'Too high current'\n\n if status != '':\n\n for j in range(0, num_cycles):\n\n 
self.my_inst.write('BC')\n\n self.my_inst.write('WV 101,3,0,{},{},{},0.1,0.1'.format(Vs1, Vs2, num_Vs))\n self.error_check()\n\n self.my_inst.write('XE')\n\n self.my_inst.query('*OPC?')\n\n self.my_inst.query('NUB?')\n self.error_check()\n\n data_raw = self.my_inst.read_ascii_values()\n\n for k in range(0, 2 * num_Vs):\n\n self.data[2 * i * num_Vs + k, 0] = Vc[i]\n self.data[2 * i * num_Vs + k, 1] = float(data_raw[3 * k + 2])\n self.data[2 * i * num_Vs + k, 2 + 2 * j] = float(data_raw[3 * k + 1])\n self.data[2 * i * num_Vs + k, 3 + 2 * j] = float(data_raw[3 * k])\n\n # max_Isd = np.zeros((j + 1))\n # min_Isd = np.zeros((j + 1))\n # max_Ig = np.zeros((j + 1))\n # min_Ig = np.zeros((j + 1))\n #\n # plt.clf()\n #\n # for k in range(0, j+1):\n #\n # ax[0].plot(self.data[2*i*num_Vs:2*(i+1)*num_Vs, 1],\n # self.data[2*i*num_Vs:2*(i+1)*num_Vs, 2 + 2 * k] * np.sign(Vc[i]))\n #\n # max_Isd[k] = np.max(self.data[2*i*num_Vs:2*(i+1)*num_Vs, 2 + 2 * k] * np.sign(Vc[i]))\n # min_Isd[k] = np.min(self.data[2*i*num_Vs:2*(i+1)*num_Vs, 2 + 2 * k] * np.sign(Vc[i]))\n #\n # ax[1].plot(self.data[2*i * num_Vs:2*(i + 1) * num_Vs, 1],\n # self.data[2*i * num_Vs:2*(i + 1) * num_Vs, 3 + 2 * k])\n #\n # max_Ig[k] = np.max(self.data[2*i * num_Vs:2*(i + 1) * num_Vs, 3 + 2 * k])\n # min_Ig[k] = np.min(self.data[2*i * num_Vs:2*(i + 1) * num_Vs, 3 + 2 * k])\n #\n # ax[1].suptitle('Vsd = {} V'.format(Vc[i]))\n #\n # ax[0].set_xlabel('Vg')\n # ax[0].set_ylabel('Isd*sgn(Isd)')\n #\n # ax[1].set_xlabel('Vg')\n # ax[1].set_ylabel('Ig')\n #\n # ax[0].yaxis.set_major_locator(locmax)\n # ax[0].yaxis.set_minor_locator(locmin)\n # ax[0].yaxis.set_minor_formatter(mpl.ticker.LogFormatter())\n #\n # ticks_Isd = np.array(list(ax[0].axes.get_yticks()))\n # ticks_Isd = ticks_Isd[(abs(ticks_Isd) > 5 * 1e-14) | (ticks_Isd == 0)]\n # ticks_Isd = ticks_Isd[ticks_Isd < max(max_Isd)]\n # ticks_Isd = ticks_Isd[ticks_Isd > min(min_Isd)]\n # ax[0].axes.set_yticks(ticks_Isd)\n #\n # ax[1].yaxis.set_major_locator(locmax)\n # ax[1].yaxis.set_minor_locator(locmin)\n # ax[1].yaxis.set_minor_formatter(mpl.ticker.LogFormatter())\n #\n # ticks_Ig = np.array(list(ax[1].axes.get_yticks()))\n # ticks_Ig = ticks_Ig[(abs(ticks_Ig) > 5 * 1e-14) | (ticks_Ig == 0)]\n # ticks_Ig = ticks_Ig[ticks_Ig < max(max_Ig)]\n # ticks_Ig = ticks_Ig[ticks_Ig > min(min_Ig)]\n # ax[1].axes.set_yticks(ticks_Ig)\n #\n # plt.show()\n\n # Ask for any error\n\n self.error_check()\n\n if np.equal(self.data, 0).all():\n\n self.data = self.test_data\n\n def write_to_csv(self, filepath_kernel, current_x, current_y):\n\n filepath = filepath_kernel + r'\\\\data'\n os.makedirs(filepath, exist_ok=True)\n\n df = pd.DataFrame(self.data)\n header = []\n\n if self.measurement_type == 'Isd-Vg':\n\n header.append('Vsd')\n header.append('Vg')\n\n for i in range(0, self.num_cycles):\n header.append('Isd%s' % (i + 1))\n header.append('Ig%s' % (i + 1))\n\n else:\n\n header.append('Vg')\n header.append('Vsd')\n\n for i in range(0, self.num_cycles):\n header.append('Ig%s' % (i + 1))\n header.append('Isd%s' % (i + 1))\n\n pd.DataFrame.to_csv(df, path_or_buf=filepath + r'\\\\fet_%s_%s.csv' % (current_x, current_y),\n index = False, header = header)\n\n def drive_commands_move(self, command_array):\n\n k = self.drive.read_all()\n\n # if k != b'':\n #\n # print('Initial: {}'.format(k))\n\n time.sleep(0.05)\n\n for item in command_array:\n\n self.drive.write((item + '\\r').encode())\n print('Send: {}'.format((item + '\\r').encode()))\n time.sleep(0.05)\n\n k = self.drive.read_all()\n\n # if k != b'':\n #\n 
# print('Recieve: {}'.format(k))\n\n time.sleep(0.05)\n\n while item.encode() not in k:\n\n k = self.drive.read_all()\n\n # if k != b'':\n #\n # print('Recieve: {}'.format(k))\n\n time.sleep(0.05)\n\n while 'e'.encode() not in k:\n\n k = self.drive.read_all()\n\n # if k != b'':\n #\n # print('Final recieve: {}'.format(k))\n\n time.sleep(0.05)\n\n def move_xyz(self, x_dist, y_dist, cols, rows, current_x, current_y):\n\n if current_x < cols:\n\n arr = ['2', '3', '0', '0', '1600']\n self.drive_commands_move(arr)\n\n arr = ['2', '1', '0', '0']\n arr.append(str(int(3.2 * x_dist)))\n self.drive_commands_move(arr)\n\n arr = ['2', '3', '1', '0', '1600']\n self.drive_commands_move(arr)\n\n else:\n\n if current_y < rows:\n\n arr = ['2', '3', '0', '0', '1600']\n self.drive_commands_move(arr)\n\n arr = ['2', '1', '1', '0']\n arr.append(str(int(3.2*x_dist*(cols-1))))\n self.drive_commands_move(arr)\n\n arr = ['2', '2', '0', '0']\n arr.append(str(int(3.2*y_dist)))\n self.drive_commands_move(arr)\n\n arr = ['2', '3', '1', '0', '1600']\n self.drive_commands_move(arr)\n\n def move_to_zero(self):\n\n arr = ['4', '3']\n self.drive_commands_move(arr)\n\n arr = ['4', '2']\n self.drive_commands_move(arr)\n\n arr = ['4', '1']\n self.drive_commands_move(arr)\n\n self.drive.close()\n self.drive.__exit__\n\n\n\n\n\n"
] |
[
[
"numpy.equal",
"pandas.DataFrame",
"matplotlib.ticker.SymmetricalLogLocator",
"matplotlib.pyplot.subplots",
"numpy.mean",
"pandas.DataFrame.to_csv"
]
] |
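The probe-station class above logs each sweep into a pre-allocated NumPy array and writes it out in `write_to_csv` with per-cycle column names. A condensed sketch of that logging step, using the `pandas.DataFrame`/`to_csv` calls from the API list, is shown below; the array shape, cycle count, and output file name are invented for illustration.

```python
# Sweep data -> CSV with per-cycle headers (shape, cycle count, and file name are invented).
import numpy as np
import pandas as pd

num_cycles = 2
data = np.zeros((10, 2 + 2 * num_cycles))                # Vsd, Vg, then Isd/Ig per cycle
header = ['Vsd', 'Vg'] + [name
                          for i in range(num_cycles)
                          for name in (f'Isd{i + 1}', f'Ig{i + 1}')]
pd.DataFrame(data).to_csv('fet_0_0.csv', index=False, header=header)
```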
redachhaibi/FreeNN
|
[
"3323fed33a0a990f137c11f66c91c4195db9bd92"
] |
[
"experiments/run_experiment.py"
] |
[
"import torch\nimport torchvision\nimport torchvision.transforms as transforms\n\nimport numpy\n\n#from fiberedae.utils import nn as nnutils\n#from icecream import ic\n\nimport click\n\ndef _get_attr(obj, attr_name, human_err_message):\n try:\n thing = getattr(obj, attr_name)\n except (AttributeError, torch.nn.modules.module.ModuleAttributeError):\n raise ValueError(human_err_message.format(attr_name=attr_name))\n return thing\n\nclass DataMaster:\n def __init__(self, name, mininatch_size, num_workers=8):\n \n self.mininatch_size = mininatch_size\n self.num_workers = num_workers\n\n self.trainset = None\n self.trainloader = None\n self.testset = None\n self.testloader = None\n self.classes = None\n self.in_size = None\n self.out_size = None\n \n fct = _get_attr(self, \"load_\"+ name.lower(), \"There's no dataset by the name: {attr_name}\")\n fct()\n\n def load_cifar10(self):\n transform = transforms.Compose(\n [\n transforms.ToTensor(),\n ]\n )\n\n self.trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)\n self.trainloader = torch.utils.data.DataLoader(self.trainset, batch_size=self.mininatch_size, shuffle=True, num_workers=self.num_workers)\n self.testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform)\n self.testloader = torch.utils.data.DataLoader(self.testset, batch_size=self.mininatch_size, shuffle=False, num_workers=self.num_workers)\n self.classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')\n self.in_size = 32*32*3\n self.out_size = 10\n\n def load_mnist(self):\n transform = transforms.Compose(\n [\n transforms.ToTensor(),\n ]\n )\n\n self.trainset = torchvision.datasets.MNIST(root='./data', train=True, download=True, transform=transform)\n self.trainloader = torch.utils.data.DataLoader(self.trainset, batch_size=self.mininatch_size, shuffle=True, num_workers=self.num_workers)\n self.testset = torchvision.datasets.MNIST(root='./data', train=False, download=True, transform=transform)\n self.testloader = torch.utils.data.DataLoader(self.testset, batch_size=self.mininatch_size, shuffle=False, num_workers=self.num_workers)\n self.classes = range(10)\n self.in_size = 28*28\n self.out_size = 10\n\nclass MLP(torch.nn.Module):\n def __init__(self, build_name, build_kwargs):\n \"\"\"build_name is fonction from self that starts with _build_, build_kwargs is a dict for the arguments given to that function\"\"\"\n super(MLP, self).__init__()\n \n fct = _get_attr(self, \"_build_\" + build_name.lower(), \"There's no build named: {attr_name}\")\n fct(**build_kwargs)\n\n for parameter in self.parameters():\n parameter.requires_grad_(True)\n parameter.retain_grad()\n\n def _reset(self):\n self.layers = []\n\n def _get_non_linearity(self, non_linearity_name):\n return _get_attr(torch.nn, non_linearity_name, \"There's no non_linearity named: {attr_name}\")\n\n def _append_layer(self, layer, non_linearity_name):\n self.layers.append(layer)\n non_lin = self._get_non_linearity(non_linearity_name)()\n self.layers.append(non_lin)\n\n def _build_from_list(self, layer_description:list):\n \"\"\"expects a list of dicts,each with keys: in_size, out_size, bias, non_linearity\"\"\"\n self._reset()\n for elmt in layer_description:\n layer = torch.nn.Linear(elmt[\"in_size\"], elmt[\"out_size\"], bias=elmt[\"bias\"])\n self._append_layer(layer, elmt[\"non_linearity\"])\n\n self.layers = torch.nn.Sequential(*self.layers)\n\n def _build_procedural(self, in_size, hid_size, out_size, 
nb_hidden, non_linearity=\"ReLU\", last_non_linearity=\"LogSoftmax\", bias=True):\n \"\"\"procedurally builds an MLP with all hiddens the same size\"\"\"\n self._reset()\n if nb_hidden == 0 :\n layer = torch.nn.Linear(in_size, out_size, bias=bias)\n self._append_layer(layer, non_linearity) \n else:\n layer_first = torch.nn.Linear(in_size, hid_size, bias=bias)\n self._append_layer(layer_first, non_linearity) \n \n for _ in range(nb_hidden):\n layer = torch.nn.Linear(hid_size, hid_size, bias=bias)\n self._append_layer(layer, non_linearity) \n \n layer_last = torch.nn.Linear(hid_size, out_size, bias=bias)\n self._append_layer(layer_last, last_non_linearity) \n\n self.layers = torch.nn.Sequential(*self.layers)\n\n def initialize(self, name, torch_kwargs={}):\n \"\"\"name must be in torch.nn.init\"\"\"\n init = _get_attr(torch.nn.init, name, \"torch.nn.init has no initialization named: {attr_name}\")\n for layer in self.layers:\n try:\n init(layer.weight, **torch_kwargs)\n except Exception as e: #torch.nn.modules.module.ModuleAttributeError:\n # print( \"Skipping initialization for layer following Exception:\")\n # print( e )\n # print( \"Layer: \", layer)\n # print( \"\")\n pass\n\n def forward(self, x_inputs):\n x_inputs = torch.flatten(x_inputs, 1)\n return self.layers(x_inputs)\n\n def _get_data(self, attr_name):\n ret = {}\n for aidi, layer in enumerate(self.layers):\n attrs = attr_name.split(\".\")\n obj = layer\n add_it = True\n for attr in attrs:\n try:\n obj = getattr(obj, attr)\n except Exception as e: #torch.nn.modules.module.ModuleAttributeError:\n #print( \"Exception \", e)\n #print( \"\")\n add_it = False\n break\n \n if add_it:\n ret[\"layer_%s\" % aidi] = obj\n\n return ret\n\n def get_weights(self):\n \"\"\"returns a dict of weights, one entry per layer\"\"\"\n return self._get_data(\"weight\")\n\n def get_biases(self):\n \"\"\"returns a dict of biases, one entry per layer\"\"\"\n return self._get_data(\"bias\")\n \n def get_weights_gradients(self):\n \"\"\"returns a dict of weights, one entry per layer\"\"\"\n return self._get_data(\"weight.grad\")\n \n def get_biases_gradients(self):\n \"\"\"returns a dict of biases, one entry per layer\"\"\"\n return self._get_data(\"bias.grad\")\n\nclass BatchReporter:\n def __init__(self, period, variable):\n self.period = period\n self.variable = variable\n self.current_tick = 0\n self.dir = None\n\n def set_dir(self, path):\n import os\n self.dir = os.path.join(path, self.variable)\n os.mkdir(self.dir)\n\n def tick(self, model):\n if self.current_tick % self.period == 0:\n self.save_report(model)\n self.current_tick += 1\n\n def save_report(self, model):\n import os\n\n fct_name = \"get_\" + self.variable.lower()\n fct = _get_attr(model, fct_name, \"Model has no function: {attr_name}\")\n\n for key, value in fct().items():\n path = os.path.join( self.dir, key)\n if not os.path.exists(path):\n os.mkdir(path)\n\n filename = os.path.join( path, \"%s.npy\" % self.current_tick ) \n value = value.cpu().detach().numpy()\n numpy.save(filename, value)\n\nclass Trainer(object):\n \"\"\"docstring for Trainer\"\"\"\n def __init__(self, net, data_master, reporters):\n \"\"\"loss_name should be in torch.nn, optimizer kwargs is a dict with a field name that must in torcj.optim. 
The rest must be the parameters for the optimizer\"\"\"\n super(Trainer, self).__init__()\n\n self.net = net\n self.data_master = data_master\n self.reporters = reporters\n \n def _one_pass(self, criterion, optimizer, data, training):\n optimizer.zero_grad()\n\n inputs, labels = data\n # Add cuda here\n inputs = inputs.cuda()\n labels = labels.cuda()\n #\n outputs = self.net(inputs)\n loss = criterion(outputs, labels)\n if training:\n loss.backward(retain_graph = True)\n optimizer.step()\n for reporter in self.reporters:\n reporter.tick(self.net)\n\n return loss.item()\n\n def train(self, loss_name, optimizer_kwargs, nb_epochs):\n from tqdm import trange\n\n criterion = _get_attr(torch.nn, loss_name, \"There's no loss: {attr_name}\")()\n optimizer_fct = _get_attr(torch.optim, optimizer_kwargs[\"name\"], \"There's no optimizer: {attr_name}\")\n del optimizer_kwargs[\"name\"]\n optimizer = optimizer_fct(self.net.parameters(), **optimizer_kwargs)\n\n learnin_curves = {\n \"train\": [],\n \"test\": []\n }\n\n pbar = trange(nb_epochs)\n for epoch in pbar:\n train_loss = 0\n for train_batch_id, data in enumerate(self.data_master.trainloader):\n train_loss += self._one_pass(criterion, optimizer, data, training=True)\n train_loss = train_loss / (train_batch_id+1)\n learnin_curves[\"train\"].append(train_loss)\n\n test_loss = 0\n for test_batch_id, data in enumerate(self.data_master.testloader):\n test_loss += self._one_pass(criterion, optimizer, data, training=False)\n test_loss = test_loss / (test_batch_id+1)\n learnin_curves[\"test\"].append(test_loss)\n \n label = \"epoch: %d, train: %.4f, test: %.4f\" % (epoch, train_loss, test_loss)\n pbar.set_description( label )\n\n return learnin_curves\n\ndef run_as_module(json_file):\n import json\n import os\n import shutil\n\n def _mkdir(json_file):\n import time\n fix = os.path.basename(json_file).split(\".\")[0]\n date = time.ctime().replace(\":\", \"-\").replace(\" \", \"_\").replace(\"__\", \"_\")\n \n fn = fix +\"_\"+ date\n print(\"making folder:\", fn)\n os.mkdir(fn)\n return fn\n\n print(\"initializing...\")\n with open(json_file) as fifi:\n config = json.load(fifi)\n\n data_master = DataMaster(**config[\"dataset\"])\n mlp = MLP(config[\"network\"][\"build_type\"], config[\"network\"][\"build_kwargs\"])\n mlp.initialize(config[\"initialization\"][\"name\"], config[\"initialization\"][\"torch_kwargs\"])\n # Cuda\n mlp.cuda()\n\n reporters = [ BatchReporter(**elmt) for elmt in config[\"reporters\"] ]\n experiment_dir = _mkdir(json_file)\n for reporter in reporters:\n reporter.set_dir(experiment_dir)\n\n print(\"copying json file for reference...\")\n shutil.copy2(json_file, experiment_dir)\n\n print(\"training...\")\n trainer = Trainer(mlp, data_master, reporters)\n learning_curves = trainer.train(**config[\"trainer\"])\n\n print(\"saving learning curves...\")\n for key, value in learning_curves.items():\n fn = os.path.join(experiment_dir, \"learning_curve_%s.npy\" %key)\n numpy.save(fn, numpy.array(value, dtype=\"float32\"))\n \n print(\"done.\")\n\n return experiment_dir\n\n@click.command()\n@click.option('-j', '--json_file', required=True, help='The JSON file to run the experiment')\ndef run(json_file):\n return run_as_module(json_file)\n\nif __name__ == '__main__':\n run()"
] |
[
[
"torch.nn.Linear",
"numpy.array",
"torch.nn.Sequential",
"numpy.save",
"torch.utils.data.DataLoader",
"torch.flatten"
]
] |
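`MLP._build_procedural` in this record assembles `nn.Linear` layers interleaved with non-linearities into an `nn.Sequential`, flattens image batches before the first layer, and its reporters snapshot tensors with `numpy.save`. The following sketch compresses those steps into a few lines; the layer sizes, batch shape, and file name are illustrative, and the activation is fixed to `ReLU` rather than looked up by name as the class does.

```python
# Procedural MLP + flatten + weight snapshot (sizes and file name are illustrative).
import numpy as np
import torch
from torch import nn

sizes = [28 * 28, 128, 128, 10]
layers = []
for fan_in, fan_out in zip(sizes[:-1], sizes[1:]):
    layers += [nn.Linear(fan_in, fan_out), nn.ReLU()]    # the class looks the activation up by name
mlp = nn.Sequential(*layers)

x = torch.randn(4, 1, 28, 28)                            # fake MNIST-shaped batch
out = mlp(torch.flatten(x, 1))                           # flatten images before the first Linear
np.save('layer_0_weight.npy', mlp[0].weight.detach().cpu().numpy())
```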
sbhadra2020/fastMRI
|
[
"a2b25fed53621c5d5c648993af13971b2d365fc3"
] |
[
"banding_removal/fastmri/model/classifiers/resnet_r1.py"
] |
[
"import torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom torch.autograd import Variable\nimport torch.utils.data\nimport torch.utils.data.distributed\nimport numpy as np\nimport pdb\n\nkernel_size = 5\n\nclass Discriminator(nn.Module):\n \"\"\"\n Known to work well as a GAN discriminator\n \n \"\"\"\n def __init__(self, num_classes=1, args=None):\n super().__init__()\n #self.embed_size = 1\n #s0 = self.s0 = args.smallest_res\n nf = self.nf = 64 #args.ndf\n #nf_max = self.nf_max = args.ndf_max\n\n # Submodules\n nlayers = 1\n self.nf0 = nf * 2**nlayers\n\n blocks = [\n ResnetBlock(nf, nf),\n ResnetBlock(nf, nf),\n #ResnetBlock(nf, nf),\n ]\n\n for i in range(nlayers):\n nf0 = nf * 2**i\n nf1 = nf * 2**(i+1)\n blocks += [\n #nn.AvgPool2d(2, stride=2, padding=0),\n nn.MaxPool2d(4, stride=4, padding=0),\n ResnetBlock(nf0, nf1),\n ResnetBlock(nf1, nf1),\n #ResnetBlock(nf1, nf1),\n ]\n\n # Initial up-channeling conv\n self.conv_img = nn.Conv2d(3, 1*nf, kernel_size=kernel_size, padding=kernel_size//2)\n\n self.resnet = nn.Sequential(*blocks)\n\n # Final stage is standard avg-pool followed by linear\n self.pool_max = nn.MaxPool2d(4, stride=4, padding=0)\n self.pool = nn.AdaptiveAvgPool2d((1,1))\n self.fc = nn.Linear(self.nf0, num_classes)\n self.norm = nn.InstanceNorm2d(3, affine=False, eps=0.0)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_in', nonlinearity='relu')\n elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):\n if m.weight is not None:\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n def forward(self, x):\n batch_size = x.size(0)\n out = x\n \n out = self.norm(out)\n #pdb.set_trace()\n out = self.conv_img(out)\n out = self.resnet(out)\n out = self.pool_max(out)\n out = self.pool(out)\n out = out.view(batch_size, self.nf0)\n out = self.fc(actvn(out))\n \n return out\n\n\nclass ResnetBlock(nn.Module):\n def __init__(self, fin, fout, fhidden=None):\n super().__init__()\n # Attributes\n self.learned_shortcut = (fin != fout)\n self.fin = fin\n self.fout = fout\n if fhidden is None:\n self.fhidden = min(fin, fout)\n else:\n self.fhidden = fhidden\n\n # Submodules\n self.norm_0 = nn.GroupNorm(self.fin//32, self.fin)\n\n self.conv_0 = nn.Conv2d(self.fin, self.fhidden, \n kernel_size, stride=1, padding=kernel_size//2, bias=False)\n\n self.norm_1 = nn.GroupNorm(self.fhidden//32, self.fhidden)\n\n self.conv_1 = nn.Conv2d(self.fhidden, self.fout, \n kernel_size, stride=1, padding=kernel_size//2, bias=False)\n\n if self.learned_shortcut:\n self.conv_s = nn.Conv2d(self.fin, self.fout, \n 1, stride=1, padding=0, bias=False)\n\n def forward(self, x):\n x_s = self._shortcut(x)\n dx = self.conv_0(actvn(self.norm_0(x)))\n dx = self.conv_1(actvn(self.norm_1(dx)))\n out = x_s + dx\n\n return out\n\n def _shortcut(self, x):\n if self.learned_shortcut:\n x_s = self.conv_s(x)\n else:\n x_s = x\n return x_s\n\n\ndef actvn(x):\n return F.relu(x)\n #return F.leaky_relu(x, 2e-1)\n"
] |
[
[
"torch.nn.Linear",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.MaxPool2d",
"torch.nn.Sequential",
"torch.nn.init.constant_",
"torch.nn.init.kaiming_normal_",
"torch.nn.GroupNorm",
"torch.nn.Conv2d",
"torch.nn.InstanceNorm2d",
"torch.nn.functional.relu"
]
] |
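The `Discriminator` in the row above is assembled from pre-activation residual blocks: GroupNorm, then ReLU, then Conv2d, twice, with a learned 1×1 shortcut whenever the channel count changes. Below is a simplified, illustrative sketch of that block pattern; it is not the repository's class and drops the hidden-channel bottleneck of the original `ResnetBlock` for brevity.

```python
import torch
from torch import nn
from torch.nn import functional as F

class TinyResBlock(nn.Module):
    """Pre-activation residual block: norm -> relu -> conv, twice."""
    def __init__(self, fin, fout, k=5):
        super().__init__()
        self.norm_0 = nn.GroupNorm(max(fin // 32, 1), fin)
        self.conv_0 = nn.Conv2d(fin, fout, k, padding=k // 2, bias=False)
        self.norm_1 = nn.GroupNorm(max(fout // 32, 1), fout)
        self.conv_1 = nn.Conv2d(fout, fout, k, padding=k // 2, bias=False)
        # Learned shortcut only when the residual path changes channel count.
        self.conv_s = nn.Conv2d(fin, fout, 1, bias=False) if fin != fout else None

    def forward(self, x):
        x_s = self.conv_s(x) if self.conv_s is not None else x
        dx = self.conv_0(F.relu(self.norm_0(x)))
        dx = self.conv_1(F.relu(self.norm_1(dx)))
        return x_s + dx

out = TinyResBlock(64, 128)(torch.randn(1, 64, 32, 32))  # -> [1, 128, 32, 32]
```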
HelloDyson/Sketch2Voxel
|
[
"55799301ece904f1b6e8696f569647748a2197c3"
] |
[
"models/base_gru_net.py"
] |
[
"from models.net import Net\nfrom lib.layers import SoftmaxWithLoss3D\n\nimport torch\nfrom torch.autograd import Variable\n\n\nclass BaseGRUNet(Net):\n \"\"\"\n This class is used to define some common attributes and methods that both GRUNet and \n ResidualGRUNet have. Note that GRUNet and ResidualGRUNet have the same loss function\n and forward pass. The only difference is different encoder and decoder architecture.\n \"\"\"\n def __init__(self):\n# print(\"\\ninitializing \\\"BaseGRUNet\\\"\")\n super(BaseGRUNet, self).__init__()\n \"\"\"\n Set the necessary data of the network\n \"\"\"\n self.is_x_tensor4 = False\n \n self.n_gru_vox = 4\n #the size of x is (num_views, batch_size, 3, img_w, img_h)\n self.input_shape = (self.batch_size, 3, self.img_w, self.img_h)\n #number of filters for each convolution layer in the encoder\n self.n_convfilter = [96, 128, 256, 256, 256, 256]\n #the dimension of the fully connected layer\n self.n_fc_filters = [1024]\n #number of filters for each 3d convolution layer in the decoder\n self.n_deconvfilter = [128, 128, 128, 64, 32, 2]\n #the size of the hidden state\n '''this influence the final data filter [0,1,:,:,:] that for VOXELtoOBJ'''\n self.h_shape = (self.batch_size, self.n_deconvfilter[0], self.n_gru_vox, self.n_gru_vox, self.n_gru_vox)\n #the filter shape of the 3d convolutional gru unit\n self.conv3d_filter_shape = (self.n_deconvfilter[0], self.n_deconvfilter[0], 3, 3, 3)\n \n #set the last layer \n self.SoftmaxWithLoss3D = SoftmaxWithLoss3D()\n \n #set the encoder and the decoder of the network\n self.encoder = None\n self.decoder = None\n \n def forward(self, x, y=None, test=True):\n #ensure that the network has encoder and decoder attributes\n if self.encoder is None:\n raise Exception(\"subclass network of BaseGRUNet must define the \\\"encoder\\\" attribute\")\n if self.decoder is None:\n raise Exception(\"subclass network of BaseGRUNet must define the \\\"decoder\\\" attribute\")\n\n #initialize the hidden state and update gate\n h = self.initHidden(self.h_shape)\n u = self.initHidden(self.h_shape)\n \n #a list used to store intermediate update gate activations\n u_list = []\n \n \"\"\"\n x is the input and the size of x is (num_views, batch_size, channels, heights, widths).\n h and u is the hidden state and activation of last time step respectively.\n The following loop computes the forward pass of the whole network. \n \"\"\"\n for time in range(x.size(0)):\n# print(self.input_shape)\n gru_out, update_gate = self.encoder(x[time], h, u, time)\n \n h = gru_out\n \n u = update_gate\n u_list.append(u)\n \n out = self.decoder(h)\n \n \"\"\"\n If test is True and y is None, then the out is the [prediction].\n If test is True and y is not None, then the out is [prediction, loss].\n If test is False and y is not None, then the out is loss.\n \n Q: so is this the same with solver.py?\n \"\"\"\n out = self.SoftmaxWithLoss3D(out, y=y, test=test)\n if test:\n out.extend(u_list)\n return out\n \n def initHidden(self, h_shape):\n h = torch.zeros(h_shape)\n if torch.cuda.is_available():\n h = h.type(torch.cuda.FloatTensor)\n return Variable(h)\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n "
] |
[
[
"torch.zeros",
"torch.cuda.is_available",
"torch.autograd.Variable"
]
] |
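`BaseGRUNet.initHidden` in the row above allocates a zero hidden state and moves it to the GPU only when one is present. A minimal sketch of the same idea on current PyTorch, where a plain tensor replaces `torch.autograd.Variable`; the shape below just mirrors the `h_shape` layout used above and is otherwise arbitrary.

```python
import torch

def init_hidden(h_shape):
    h = torch.zeros(h_shape)
    if torch.cuda.is_available():  # use the GPU only when one is present
        h = h.cuda()
    return h

h = init_hidden((1, 128, 4, 4, 4))  # (batch, n_deconvfilter[0], 4, 4, 4)
```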
akinoriosamura/TorchSeg-mirror
|
[
"34033fe85fc24015bcef7a92aad39d2a25a001a5",
"34033fe85fc24015bcef7a92aad39d2a25a001a5",
"34033fe85fc24015bcef7a92aad39d2a25a001a5",
"34033fe85fc24015bcef7a92aad39d2a25a001a5"
] |
[
"model/cpn/ablation_study/cityscapes.cpn.R101_v1c.v2/train.py",
"model/cpn/ade.cpn.R50_v1c.v7/network.py",
"model/cpn/ade.cpn.R50_v1c.v37/train.py",
"model/bisenet/cityscapes.bisenet.X39.speed/config.py"
] |
[
"from __future__ import division\nimport os.path as osp\nimport sys\nimport argparse\nfrom tqdm import tqdm\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.distributed as dist\nimport torch.backends.cudnn as cudnn\n\nfrom config import config\nfrom dataloader import get_train_loader\nfrom network import CPNet\nfrom datasets import Cityscapes\nfrom utils.init_func import init_weight, group_weight\nfrom engine.lr_policy import PolyLR\nfrom engine.logger import get_logger\nfrom engine.engine import Engine\n# from seg_opr.sync_bn import DataParallelModel, Reduce, BatchNorm2d\nfrom seg_opr.seg_oprs import one_hot\nfrom seg_opr.loss_opr import ProbOhemCrossEntropy2d\n\ntry:\n from apex.parallel import SyncBatchNorm, DistributedDataParallel\nexcept ImportError:\n raise ImportError(\n \"Please install apex from https://www.github.com/nvidia/apex .\")\n\nlogger = get_logger()\n\ntorch.manual_seed(config.seed)\nif torch.cuda.is_available():\n torch.cuda.manual_seed(config.seed)\n\nparser = argparse.ArgumentParser()\n\nwith Engine(custom_parser=parser) as engine:\n args = parser.parse_args()\n\n cudnn.benchmark = True\n if engine.distributed:\n torch.cuda.set_device(engine.local_rank)\n\n # data loader\n train_loader, train_sampler = get_train_loader(engine, Cityscapes)\n\n # config network and criterion\n min_kept = int(\n config.batch_size // len(\n engine.devices) * config.image_height * config.image_width // 16)\n criterion = ProbOhemCrossEntropy2d(ignore_label=255, thresh=0.7,\n min_kept=min_kept,\n use_weight=False)\n\n if engine.distributed:\n logger.info('Use the Multi-Process-SyncBatchNorm')\n BatchNorm2d = SyncBatchNorm\n # else:\n # BatchNorm2d = BatchNorm2d\n model = CPNet(config.num_classes, criterion=criterion,\n pretrained_model=config.pretrained_model,\n norm_layer=BatchNorm2d)\n init_weight(model.business_layer, nn.init.kaiming_normal_,\n BatchNorm2d, config.bn_eps, config.bn_momentum,\n mode='fan_in', nonlinearity='relu')\n\n # group weight and config optimizer\n base_lr = config.lr\n # if engine.distributed:\n # base_lr = config.lr * engine.world_size\n\n params_list = []\n params_list = group_weight(params_list, model.backbone,\n BatchNorm2d, base_lr)\n params_list = group_weight(params_list, model.head_layer,\n BatchNorm2d, base_lr)\n params_list = group_weight(params_list, model.aux_layer,\n BatchNorm2d, base_lr)\n params_list = group_weight(params_list, model.context,\n BatchNorm2d, base_lr * 10)\n\n # config lr policy\n total_iteration = config.nepochs * config.niters_per_epoch\n lr_policy = PolyLR(base_lr, config.lr_power, total_iteration)\n optimizer = torch.optim.SGD(params_list,\n lr=base_lr,\n momentum=config.momentum,\n weight_decay=config.weight_decay)\n\n if engine.distributed:\n if torch.cuda.is_available():\n model.cuda()\n model = DistributedDataParallel(model)\n else:\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n # model = DataParallelModel(model, engine.devices)\n model.to(device)\n\n engine.register_state(dataloader=train_loader, model=model,\n optimizer=optimizer)\n if engine.continue_state_object:\n engine.restore_checkpoint()\n\n optimizer.zero_grad()\n model.train()\n\n for epoch in range(engine.state.epoch, config.nepochs):\n if engine.distributed:\n train_sampler.set_epoch(epoch)\n bar_format = '{desc}[{elapsed}<{remaining},{rate_fmt}]'\n pbar = tqdm(range(config.niters_per_epoch), file=sys.stdout,\n bar_format=bar_format)\n dataloader = iter(train_loader)\n for idx in pbar:\n 
engine.update_iteration(epoch, idx)\n\n minibatch = dataloader.next()\n imgs = minibatch['data']\n gts = minibatch['label']\n\n imgs = imgs.cuda(non_blocking=True)\n gts = gts.cuda(non_blocking=True)\n\n b, h, w = gts.size()\n scaled_gts = F.interpolate((gts.view(b, 1, h, w)).float(),\n scale_factor=0.125,\n mode=\"nearest\")\n scaled_gts[scaled_gts == 255] = config.num_classes\n b, c, h, w = scaled_gts.size()\n scaled_gts = scaled_gts.squeeze_()\n C = config.num_classes + 1\n one_hot_gts = one_hot(scaled_gts, C).view(b, C, -1)\n similarity_gts = torch.bmm(one_hot_gts.permute(0, 2, 1),\n one_hot_gts)\n\n loss = model(imgs, gts, similarity_gts)\n\n # reduce the whole loss over multi-gpu\n if engine.distributed:\n dist.all_reduce(loss, dist.ReduceOp.SUM)\n loss = loss / engine.world_size\n # else:\n # loss = Reduce.apply(*loss) / len(loss)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n current_idx = epoch * config.niters_per_epoch + idx\n lr = lr_policy.get_lr(current_idx)\n\n for i in range(6):\n optimizer.param_groups[i]['lr'] = lr\n for i in range(6, len(optimizer.param_groups)):\n optimizer.param_groups[i]['lr'] = lr * 10\n\n print_str = 'Epoch{}/{}'.format(epoch, config.nepochs) \\\n + ' Iter{}/{}:'.format(idx + 1, config.niters_per_epoch) \\\n + ' lr=%.2e' % lr \\\n + ' loss=%.2f' % loss.item()\n\n pbar.set_description(print_str, refresh=False)\n\n if (epoch >= config.nepochs - 20) or (\n epoch % config.snapshot_iter == 0):\n if engine.distributed and (engine.local_rank == 0):\n engine.save_and_link_checkpoint(config.snapshot_dir,\n config.log_dir,\n config.log_dir_link)\n elif not engine.distributed:\n engine.save_and_link_checkpoint(config.snapshot_dir,\n config.log_dir,\n config.log_dir_link)\n",
"# encoding: utf-8\nfrom functools import partial\nfrom collections import OrderedDict\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom config import config\nfrom base_model import resnet50\nfrom seg_opr.seg_oprs import ConvBnRelu\n\n\nclass CPNet(nn.Module):\n def __init__(self, out_planes, criterion, pretrained_model=None,\n norm_layer=nn.BatchNorm2d):\n super(CPNet, self).__init__()\n self.backbone = resnet50(pretrained_model, norm_layer=norm_layer,\n bn_eps=config.bn_eps,\n bn_momentum=config.bn_momentum,\n deep_stem=True, stem_width=64)\n self.backbone.layer3.apply(partial(self._nostride_dilate, dilate=2))\n self.backbone.layer4.apply(partial(self._nostride_dilate, dilate=4))\n\n self.business_layer = []\n\n self.context = ObjectContext(2048, 512, norm_layer)\n\n self.head_layer = nn.Sequential(\n ConvBnRelu(2048 + 1024, 512, 3, 1, 1,\n has_bn=True,\n has_relu=True, has_bias=False, norm_layer=norm_layer),\n nn.Dropout2d(0.1, inplace=False),\n nn.Conv2d(512, out_planes, kernel_size=1)\n )\n self.aux_layer = nn.Sequential(\n ConvBnRelu(1024, 512, 3, 1, 1,\n has_bn=True,\n has_relu=True, has_bias=False, norm_layer=norm_layer),\n nn.Dropout2d(0.1, inplace=False),\n nn.Conv2d(512, out_planes, kernel_size=1)\n )\n self.business_layer.append(self.context)\n self.business_layer.append(self.head_layer)\n self.business_layer.append(self.aux_layer)\n\n self.criterion = criterion\n self.bce_criterion = nn.BCELoss(reduction='mean')\n\n def forward(self, data, label=None, aux_label=None):\n blocks = self.backbone(data)\n\n fm, intra_sim_map = self.context(blocks[-1])\n\n fm = self.head_layer(fm)\n fm = F.interpolate(fm, scale_factor=8, mode='bilinear',\n align_corners=True)\n softmax_fm = F.log_softmax(fm, dim=1)\n\n aux_fm = self.aux_layer(blocks[-2])\n aux_fm = F.interpolate(aux_fm, scale_factor=8, mode='bilinear',\n align_corners=True)\n\n if label is not None:\n main_loss = self.criterion(fm, label)\n aux_loss = self.criterion(aux_fm, label)\n intra_sim_loss = self.bce_criterion(intra_sim_map, aux_label)\n loss = main_loss + 0.4 * aux_loss + intra_sim_loss\n return loss\n\n return softmax_fm\n\n # @staticmethod\n def _nostride_dilate(self, m, dilate):\n if isinstance(m, nn.Conv2d):\n if m.stride == (2, 2):\n m.stride = (1, 1)\n if m.kernel_size == (3, 3):\n m.dilation = (dilate // 2, dilate // 2)\n m.padding = (dilate // 2, dilate // 2)\n else:\n if m.kernel_size == (3, 3):\n m.dilation = (dilate, dilate)\n m.padding = (dilate, dilate)\n\n\nclass ObjectContext(nn.Module):\n def __init__(self, in_channels, inner_channel, norm_layer=nn.BatchNorm2d):\n super(ObjectContext, self).__init__()\n self.in_channels = in_channels\n self.inner_channel = inner_channel\n\n self.reduce_conv = ConvBnRelu(self.in_channels, self.inner_channel,\n 1, 1, 0,\n has_bn=True, has_relu=True,\n has_bias=False, norm_layer=norm_layer)\n\n self.intra_similarity_branch = nn.Sequential(\n ConvBnRelu(self.inner_channel, self.inner_channel, 1, 1, 0,\n has_bn=True, has_relu=True,\n has_bias=False, norm_layer=norm_layer),\n ConvBnRelu(self.inner_channel, 3600, 1, 1, 0,\n has_bn=True, has_relu=False,\n has_bias=False, norm_layer=norm_layer),\n )\n\n self.intra_post_conv = ConvBnRelu(self.inner_channel,\n self.inner_channel,\n 1, 1, 0, has_bn=True, has_relu=True,\n has_bias=False, norm_layer=norm_layer)\n self.inter_post_conv = ConvBnRelu(self.inner_channel,\n self.inner_channel,\n 1, 1, 0, has_bn=True, has_relu=True,\n has_bias=False, norm_layer=norm_layer)\n\n def forward(self, x):\n b, h, w = 
x.size(0), x.size(2), x.size(3)\n\n value = self.reduce_conv(x)\n\n intra_similarity_map = self.intra_similarity_branch(value)\n intra_similarity_map = intra_similarity_map.view(b, h * w, -1)\n intra_similarity_map = intra_similarity_map.permute(0, 2, 1)\n intra_similarity_map = torch.sigmoid(intra_similarity_map)\n\n inter_similarity_map = 1 - intra_similarity_map\n\n value = value.view(b, self.inner_channel, -1)\n value = value.permute(0, 2, 1)\n\n intra_context = torch.bmm(intra_similarity_map, value)\n intra_mask = torch.ge(intra_similarity_map, 0.5).float()\n intra_mask_count = intra_mask.sum(dim=-1, keepdim=True)\n intra_mask_count = intra_mask_count.masked_fill_(intra_mask_count.eq(0),\n 1)\n intra_context = intra_context.div(intra_mask_count)\n intra_context = intra_context.permute(0, 2, 1).contiguous()\n intra_context = intra_context.view(b, self.inner_channel, *x.size()[2:])\n intra_context = self.intra_post_conv(intra_context)\n\n inter_context = torch.bmm(inter_similarity_map, value)\n inter_mask = torch.ge(inter_similarity_map, 0.5).float()\n inter_mask_count = inter_mask.sum(dim=-1, keepdim=True)\n inter_mask_count = inter_mask_count.masked_fill_(inter_mask_count.eq(0),\n 1)\n inter_context = inter_context.div(inter_mask_count)\n inter_context = inter_context.permute(0, 2, 1).contiguous()\n inter_context = inter_context.view(b, self.inner_channel, *x.size()[2:])\n inter_context = self.inter_post_conv(inter_context)\n\n output = torch.cat([x, intra_context, inter_context], dim=1)\n return output, intra_similarity_map\n\n\nif __name__ == \"__main__\":\n model = PSPNet(150, None)\n print(model)\n",
"from __future__ import division\nimport os.path as osp\nimport sys\nimport argparse\nfrom tqdm import tqdm\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.distributed as dist\nimport torch.backends.cudnn as cudnn\n\nfrom config import config\nfrom dataloader import get_train_loader\nfrom network import CPNet\nfrom datasets import ADE\nfrom utils.init_func import init_weight, group_weight\nfrom engine.lr_policy import PolyLR\nfrom engine.logger import get_logger\nfrom engine.engine import Engine\n# from seg_opr.sync_bn import DataParallelModel, Reduce, BatchNorm2d\nfrom seg_opr.loss_opr import AutoOhemCrossEntropy2d\n\ntry:\n from apex.parallel import SyncBatchNorm, DistributedDataParallel\nexcept ImportError:\n raise ImportError(\n \"Please install apex from https://www.github.com/nvidia/apex .\")\n\nlogger = get_logger()\n\ntorch.manual_seed(config.seed)\nif torch.cuda.is_available():\n torch.cuda.manual_seed(config.seed)\n\nparser = argparse.ArgumentParser()\n\nwith Engine(custom_parser=parser) as engine:\n args = parser.parse_args()\n\n cudnn.benchmark = True\n if engine.distributed:\n torch.cuda.set_device(engine.local_rank)\n\n # data loader\n train_loader, train_sampler = get_train_loader(engine, ADE)\n\n # config network and criterion\n criterion = AutoOhemCrossEntropy2d(reduction='mean',\n ignore_label=-1, drop_ratio=0.3)\n\n if engine.distributed:\n logger.info('Use the Multi-Process-SyncBatchNorm')\n BatchNorm2d = SyncBatchNorm\n # else:\n # BatchNorm2d = BatchNorm2d\n model = CPNet(config.num_classes, criterion=criterion,\n pretrained_model=config.pretrained_model,\n norm_layer=BatchNorm2d)\n init_weight(model.business_layer, nn.init.kaiming_normal_,\n BatchNorm2d, config.bn_eps, config.bn_momentum,\n mode='fan_in', nonlinearity='relu')\n\n # group weight and config optimizer\n base_lr = config.lr\n\n params_list = []\n params_list = group_weight(params_list, model.backbone,\n BatchNorm2d, base_lr)\n for module in model.business_layer:\n params_list = group_weight(params_list, module, BatchNorm2d,\n base_lr * 10)\n\n # config lr policy\n total_iteration = config.nepochs * config.niters_per_epoch\n lr_policy = PolyLR(base_lr, config.lr_power, total_iteration)\n optimizer = torch.optim.SGD(params_list,\n lr=base_lr,\n momentum=config.momentum,\n weight_decay=config.weight_decay)\n\n if engine.distributed:\n if torch.cuda.is_available():\n model.cuda()\n model = DistributedDataParallel(model)\n else:\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n # model = DataParallelModel(model, engine.devices)\n model.to(device)\n\n engine.register_state(dataloader=train_loader, model=model,\n optimizer=optimizer)\n if engine.continue_state_object:\n engine.restore_checkpoint()\n\n optimizer.zero_grad()\n model.train()\n\n for epoch in range(engine.state.epoch, config.nepochs):\n if engine.distributed:\n train_sampler.set_epoch(epoch)\n bar_format = '{desc}[{elapsed}<{remaining},{rate_fmt}]'\n pbar = tqdm(range(config.niters_per_epoch), file=sys.stdout,\n bar_format=bar_format)\n dataloader = iter(train_loader)\n for idx in pbar:\n engine.update_iteration(epoch, idx)\n\n minibatch = dataloader.next()\n imgs = minibatch['data']\n gts = minibatch['label']\n\n imgs = imgs.cuda(non_blocking=True)\n gts = gts.cuda(non_blocking=True)\n\n loss = model(imgs, gts)\n\n # reduce the whole loss over multi-gpu\n if engine.distributed:\n dist.all_reduce(loss, dist.ReduceOp.SUM)\n loss = loss / engine.world_size\n # else:\n # loss = 
Reduce.apply(*loss) / len(loss)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n current_idx = epoch * config.niters_per_epoch + idx\n lr = lr_policy.get_lr(current_idx)\n\n optimizer.param_groups[0]['lr'] = lr\n optimizer.param_groups[1]['lr'] = lr\n for i in range(2, len(optimizer.param_groups)):\n optimizer.param_groups[i]['lr'] = lr * 10\n\n print_str = 'Epoch{}/{}'.format(epoch, config.nepochs) \\\n + ' Iter{}/{}:'.format(idx + 1, config.niters_per_epoch) \\\n + ' lr=%.2e' % lr \\\n + ' loss=%.2f' % loss.item()\n\n pbar.set_description(print_str, refresh=False)\n\n if (epoch >= config.nepochs - 20) or (\n epoch % config.snapshot_iter == 0):\n if engine.distributed and (engine.local_rank == 0):\n engine.save_and_link_checkpoint(config.snapshot_dir,\n config.log_dir,\n config.log_dir_link)\n elif not engine.distributed:\n engine.save_and_link_checkpoint(config.snapshot_dir,\n config.log_dir,\n config.log_dir_link)\n",
"# encoding: utf-8\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os.path as osp\nimport sys\nimport time\nimport numpy as np\nfrom easydict import EasyDict as edict\nimport argparse\n\nimport torch.utils.model_zoo as model_zoo\n\nC = edict()\nconfig = C\ncfg = C\n\nC.seed = 12345\n\n\"\"\"please config ROOT_dir and user when u first using\"\"\"\nC.repo_name = 'TorchSeg'\nC.abs_dir = osp.realpath(\".\")\nC.this_dir = C.abs_dir.split(osp.sep)[-1]\nC.root_dir = C.abs_dir[:C.abs_dir.index(C.repo_name) + len(C.repo_name)]\nC.log_dir = osp.abspath(osp.join(C.root_dir, 'log', C.this_dir))\nC.log_dir_link = osp.join(C.abs_dir, 'log')\nC.snapshot_dir = osp.abspath(osp.join(C.log_dir, \"snapshot\"))\n\nexp_time = time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime())\nC.log_file = C.log_dir + '/log_' + exp_time + '.log'\nC.link_log_file = C.log_file + '/log_last.log'\nC.val_log_file = C.log_dir + '/val_' + exp_time + '.log'\nC.link_val_log_file = C.log_dir + '/val_last.log'\n\n\"\"\"Data Dir and Weight Dir\"\"\"\nC.dataset_path = \"./Cityscapes/\"\nC.img_root_folder = C.dataset_path\nC.gt_root_folder = C.dataset_path\nC.train_source = osp.join(C.dataset_path, \"config_new/train.txt\")\nC.eval_source = osp.join(C.dataset_path, \"config_new/val.txt\")\nC.test_source = osp.join(C.dataset_path, \"config_new/test.txt\")\nC.is_test = False\n\n\"\"\"Path Config\"\"\"\n\n\ndef add_path(path):\n if path not in sys.path:\n sys.path.insert(0, path)\n\nadd_path(osp.join(C.root_dir, 'furnace'))\n\n\"\"\"Image Config\"\"\"\nC.num_classes = 19\nC.background = -1\nC.image_mean = np.array([0.485, 0.456, 0.406]) # 0.485, 0.456, 0.406\nC.image_std = np.array([0.229, 0.224, 0.225])\nC.target_size = 1024\nC.base_size = 832\nC.image_height = 768\nC.image_width = 1536\nC.gt_down_sampling = 8\nC.num_train_imgs = 2975\nC.num_eval_imgs = 500\n\n\"\"\" Settings for network, this would be different for each kind of model\"\"\"\nC.fix_bias = True\nC.fix_bn = False\nC.sync_bn = True\nC.bn_eps = 1e-5\nC.bn_momentum = 0.1\nC.pretrained_model = None\n\n\"\"\"Train Config\"\"\"\nC.lr = 1e-2\nC.lr_power = 0.9\nC.momentum = 0.9\nC.weight_decay = 5e-4\nC.batch_size = 16 #4 * C.num_gpu\nC.nepochs = 140\nC.niters_per_epoch = 1000\nC.num_workers = 24\nC.train_scale_array = [0.5, 0.75, 1, 1.25, 1.5, 1.75]\n\n\"\"\"Eval Config\"\"\"\nC.eval_iter = 30\nC.eval_stride_rate = 2 / 3\nC.eval_scale_array = [1, ]\nC.eval_flip = False\nC.eval_height = 768\nC.eval_width = 1536\n\n\"\"\"Display Config\"\"\"\nC.snapshot_iter = 50\nC.record_info_iter = 20\nC.display_iter = 50\n\n\ndef open_tensorboard():\n pass\n\n\nif __name__ == '__main__':\n print(config.epoch_num)\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '-tb', '--tensorboard', default=False, action='store_true')\n args = parser.parse_args()\n\n if args.tensorboard:\n open_tensorboard()\n"
] |
[
[
"torch.cuda.manual_seed",
"torch.optim.SGD",
"torch.manual_seed",
"torch.cuda.set_device",
"torch.cuda.is_available",
"torch.distributed.all_reduce"
],
[
"torch.sigmoid",
"torch.cat",
"torch.nn.functional.interpolate",
"torch.bmm",
"torch.nn.functional.log_softmax",
"torch.nn.Conv2d",
"torch.nn.BCELoss",
"torch.ge",
"torch.nn.Dropout2d"
],
[
"torch.cuda.manual_seed",
"torch.optim.SGD",
"torch.manual_seed",
"torch.cuda.set_device",
"torch.cuda.is_available",
"torch.distributed.all_reduce"
],
[
"numpy.array"
]
] |
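The Cityscapes training script in the row above builds its pairwise similarity target by downsampling the label map, one-hot encoding it, and multiplying the flattened one-hot matrix by its transpose with `torch.bmm`, so entry (i, j) is 1 exactly when pixels i and j share a class. The sketch below reconstructs that step with toy shapes; it uses `torch.nn.functional.one_hot` instead of the repository's `seg_opr.seg_oprs.one_hot` helper and skips the handling of the 255 ignore label.

```python
import torch
import torch.nn.functional as F

num_classes = 19
gts = torch.randint(0, num_classes, (2, 32, 64))                    # toy label maps
b, h, w = gts.size()

scaled = F.interpolate(gts.view(b, 1, h, w).float(),
                       scale_factor=0.125, mode="nearest").long()   # 1/8 resolution
one_hot = F.one_hot(scaled.squeeze(1), num_classes).float()         # [b, h/8, w/8, C]
one_hot = one_hot.view(b, -1, num_classes)                          # [b, N, C]
similarity_gts = torch.bmm(one_hot, one_hot.permute(0, 2, 1))       # [b, N, N], 0/1 entries
```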
johnhw/complexitygraph
|
[
"2ed89bee365939665236fe434597f21144a68d96"
] |
[
"docs/mkplot.py"
] |
[
"from complexitygraph import complexity_graph\nimport matplotlib.pyplot as plt\n\ndef quadratic_time(n):\n s = 0\n for i in range(n):\n for j in range(n):\n s = s + 1\n\ncomplexity_graph(quadratic_time, range(1, 500, 20), reps=12, number=6)\nplt.savefig(\"quadratic.png\")\n"
] |
[
[
"matplotlib.pyplot.savefig"
]
] |
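`mkplot.py` above is already a complete usage example of `complexity_graph`; for contrast, the same call (identical signature and keyword arguments as above, which is the only API assumed here) applied to a linear-time function:

```python
from complexitygraph import complexity_graph
import matplotlib.pyplot as plt

def linear_time(n):
    s = 0
    for i in range(n):  # single loop, so roughly linear growth
        s = s + 1

complexity_graph(linear_time, range(1, 500, 20), reps=12, number=6)
plt.savefig("linear.png")
```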
kunwu522/cs584-project
|
[
"4f93229bb7d9b9e185d692b47192dfa46e51d1fa"
] |
[
"code/han_bias.py"
] |
[
"import os\n# os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\" # see issue #152\n# os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\"\n\nimport keras\nfrom keras.preprocessing import text\nfrom keras.engine.topology import Layer\nimport keras.layers as L\nfrom keras.models import Model\nfrom keras import initializers as initializers\nfrom keras import backend as K\n\nimport pandas as pd\nimport numpy as np\nfrom nltk.tokenize import sent_tokenize\n\nfrom glovevectorizer import load_glove_weights, generate_weights\n\n# BASE_DIR = '/home/kwu14/data/cs584_course_project'\nBASE_DIR = '../data/'\n\nVOCAB_SIZE = 10000\n\nMAX_SENTS = 43\nMAX_SENT_LEN = 300\n\nAUX_COLUMNS = ['severe_toxicity', 'obscene',\n 'identity_attack', 'insult', 'threat']\nIDENTITY_COLUMNS = [\n 'male', 'female', 'homosexual_gay_or_lesbian', 'christian', 'jewish',\n 'muslim', 'black', 'white', 'psychiatric_or_mental_illness'\n]\n\n\nclass AttentionLayer(Layer):\n \"\"\"\n Hierarchial Attention Layer as described by Hierarchical\n Attention Networks for Document Classification(2016)\n - Yang et. al.\n Source: \n https://www.cs.cmu.edu/~hovy/papers/16HLT-hierarchical-attention-networks.pdf\n Theano backend\n \"\"\"\n def __init__(self, attention_dim=100, return_coefficients=False, **kwargs):\n # Initializer\n self.supports_masking = True\n self.return_coefficients = return_coefficients\n self.init = initializers.get('glorot_uniform') # initializes values with uniform distribution\n self.attention_dim = attention_dim\n super(AttentionLayer, self).__init__(**kwargs)\n\n def build(self, input_shape):\n # Builds all weights\n # W = Weight matrix, b = bias vector, u = context vector\n assert len(input_shape) == 3\n self.W = K.variable(self.init((input_shape[-1], self.attention_dim)), name='W')\n self.b = K.variable(self.init((self.attention_dim, )), name='b')\n self.u = K.variable(self.init((self.attention_dim, 1)), name='u')\n self.trainable_weights = [self.W, self.b, self.u]\n\n super(AttentionLayer, self).build(input_shape)\n\n def compute_mask(self, input, input_mask=None):\n return None\n\n def call(self, hit, mask=None):\n # Here, the actual calculation is done\n uit = K.bias_add(K.dot(hit, self.W),self.b)\n uit = K.tanh(uit)\n \n ait = K.dot(uit, self.u)\n ait = K.squeeze(ait, -1)\n ait = K.exp(ait)\n \n if mask is not None:\n ait *= K.cast(mask, K.floatx())\n\n ait /= K.cast(K.sum(ait, axis=1, keepdims=True) + K.epsilon(), K.floatx())\n ait = K.expand_dims(ait)\n weighted_input = hit * ait\n \n if self.return_coefficients:\n return [K.sum(weighted_input, axis=1), ait]\n else:\n return K.sum(weighted_input, axis=1)\n\n def compute_output_shape(self, input_shape):\n if self.return_coefficients:\n return [(input_shape[0], input_shape[-1]), (input_shape[0], input_shape[-1], 1)]\n else:\n return input_shape[0], input_shape[-1]\n\n\ndef load_data():\n train_df = pd.read_csv(os.path.join(BASE_DIR, 'preprocessed_train.csv'))\n # Preprocess data\n for column in IDENTITY_COLUMNS + ['target']:\n train_df[column] = train_df[column].apply(\n lambda x: 1 if x >= 0.5 else 0)\n\n sample_weights = np.ones(train_df.shape[0], dtype=np.float32)\n sample_weights += train_df[IDENTITY_COLUMNS].sum(axis=1)\n sample_weights += train_df['target'] * \\\n (~train_df[IDENTITY_COLUMNS]).sum(axis=1)\n sample_weights += (~train_df['target']) * \\\n train_df[IDENTITY_COLUMNS].sum(axis=1) * 5\n sample_weights /= sample_weights.mean()\n\n text_train = train_df['comment_text'].astype(str).values\n y_train = train_df['target'].values\n y_aux_train = 
train_df[AUX_COLUMNS].values\n\n tk = text.Tokenizer(num_words=VOCAB_SIZE)\n tk.fit_on_texts(text_train)\n\n comments = []\n for t in text_train:\n comments.append(sent_tokenize(t))\n\n x_train = np.zeros((len(comments), MAX_SENTS, MAX_SENT_LEN),\n dtype='int32')\n for i, sents in enumerate(comments):\n for j, sent in enumerate(sents):\n if j >= MAX_SENTS:\n continue\n tokens = tk.texts_to_sequences(sent)\n k = 0\n for idx in tokens:\n if len(idx) == 0:\n continue\n if k < MAX_SENT_LEN and idx[0] < VOCAB_SIZE:\n x_train[i, j, k] = idx[0]\n k += 1\n\n embedding_matrix = generate_weights(\n load_glove_weights(os.path.join(BASE_DIR, 'glove.6B.300d.txt')),\n tk.word_index,\n VOCAB_SIZE - 1,\n )\n\n # load test data\n test_df = pd.read_csv(os.path.join(BASE_DIR, 'preprocessed_test.csv'))\n text_test = test_df['comment_text'].astype(str).values\n test_comments = []\n for t in text_test:\n test_comments.append(sent_tokenize(t))\n\n x_test = np.zeros((len(test_comments), MAX_SENTS, MAX_SENT_LEN),\n dtype='int32')\n for i, sents in enumerate(test_comments):\n for j, sent in enumerate(sents):\n if j >= MAX_SENTS:\n continue\n tokens = tk.texts_to_sequences(sent)\n k = 0\n for idx in tokens:\n if len(idx) == 0:\n continue\n if k < MAX_SENT_LEN and idx[0] < VOCAB_SIZE:\n x_test[i, j, k] = idx[0]\n k += 1\n\n return x_train, y_train, y_aux_train, test_df.id, x_test, \\\n embedding_matrix, sample_weights\n\n\ndef load_model(weights, hidden_size=100):\n # Words level attention model\n word_input = L.Input(shape=(MAX_SENT_LEN,), dtype='int32')\n word_sequences = L.Embedding(weights.shape[0], weights.shape[1], weights=[weights], input_length=MAX_SENT_LEN, trainable=False, name='word_embedding')(word_input)\n word_gru = L.Bidirectional(L.GRU(hidden_size, return_sequences=True))(word_sequences)\n word_dense = L.Dense(100, activation='relu', name='word_dense')(word_gru)\n word_att, word_coeffs = AttentionLayer(100, True, name='word_attention')(word_dense)\n wordEncoder = Model(inputs=word_input, outputs=word_att)\n\n # Sentence level attention model\n sent_input = L.Input(shape=(MAX_SENTS, MAX_SENT_LEN), dtype='int32', name='sent_input')\n sent_encoder = L.TimeDistributed(wordEncoder, name='sent_linking')(sent_input)\n sent_gru = L.Bidirectional(L.GRU(50, return_sequences=True))(sent_encoder)\n sent_dense = L.Dense(100, activation='relu', name='sent_dense')(sent_gru)\n sent_att, sent_coeffs = AttentionLayer(100, return_coefficients=True, name='sent_attention')(sent_dense)\n sent_drop = L.Dropout(0.5, name='sent_dropout')(sent_att)\n preds = L.Dense(1, activation='sigmoid', name='output')(sent_drop)\n\n # Model compile\n model = Model(sent_input, preds)\n model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['acc'])\n print(wordEncoder.summary())\n print(model.summary())\n return model\n\n\nif __name__ == \"__main__\":\n # hyper-paramters\n batch_size = 1024\n epochs = 50\n hidden_size = 128\n\n # load data\n x_train, y_train, y_aux_train, test_id,\\\n x_test, weights, sample_weights = load_data()\n\n checkpoint = keras.callbacks.ModelCheckpoint(\n 'han_bias_model.h5', save_best_only=True, verbose=1)\n es = keras.callbacks.EarlyStopping(patience=3, verbose=1)\n model = load_model(weights, hidden_size)\n history = model.fit(\n x_train, y_train,\n batch_size=batch_size,\n validation_split=0.2,\n epochs=epochs,\n callbacks=[es, checkpoint],\n verbose=2\n # sample_weight=[sample_weights.values, np.ones_like(sample_weights)]\n )\n\n # evaluation\n model.load_weights('han_bias_model.h5')\n test_preds 
= model.predict(x_test)\n\n submission = pd.DataFrame.from_dict({\n 'id': test_id,\n 'prediction': test_preds\n })\n submission.to_csv('han_bias_submission.csv', index=False)\n"
] |
[
[
"pandas.DataFrame.from_dict",
"numpy.ones"
]
] |
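`load_data` in the row above up-weights comments that mention an identity and comments whose toxicity label disagrees with those mentions. The following is an illustrative, toy-sized sketch of that weighting scheme, not the project's code: the four-row frame is invented, and `1 - x` is used for negation instead of the bitwise `~` that the original applies to its 0/1 integer columns.

```python
import numpy as np
import pandas as pd

df = pd.DataFrame.from_dict({
    "target": [1, 0, 0, 1],   # toxic / non-toxic labels (already binarised)
    "male":   [0, 1, 0, 0],   # identity-mention flags
    "female": [0, 0, 1, 1],
})
identity_cols = ["male", "female"]

weights = np.ones(len(df), dtype=np.float32)
weights += df[identity_cols].sum(axis=1)                            # any identity mention
weights += df["target"] * (1 - df[identity_cols]).sum(axis=1)       # toxic, identity absent
weights += (1 - df["target"]) * df[identity_cols].sum(axis=1) * 5   # non-toxic, identity present
weights /= weights.mean()
```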
mikeireland/chronostar
|
[
"fcf37614e1d145f3a5e265e54512bf8cd98051a0",
"fcf37614e1d145f3a5e265e54512bf8cd98051a0",
"fcf37614e1d145f3a5e265e54512bf8cd98051a0"
] |
[
"chronostar/tabletool.py",
"chronostar/naivefit-bak.py",
"chronostar/datatool.py"
] |
[
"\"\"\"\ntabletool.py\n\nA bunch of functions that help handle stellar data stored as\nastropy table.\n\"\"\"\n\nimport numpy as np\nfrom astropy.table import Table\nfrom astropy.units.core import UnitConversionError\nimport string\n\nfrom . import coordinate\nfrom . import transform\n\ndef load(filename, **kwargs):\n \"\"\"Cause I'm too lazy to import Astropy.table.Table in terminal\"\"\"\n return Table.read(filename, **kwargs)\n\ndef read(filename, **kwargs):\n \"\"\"Cause I'm too lazy to import Astropy.table.Table in terminal\"\"\"\n return load(filename, **kwargs)\n\n\ndef get_historical_cart_colnames():\n \"\"\"\n Colnames look like X, Y, Z...\n dX, dY, dZ\n c_XY, c_CU\n\n (as opposed to modern colnames:\n X, Y, Z...\n X_error, Y_error ...\n X_Y_corr,\n \"\"\"\n main_colnames = 'XYZUVW'\n error_colnames = ['d'+el for el in main_colnames]\n corr_colnames = []\n for i, colname1 in enumerate(main_colnames):\n for colname2 in main_colnames[i+1:]:\n corr_colnames.append('c_{}{}'.format(colname1, colname2))\n return main_colnames, error_colnames, corr_colnames\n\n\ndef get_colnames(main_colnames=None, error_colnames=None, corr_colnames=None,\n cartesian=True):\n \"\"\"\n Utility function for generating standard column names\n\n Parameters\n ----------\n main_colnames: [6] str array_like {None}\n The column names of the measurements. If left as None then\n if `cartesian` is true:\n ['X', 'Y', 'Z', 'U', 'V', 'W']\n if `cartesian` is false:\n ['ra', 'dec', 'parallax', 'pmra', 'pmdec', 'radial_velocity']\n error_colnames: [6] str array_like {None}\n The column names of the measurements. If left as None then\n we try to infer the names by appending '_error' to the main\n column names.\n corr_colnames: [15] str array_like {None}\n The column names of the correlations between the errors of\n each measurement pair. 
If left as None we try to infer the\n names by pairing each measurmenet and appending '_corr', e.g.:\n 'X_Y_corr'.\n\n Notes\n -----\n If all column names are provided as argument, this function does\n nothing.\n\n The default format for column names for errors and correlations is,\n e.g.:\n X_error, Y_error, ...\n X_Y_corr, X_Z_corr, X_U_corr, X_V_corr, X_W_corr, Y_Z_corr, ...\n The correlations are listed in the same way one would read the upper\n triangle of the correlation matrix, where the rows (and columns) of\n the matrix are in the same order as `main_colnames`.\n \"\"\"\n if main_colnames is None:\n if cartesian:\n # main_colnames = [el for el in 'XYZUVW']\n main_colnames = ['X', 'Y', 'Z', 'U', 'V', 'W']\n else: # provide astrometric column names\n main_colnames = [\n 'ra', 'dec', 'parallax', 'pmra', 'pmdec', 'radial_velocity',\n ]\n if error_colnames is None:\n error_colnames = [el+'_error' for el in main_colnames]\n if corr_colnames is None:\n corr_colnames = []\n for i, colname1 in enumerate(main_colnames):\n for colname2 in main_colnames[i + 1:]:\n corr_colnames.append('{}_{}_corr'.format(\n colname1, colname2\n ))\n return main_colnames, error_colnames, corr_colnames\n\n\ndef build_data_dict_from_table(table, main_colnames=None, error_colnames=None,\n corr_colnames=None, cartesian=True,\n historical=False, only_means=False,\n get_background_overlaps=True,\n background_colname=None,\n return_table_ixs=False):\n \"\"\"\n Use data in tale columns to construct arrays of means and covariance\n matrices.\n\n Parameters\n ----------\n table: astropy table -or- string\n The table (or path to table) which holds the required data\n main_colnames: [6] string array_like\n Set of column names of the main measurements\n e.g. ['ra', 'dec', 'parallax', 'pmra', 'pmdec', 'radial_velocity']\n would be the input for the default format of Gaia data\n error_colnames: [6] string array_like {None}\n Set of column names for the errors. If left as None will be\n generated by appending '_error' to each of the main_colnames\n corr_colnames: [15] string array_like {None}\n Set of column names for the pairwise correlations between each\n of the six main measurements. If left as None will be generated\n by joining each pair of main_colnames with an underscore then\n appending '_corr'.\n It is assumed that the correlation column names are given in\n a certain order based on input order of main_colnames.\n e.g. ['ra_dec_corr', 'ra_parallax_corr', ... 'ra_radial_velocity_corr',\n 'dec_parallax_corr', ... 'dec_radial_velocity_corr',\n 'parallax_pmra_corr' ... etc]\n cartesian: bool {True}\n Set to false if trying to build astrometric data\n historical: bool {True}\n Set to True if data set is from historical uses of chronostar. This\n function will then look for different column names by default\n only_means: bool {True}\n Set to True if only after the means of the data. This will save\n time by not building covariance matrices\n get_background_overlaps: bool {True}\n Set to True if after background overlaps too\n background_colname: str {None}\n Set which column name to use for background overlaps. If left as\n None, uses 'background_log_overlap' as default.\n return_table_ixs: boolean {False}\n If set, returns a mapping taking the indices of elements in dictionary\n to rows from original table. This is useful when table rows have\n been skipped due to missing data.\n Convert data to row indices for table assignment e.g. 
recording of\n membership to `comp_A` thusly:\n >>> my_table['comp_A'][table_ixs] = final_memb[:,0]\n\n Or to extract gaia ids of comp A members:\n >>> my_table['gaia_dr2'][table_ixs][np.where(final_memb[:,0]>0.5)]\n\n where `final_memb` is a [nstars, ncomps] array recording membership\n probabilities.\n\n Returns\n -------\n means: [n,6] float array_like\n Array of the mean measurements\n covs: [n,6,6] float array_like\n Array of the covariance matrix for each of the `n` measured objects\n Comment by Marusa: it is actually a dictionary that is returned.\n \"\"\"\n # Tidy up input\n if isinstance(table, str):\n table = Table.read(table)\n if historical:\n main_colnames, error_colnames, corr_colnames =\\\n get_historical_cart_colnames()\n else:\n main_colnames, error_colnames, corr_colnames = get_colnames(\n main_colnames=main_colnames, error_colnames=error_colnames,\n corr_colnames=corr_colnames, cartesian=cartesian\n )\n\n # Generate means\n if table.masked:\n raise UserWarning('Table is masked! Replace or remove problem columns')\n means = np.vstack([table[col] for col in main_colnames]).T\n if only_means:\n return means\n results_dict = {'means':means}\n\n # Generate covariance matrices\n nstars = len(table)\n standard_devs = np.vstack([table[col] for col in error_colnames]).T\n\n # Detect mismatch in units and scale standard_devs appropriately\n # If units can't be converted\n for ix, (main_colname, error_colname) in\\\n enumerate(zip(main_colnames, error_colnames)):\n if table[main_colname].unit != table[error_colname].unit:\n try:\n scale_factor =\\\n table[error_colname].unit.to(table[main_colname].unit)\n standard_devs[:,ix] *= scale_factor\n except UnitConversionError:\n print(main_colname, error_colname)\n raise UserWarning('Units are not convertible between '\n 'measurments and errors. Are you sure '\n 'you provided column names in a consistent '\n 'ordering?')\n except AttributeError:\n # Units haven't been provided. Which is allowed but discouraged\n pass\n\n # Initialise an array of 6x6 identity matrices\n covs = np.array(nstars * [np.eye(6)])\n\n # Then turn into correlation matrices by incorporating correlation columns\n indices = np.triu_indices(6,1) # the indices of the upper right\n # triangle, excluding main diagonal\n for ix in range(len(corr_colnames)):\n try:\n fst_ix = indices[0][ix]\n snd_ix = indices[1][ix]\n covs[:, fst_ix, snd_ix] = table[corr_colnames[ix]]\n covs[:, snd_ix, fst_ix] = table[corr_colnames[ix]]\n except KeyError: # Correlations are allowed to be missing\n pass\n\n # Now multiply through the standard deviations along both axes\n # First along each column\n # We use einstein notation here such that 'ijk,ij->ijk' means\n # multiply the 'ijk'th element from covs by the 'ij'th element from\n # standard_devs. 
More thoroughly: for the i'th covariance matrix,\n # and the i'th 6D standard deviation vector, multiply the j'th row\n # by the j'th std\n covs = np.einsum('ijk,ij->ijk', covs, standard_devs) # the rows\n covs = np.einsum('ijk,ik->ijk', covs, standard_devs) # the columsn\n results_dict['covs'] = covs\n\n # Checks for any nans in the means or covariances\n bad_mean_mask = np.any(np.isnan(means), axis=1)\n bad_cov_mask = np.any(np.isnan(covs), axis=(1,2))\n\n good_row_mask = np.logical_not(np.logical_or(bad_mean_mask, bad_cov_mask))\n \n # Notify what stars have been excluded!\n nexcluded = np.sum(np.logical_not(good_row_mask))\n if nexcluded>0:\n print('%d stars MASKED OUT!'%nexcluded)\n print(np.where(np.logical_not(good_row_mask)))\n print(table[np.logical_not(good_row_mask)])\n\n results_dict = {\n 'means':means[good_row_mask],\n 'covs':covs[good_row_mask],\n }\n\n # Insert background overlaps\n if get_background_overlaps:\n if background_colname is None:\n background_colname = 'background_log_overlap'\n if background_colname in table.colnames:\n results_dict['bg_lnols'] = np.array(table[background_colname])[good_row_mask]\n\n if return_table_ixs:\n return results_dict, np.where(good_row_mask)\n else:\n return results_dict\n\ndef construct_an_astropy_table_with_gaia_ids_and_membership_probabilities(table, \n memb_probs, comps, output_filename, get_background_overlaps=True, stellar_id_colname=None, overwrite_fits=False):\n \"\"\"\n MZ 2020 - 04 - 16\n Create an astropy table with Gaia DR2 ids and membership probabilities\n for all components, including background.\n \n This shoul NOT append to the original table because the number of\n components is increasing each iteration.\n Parameters\n ----------\n table: astropy table -or- string\n The table (or path to table) which holds the required data\n get_background_overlaps: bool {True}\n Set to True if after background overlaps too\n \n Returns\n -------\n None\n \n \"\"\"\n\n # Read table\n if isinstance(table, str):\n table = Table.read(table)\n ids = table[stellar_id_colname]\n tab = Table((ids,), names=(stellar_id_colname,))\n\n # compnames\n # TODO: This should be generated once in the component class!!\n ncomps = len(comps)\n if ncomps>26:\n print('*** number of components>26, cannot name them properly with letters.')\n abc=string.ascii_uppercase\n compnames = [abc[i] for i in range(ncomps)]\n\n # Membership\n for i, c in enumerate(compnames):\n tab['membership%s'%c.replace('comp', '')] = memb_probs[:,i]\n\n #~ todo='background_log_overlap'\n if get_background_overlaps:\n tab['membership_bg'] = memb_probs[:,-1]\n\n print(tab)\n tab.write(output_filename, format='fits', overwrite=overwrite_fits)\n\n #add number of components in the file. and a timestamp or random number so nothing gets overwritten.\n\n\ndef append_cart_cols_to_table(table, main_colnames=None, error_colnames=None,\n corr_colnames=None):\n \"\"\"\n Insert empty place holder columns for cartesian values\n\n Parameters\n ----------\n table: astropy.table.Table object\n Modifies table in place by appending empty columns for cartesian\n values. 
Default values in column are `np.nan`.\n main_colnames: str {None}\n See\n\n\n Returns\n -------\n None\n \"\"\"\n # Tidy input\n if isinstance(table, str):\n table = Table.read(table)\n main_colnames, error_colnames, corr_colnames =\\\n get_colnames(main_colnames, error_colnames, corr_colnames,\n cartesian=True)\n\n # Set up order of column names in table\n cart_colnames = []\n for measure, error in zip(main_colnames, error_colnames):\n cart_colnames.append(measure)\n cart_colnames.append(error)\n for corr in corr_colnames:\n cart_colnames.append(corr)\n\n # Insert blank rows (default value 'np.nan') with appropriate units\n nrows = len(table)\n empty_col = np.array(nrows * [np.nan])\n units = 6*['pc'] + 6*['km/s'] + 15*[None]\n for col_name, unit in zip(cart_colnames, units):\n table[col_name] = empty_col\n table[col_name].unit = unit\n\n\ndef convert_astro2cart(astr_mean, astr_cov):\n \"\"\"\n Convert astrometry data (mean and covariance) into cartesian\n coordinates, centred on the local standard of rest (Schoenrich 2010).\n\n Parameters\n ----------\n astr_mean: [6] float array_like\n The central estimate of a star's astrometry values. Provided in\n the order:\n ra [deg]\n dec [deg]\n parallax [mas]\n pmra*cos(dec) [mas/yr]\n pmdec [mas/yr]\n radial velocity [km/s]\n astr_cov: [6,6] float array_like\n The covariance matrix of the measurments with columns (and rows)\n in same order as `astr_mean`.\n\n Returns\n -------\n xyzuvw_mean: [6] float array_like\n The cartesian mean (XYZUVW)\n xyzuvw_cov: [6,6] float array_like\n The carteisan covariance matrix\n \"\"\"\n xyzuvw_mean = coordinate.convert_astrometry2lsrxyzuvw(astr_mean)\n xyzuvw_cov = transform.transform_covmatrix(\n cov=astr_cov, trans_func=coordinate.convert_astrometry2lsrxyzuvw,\n loc=astr_mean\n )\n\n return xyzuvw_mean, xyzuvw_cov\n\n\ndef insert_data_into_row(row, mean, cov, main_colnames=None, error_colnames=None,\n corr_colnames=None, cartesian=True):\n \"\"\"\n Insert data, error and correlations into a single row\n\n Given the mean and covariance matrix, we derive the standard\n deviations in each dimension as well as each pair-wise correlation,\n which are then inserted into the row (as per the provided column names).\n\n The columns must already exist!\n\n Parameters\n row: astropy table row\n The row in which the data will be inserted, with required columns\n already existing\n mean: [6] float array\n The mean of data\n cov: [6,6] float array\n The covariance matrix of data\n \"\"\"\n\n main_colnames, error_colnames, corr_colnames = get_colnames(\n main_colnames, error_colnames, corr_colnames, cartesian=cartesian\n )\n # Insert mean data\n for ix, main_colname in enumerate(main_colnames):\n row[main_colname] = mean[ix]\n\n # Insert errors\n standard_devs = np.sqrt(np.diagonal(cov))\n for ix, error_colname in enumerate(error_colnames):\n row[error_colname] = standard_devs[ix]\n\n # Build correlation matrix by dividing through by stdevs in both axes\n corr_matrix = cov / standard_devs / standard_devs.reshape(6, 1)\n\n # Insert correlations\n indices = np.triu_indices(6,1) # the indices of the upper right\n # triangle, excluding main diagonal\n for ix in range(len(corr_colnames)):\n try:\n fst_ix = indices[0][ix]\n snd_ix = indices[1][ix]\n row[corr_colnames[ix]] = corr_matrix[fst_ix, snd_ix]\n except KeyError:\n # It's fine if some correlation columns are missing\n pass\n\n\ndef insert_column(table, col_data, col_name, filename=''):\n \"\"\"\n Little helper to insert column data\n\n Parameters\n ----------\n table: 
astropy table\n the table in which the new column will be inserted\n col_data: array_like\n An array of the column data. Must be same length as table\n (we don't check this)\n col_name: str\n The name of the new column\n filename: str {''}\n If not empty, save the new table to file\n\n Returns\n -------\n table: astropy table\n The same table, with the modification.\n \"\"\"\n table[col_name] = col_data\n if filename != '':\n # TODO work out proper way to writ etables that is consistnet across python\n # Table.write(table, filename, overwrite=True, format='ascii') # TC\n table.write(filename, overwrite=True)\n return table\n\n\ndef convert_table_astro2cart(table, return_table=False, write_table=False,\n astr_main_colnames=None,\n astr_error_colnames=None,\n astr_corr_colnames=None,\n cart_main_colnames=None,\n cart_error_colnames=None,\n cart_corr_colnames=None,\n filename=''):\n \"\"\"\n Use this function to convert astrometry data to cartesian data.\n\n Parameters\n ----------\n table: astropy table (or string)\n The table with astrometry data (and radial velocities), either\n with column names consistent with defaults, or provided as input.\n If column names aren't specified we assume the measurements\n have column names:\n ['ra', 'dec', 'parallax', 'pmra', 'pmdec', 'radial_velocity']\n With the error column names:\n ['ra_error', 'dec_error', ... ]\n And correlation column names:\n ['ra_dec_corr', 'ra_parallax_corr', 'ra_pmra_corr' ... ,\n 'dec_parallax_corr', 'dec_pmra_corr' ... ,\n 'parallax_pmra_corr', ... ,\n ... ]\n return_table: bool {False}\n Whether to return the converted table\n write_table: bool {False}\n Whether to write the converted table to filename. It is not\n sufficient to simply supply a filename to write as we do not\n want to risk overwriting someone's table (even though we simply\n extend with new columns).\n main_colnames: [6] string array_like\n Set of column names of the main measurements\n e.g. ['ra', 'dec', 'parallax', 'pmra', 'pmdec', 'radial_velocity']\n would be the input for the default format of Gaia data\n error_colnames: [6] string array_like {None}\n Set of column names for the errors. If left as None will be\n generated by appending '_error' to each of the main_colnames\n corr_colnames: [15] string array_like {None}\n Set of column names for the pairwise correlations between each\n of the six main measurements. If left as None will be generated\n by joining each pair of main_colnames with an underscore then\n appending '_corr'.\n It is assumed that the correlation column names are given in\n a certain order based on input order of main_colnames.\n e.g. ['ra_dec_corr', 'ra_parallax_corr', ... 'ra_radial_velocity_corr',\n 'dec_parallax_corr', ... 'dec_radial_velocity_corr',\n 'parallax_pmra_corr' ... 
etc]\n filename: str {''}\n Save filename for storing the resulting table\n\n Returns\n -------\n res: astropy table\n If `return_table` flag is set, will return the resulting\n astropy table\n \"\"\"\n if isinstance(table, str):\n if filename and not write_table:\n raise UserWarning('Specify how to handle result, I won\\'t'\n 'overwrite without explicit permission.')\n filename = table\n table = Table.read(table)\n\n # Get astrometric column names\n astr_main_colnames, astr_error_colnames, astr_corr_colnames =\\\n get_colnames(main_colnames=astr_main_colnames,\n error_colnames=astr_error_colnames,\n corr_colnames=astr_corr_colnames,\n cartesian=False)\n\n data = build_data_dict_from_table(table,\n astr_main_colnames,\n astr_error_colnames,\n astr_corr_colnames)\n\n # Establish what column names are used\n cart_main_colnames, cart_error_colnames, cart_corr_colnames = \\\n get_colnames(cart_main_colnames,\n cart_error_colnames,\n cart_corr_colnames,\n cartesian=True)\n\n # if cartesian columns don't exist, then insert them\n if cart_corr_colnames[0] not in table.keys():\n append_cart_cols_to_table(table,\n cart_main_colnames,\n cart_error_colnames,\n cart_corr_colnames)\n\n # Iteratively transform data to cartesian coordinates, storing as we go\n for row, astr_mean, astr_cov in zip(table, data['means'], data['covs']):\n cart_mean, cart_cov = convert_astro2cart(astr_mean, astr_cov)\n insert_data_into_row(row, cart_mean, cart_cov,\n main_colnames=cart_main_colnames,\n error_colnames=cart_error_colnames,\n corr_colnames=cart_corr_colnames\n )\n\n # Save data\n if filename and write_table:\n table.write(filename, overwrite=True)\n\n if return_table:\n return table\n\n",
"\"\"\"\nnaivefit.py\n\n\nA NaiveFit follows the approach described in Crundall et al. (2019).\n\nNaiveFit begins with an initial guess provided by user of an N component fit.\nIf no guess is provided, all provided stars are assumed to be members of one\ncomponent.\n\nNaiveFit will perform an Expectation Maximisation on this N component fit until\nconverged.\n\nThen NaiveFit will test increasing the compoennt count to N+1. This is done by\nfor each component out of the N existing, substituting it for 2 similar\ncomponents with slight age offsets, and running an EM fit. The result\nis N separate \"N+1 component\" fits. The best one will be compared to the\n\"N component\" fit using the Bayesian Information Criterion (BIC). If the\nBIC has improved, this \"N+1 component fit\" will be taken as the best fit so far.\n\nThis process iterates until adding a component fails to yield a better fit.\n\"\"\"\nimport numpy as np\nimport os\nimport sys\nimport logging\nfrom distutils.dir_util import mkpath\nimport random\nimport uuid\n\n#~ from emcee.utils import MPIPool\nfrom multiprocessing import Pool\n\nfrom multiprocessing import cpu_count\n\nsys.path.insert(0, os.path.abspath('..'))\nfrom . import expectmax\nfrom . import readparam\nfrom . import tabletool\nfrom . import component\nfrom . import traceorbit\n\n# python3 throws FileNotFoundError that is essentially the same as IOError\ntry:\n FileNotFoundError\nexcept NameError:\n FileNotFoundError = IOError\n\ndef dummy_trace_orbit_func(loc, times=None):\n \"\"\"\n Purely for testing purposes\n\n Dummy trace orbit func to skip irrelevant computation\n A little constraint on age (since otherwise its a free floating\n parameter)\n \"\"\"\n if times is not None:\n if np.all(times > 1.):\n return loc + 1000.\n return loc\n\n\ndef log_message(msg, symbol='.', surround=False):\n \"\"\"Little formatting helper\"\"\"\n res = '{}{:^40}{}'.format(5 * symbol, msg, 5 * symbol)\n if surround:\n res = '\\n{}\\n{}\\n{}'.format(50 * symbol, res, 50 * symbol)\n logging.info(res)\n\n\nclass NaiveFit(object):\n \"\"\"\n Many arguments can be taken straight from the fit_pars dictionary,\n so no point explicitly looking for them.\n\n Description of parameters can be found in README.md along with their\n default values and whether they are required.\n \"\"\"\n\n # Internal filestems that Chronostar uses to store results throughout a fit\n # Should not be changed, otherwise Chronostar may struggle to retreive progress\n # from previous fits.\n final_comps_file = 'final_comps.npy'\n final_med_and_spans_file = 'final_med_and_spans.npy'\n final_memb_probs_file = 'final_membership.npy'\n\n\n # For detailed description of parameters, see the main README.md file\n # in parent directory.\n DEFAULT_FIT_PARS = {\n 'results_dir':'',\n\n # Output from dataprep, XYZUVW data, plus background overlaps\n # Can be a filename to a astropy table, or an actual table\n 'data_table':None,\n\n # Whether to look for dX, .. c_XY or X_error, .. corr_X_Y in\n # the column names\n 'historical_colnames':False,\n\n # Column name for stellar IDs. This is used at the end when generating\n # final fits table with IDs and membership probabilities.\n # This is optional.\n 'stellar_id_colname': None,\n\n # File name that points to a stored list of components, typically from\n # a previous fit. Some example filenames could be:\n # - 'some/prev/fit/final_comps.npy\n # - 'some/prev/fit/2/A/final_comps.npy\n # Alternatively, if you already have the list of components, just\n # provide them to `init_comps`. 
Don't do both.\n # 'init_comps_file':None, # TODO: Is this redundant with 'init_comps'\n 'init_comps':None,\n\n # One of these two are required if initialising a run with ncomps != 1\n\n # One can also initialise a Chronostar run with memberships.\n # Array is [nstars, ncomps] float array\n # Each row should sum to 1.\n # Same as in 'final_membership.npy'\n # TODO: implement this in a way that info can be passed in from text file\n # e.g. a path to a file name\n # for now, can only be used from within a script, i.e. given a numpy\n # array object\n 'init_memb_probs':None,\n\n # Provide a string name that corresponds to a ComponentClass\n # An actual Component Class will be inserted into the paramter\n # dictionary to be passed into expectmax\n 'component':'sphere',\n\n 'max_comp_count':20,\n 'max_em_iterations':200,\n 'nthreads':1, # TODO: NOT IMPLEMENTED\n 'use_background':True,\n\n 'overwrite_prev_run':False,\n 'burnin':500,\n 'sampling_steps':1000,\n 'store_burnin_chains':False,\n 'ignore_stable_comps':True,\n\n # If loading parameters from text file, can provide strings:\n # - 'epicyclic' for epicyclic\n # - 'dummy_trace_orbit_func' for a trace orbit funciton that doens't do antyhing (for testing)\n # Alternativley, if building up parameter dictionary in a script, can\n # provide actual function.\n 'trace_orbit_func':traceorbit.trace_cartesian_orbit,\n \n # MZ\n # Specify what optimisation method in the maximisation step of\n # the EM algorithm to use. Default: emcee. Also available:\n # In principle any method from scipy.optimise.minimise, but \n # here we recommend Nelder-Mead (because the initialisation\n # with any additional arguments, e.g. Jacobian etc. is not \n # implemented in Chronostar).\n # 'emcee' | 'Nelder-Mead'\n 'optimisation_method': 'emcee',\n \n # Optimise components in parallel in expectmax.maximise.\n 'nprocess_ncomp': False,\n \n # Overwrite final results in a fits file\n 'overwrite_fits': False,\n \n # How to split group: in age or in space?\n 'split_group': 'age',\n\n 'par_log_file':'fit_pars.log',\n }\n\n def __init__(self, fit_pars):\n \"\"\"\n Parameters\n ----------\n fit_pars : str -or- dictionary\n If a string, `fit_pars` should be a path to a parameter file which\n can be parsed by readparam.readParam, to construct a dictionary.\n Alternatively, an actual dictionary can be passed in. 
See README.md\n for a description of parameters.\n \"\"\"\n # Parse parameter file if required\n if type(fit_pars) is str:\n fit_pars = readparam.readParam(fit_pars, default_pars=self.DEFAULT_FIT_PARS)\n\n # Make a new dictionary, with priority given to contents of fit_pars\n self.fit_pars = dict(self.DEFAULT_FIT_PARS)\n self.fit_pars.update(fit_pars)\n assert type(self.fit_pars) is dict\n\n # MZ: Make sure 'par_log_file' is written into the results folder\n self.fit_pars['par_log_file'] = os.path.join(self.fit_pars['results_dir'], self.fit_pars['par_log_file'])\n\n # Data prep should already have been completed, so we simply build\n # the dictionary of arrays from the astropy table\n self.data_dict = tabletool.build_data_dict_from_table(self.fit_pars['data_table'],\n historical=self.fit_pars['historical_colnames'])\n\n # The NaiveFit approach is to assume starting with 1 component\n self.ncomps = 1\n\n # Import suitable component class\n if self.fit_pars['component'] == 'sphere':\n self.Component = component.SphereComponent\n self.fit_pars['Component'] = component.SphereComponent\n elif self.fit_pars['component'] == 'ellip':\n self.Component = component.EllipComponent\n self.fit_pars['Component'] = component.EllipComponent\n else:\n raise UserWarning('Unknown (or missing) component parametrisation')\n\n # Check results directory is valid\n # If path exists, make a new results_directory with a random int\n if os.path.exists(self.fit_pars['results_dir']) and \\\n not self.fit_pars['overwrite_prev_run']:\n rdir = '{}_{}'.format(self.fit_pars['results_dir'].rstrip('/'),\n random.randint(0, 1000))\n else:\n rdir = self.fit_pars['results_dir']\n self.rdir = rdir.rstrip('/') + '/'\n mkpath(self.rdir)\n assert os.access(self.rdir, os.W_OK)\n\n # Log fit parameters,\n readparam.log_used_pars(self.fit_pars, default_pars=self.DEFAULT_FIT_PARS)\n\n # Now that results directory is set up, can set up log file\n logging.basicConfig(filename=self.rdir + 'log.log', level=logging.INFO)\n\n # Make some logs about how many iterations (+ other stuff) code can run for\n log_message(msg='Component count cap set to {}'.format(\n self.fit_pars['max_comp_count']),\n symbol='+', surround=True)\n log_message(msg='Iteration count cap set to {}'.format(\n self.fit_pars['max_em_iterations']),\n symbol='+', surround=True)\n print('printed')\n\n # Check nthreads does not exceed hardware\n if self.fit_pars['nthreads'] > cpu_count() - 1:\n raise UserWarning('Provided nthreads exceeds cpu count on this machine. 
'\n 'Remember to leave one cpu free for master thread!')\n\n # MZ: If nthreads>1: create an MPIPool\n if self.fit_pars['nthreads']>1:\n #self.pool = MPIPool()\n log_message('pool = Pool(nthreads) = pool(%d)'%self.fit_pars['nthreads'])\n self.fit_pars['pool']=Pool(self.fit_pars['nthreads'])\n else:\n self.pool = None\n\n # ------------------------------------------------------------\n # ----- SETTING UP RUN CUSTOMISATIONS ----------------------\n # ------------------------------------------------------------\n\n # Set up trace_orbit_func\n if self.fit_pars['trace_orbit_func'] == 'dummy_trace_orbit_func':\n self.fit_pars['trace_orbit_func'] = dummy_trace_orbit_func\n elif self.fit_pars['trace_orbit_func'] == 'epicyclic':\n log_message('trace_orbit: epicyclic')\n self.fit_pars['trace_orbit_func'] = traceorbit.trace_epicyclic_orbit\n else:\n self.fit_pars['trace_orbit_func'] = traceorbit.trace_cartesian_orbit\n\n if type(self.fit_pars['init_comps']) is str:\n self.fit_pars['init_comps'] = self.Component.load_raw_components(\n self.fit_pars['init_comps'])\n self.ncomps = len(self.fit_pars['init_comps'])\n print('Managed to load in init_comps from file')\n else:\n self.fit_pars['init_comps'] = None\n print(\"'Init comps' is initialised as none\")\n\n # TODO: If initialising with membership probabilities, adjust self.ncomps\n\n\n def build_comps_from_chains(self, run_dir):\n \"\"\"\n Build component objects from stored emcee chains and corresponding\n lnprobs.\n\n Parameters\n ----------\n run_dir: str\n Directory of an EM fit, which in the context of NaiveFit will be\n e.g. 'myfit/1', or 'myfit/2/A'\n\n Returns\n -------\n comps: [Component]\n A list of components that correspond to the best fit from the\n run in question.\n \"\"\"\n logging.info('Component class has been modified, reconstructing '\n 'from chain')\n\n comps = self.ncomps * [None]\n for i in range(self.ncomps):\n final_cdir = run_dir + 'final/comp{}/'.format(i)\n chain = np.load(final_cdir + 'final_chain.npy')\n lnprob = np.load(final_cdir + 'final_lnprob.npy')\n npars = len(self.Component.PARAMETER_FORMAT)\n best_ix = np.argmax(lnprob)\n best_pars = chain.reshape(-1, npars)[best_ix]\n comps[i] = self.Component(emcee_pars=best_pars)\n self.Component.store_raw_components(\n str(run_dir + 'final/' + self.final_comps_file),\n comps)\n\n return comps\n\n\n def log_score_comparison(self, prev, new):\n \"\"\"\n Purely a logging helper function.\n Log BIC comparisons.\n\n Parameters\n ----------\n prev: dict\n A dictionary of scores from the previous run with the following entries\n - bic: the Bayesian Information Criterion\n - lnlike : the log likelihood\n - lnpost : the log posterior\n new: dict\n A dictionary of scores from the new run, with identical entries as\n `prev`\n\n Result\n ------\n None\n \"\"\"\n if new['bic'] < prev['bic']:\n logging.info(\"Extra component has improved BIC...\")\n logging.info(\n \"New BIC: {} < Old BIC: {}\".format(new['bic'], prev['bic']))\n else:\n logging.info(\"Extra component has worsened BIC...\")\n logging.info(\n \"New BIC: {} > Old BIC: {}\".format(new['bic'], prev['bic']))\n\n logging.info(\"lnlike: {} | {}\".format(new['lnlike'], prev['lnlike']))\n logging.info(\"lnpost: {} | {}\".format(new['lnpost'], prev['lnpost']))\n\n\n def build_init_comps(self, prev_comps, split_comp_ix, prev_med_and_spans,\n memb_probs):\n \"\"\"\n Given a list of converged components from an N component fit, generate\n a list of N+1 components with which to initialise an EM run.\n\n This is done by taking the target 
component, `prev_comps[comp_ix]`,\n replacing it in the list of comps, by splitting it into two components\n with a lower and higher age,\n\n Parameters\n ----------\n prev_comps : [N] list of Component objects\n List of components from the N component fit\n split_comp_ix : int\n The index of component which is to be split into two\n prev_med_and_spans : [ncomps,npars,3] np.array\n The median and spans of\n\n Return\n ------\n init_comps: [N+1] list of Component objects\n\n Side effects\n ------------\n Updates self.fit_pars['init_comps'] with a [N+1] list of Component\n objects\n \"\"\"\n target_comp = prev_comps[split_comp_ix]\n\n assert isinstance(target_comp, self.Component)\n # Decompose and replace the ith component with two new components\n # by using the 16th and 84th percentile ages from previous run\n \n if self.fit_pars['split_group']=='age':\n if self.fit_pars['optimisation_method']=='emcee':\n split_comps = target_comp.split_group_age(\n lo_age=prev_med_and_spans[split_comp_ix, -1, 1],\n hi_age=prev_med_and_spans[split_comp_ix, -1, 2])\n elif self.fit_pars['optimisation_method']=='Nelder-Mead':\n age = target_comp.get_age()\n split_comps = target_comp.split_group_age( # TODO: Maybe even smaller change\n lo_age=0.8*age,\n hi_age=1.2*age)\n elif self.fit_pars['split_group']=='spatial':\n split_comps = target_comp.split_group_spatial(self.data_dict, \n memb_probs[:,split_comp_ix])\n \n init_comps = list(prev_comps)\n init_comps.pop(split_comp_ix)\n init_comps.insert(split_comp_ix, split_comps[1])\n init_comps.insert(split_comp_ix, split_comps[0])\n\n return init_comps\n\n\n def run_em_unless_loadable(self, run_dir):\n \"\"\"\n Run and EM fit, but only if not loadable from a previous run\n\n \"\"\"\n try:\n # This fails when gradient descent is used and med_and_spans are not meaningful.\n try:\n med_and_spans = np.load(os.path.join(run_dir, 'final/', self.final_med_and_spans_file))\n except ValueError:\n logging.info('med_and_spans not read. 
Presumably you are using gradient descent optimisation procedure?')\n med_and_spans = [None]\n memb_probs = np.load(os.path.join(\n run_dir, 'final/', self.final_memb_probs_file))\n comps = self.Component.load_raw_components(\n str(os.path.join(run_dir, 'final/', self.final_comps_file)))\n logging.info('Loaded from previous run')\n\n # Handle case where Component class has been modified and can't\n # load the raw components\n except AttributeError:\n # TODO: check that the final chains looked for are guaranteed to be saved\n comps = self.build_comps_from_chains(run_dir)\n\n # Handle the case where files are missing, which means we must\n # perform the fit.\n #~ except (IOError, FileNotFoundError) as e:\n except IOError:\n comps, med_and_spans, memb_probs = \\\n expectmax.fit_many_comps(data=self.data_dict,\n ncomps=self.ncomps, rdir=run_dir,\n **self.fit_pars)\n\n # Since init_comps and init_memb_probs are only meant for one time uses\n # we clear them to avoid any future usage\n self.fit_pars['init_comps'] = None\n self.fit_pars['init_memb_probs'] = None\n\n return {'comps':comps, 'med_and_spans':med_and_spans, 'memb_probs':memb_probs}\n\n\n def iter_end_log(self, best_split_ix, prev_result, new_result):\n logging.info(\"Selected {} as best decomposition\".format(\n chr(ord('A') + best_split_ix)))\n logging.info(\n \"Turned\\n{}\".format(prev_result['comps'][best_split_ix].get_pars()))\n logging.info('with {} members'.format(\n prev_result['memb_probs'].sum(axis=0)[best_split_ix]))\n logging.info(\"into\\n{}\\n&\\n{}\".format(\n new_result['comps'][best_split_ix].get_pars(),\n new_result['comps'][best_split_ix + 1].get_pars(),\n ))\n logging.info('with {} and {} members'.format(\n new_result['memb_probs'].sum(axis=0)[best_split_ix],\n new_result['memb_probs'].sum(axis=0)[best_split_ix + 1],\n ))\n logging.info(\"for an overall membership breakdown\\n{}\".format(\n new_result['memb_probs'].sum(axis=0)\n ))\n\n\n def log_final_log(self, prev_result, prev_score):\n logging.info('Final best fits:')\n [logging.info(c.get_pars()) for c in prev_result['comps']]\n logging.info('Final age med and span:')\n if self.fit_pars['optimisation_method']=='emcee':\n [logging.info(row[-1]) for row in prev_result['med_and_spans']]\n logging.info('Membership distribution: {}'.format(\n prev_result['memb_probs'].sum(axis=0)))\n logging.info('Final membership:')\n logging.info('\\n{}'.format(np.round(prev_result['memb_probs'] * 100)))\n logging.info('Final lnlikelihood: {}'.format(prev_score['lnlike']))\n logging.info('Final lnposterior: {}'.format(prev_score['lnpost']))\n logging.info('Final BIC: {}'.format(prev_score['bic']))\n logging.info('#########################')\n logging.info('### END #################')\n logging.info('#########################')\n\n\n def calc_score(self, comps, memb_probs):\n \"\"\"\n Calculate global score of fit for comparison with future fits with different\n component counts\n\n Parameters\n ----------\n :param comps:\n :param memb_probs:\n :return:\n\n TODO: Establish relevance of bg_ln_ols\n \"\"\"\n lnlike = expectmax.get_overall_lnlikelihood(self.data_dict,\n comps,\n old_memb_probs=memb_probs,\n # bg_ln_ols=bg_ln_ols,\n )\n lnpost = expectmax.get_overall_lnlikelihood(self.data_dict,\n comps,\n # bg_ln_ols=bg_ln_ols,\n old_memb_probs=memb_probs,\n inc_posterior=True)\n\n bic = expectmax.calc_bic(self.data_dict, self.ncomps, lnlike,\n memb_probs=memb_probs,\n Component=self.Component)\n\n return {'bic':bic, 'lnlike':lnlike, 'lnpost':lnpost}\n\n\n def run_fit(self):\n 
\"\"\"\n Perform a fit (as described in Paper I) to a set of prepared data.\n\n Results are outputted as two dictionaries\n results = {'comps':best_fit, (list of components)\n 'med_and_spans':median and spans of model parameters,\n 'memb_probs': membership probability array (the standard one)}\n scores = {'bic': the bic,\n 'lnlike': log likelihood of that run,\n 'lnpost': log posterior of that run}\n \"\"\"\n\n log_message('Beginning Chronostar run',\n symbol='_', surround=True)\n\n # ------------------------------------------------------------\n # ----- EXECUTE RUN ----------------------------------------\n # ------------------------------------------------------------\n\n if self.fit_pars['store_burnin_chains']:\n log_message(msg='Storing burnin chains', symbol='-')\n\n # ------------------------------------------------------------\n # ----- STAGE 1: ESTABLISHING INITIAL FIT -----------\n # ------------------------------------------------------------\n\n # Handle special case of very first run\n # Either by fitting one component (default) or by using `init_comps`\n # to initialise the EM fit.\n\n # Check if not provided with init comps or membs\n if (self.fit_pars['init_comps'] is None) and (self.fit_pars['init_memb_probs'] is None):\n # NaiveFit doesn't know how to blindly intiialise runs with ncomps > 1\n assert self.ncomps == 1, 'If no initialisation set, can only accept ncomp==1'\n # If no init conditions provided, assume all stars are members and begine\n # fit with 1 component.\n init_memb_probs = np.zeros((len(self.data_dict['means']),\n self.ncomps + self.fit_pars[\n 'use_background']))\n init_memb_probs[:, 0] = 1.\n # Otherwise, we must have been given an init_comps, or an init_memb_probs\n # to start things with\n else:\n log_message(msg='Initialising with init_comps or init_memb_probs with'\n '%i components'%self.ncomps, symbol='*', surround=True)\n pass\n\n log_message(msg='FITTING {} COMPONENT'.format(self.ncomps),\n symbol='*', surround=True)\n run_dir = self.rdir + '{}/'.format(self.ncomps)\n\n prev_result = self.run_em_unless_loadable(run_dir)\n prev_score = self.calc_score(prev_result['comps'], prev_result['memb_probs'])\n\n self.ncomps += 1\n\n # ------------------------------------------------------------\n # ----- STAGE 2: EXPLORE EXTRA COMPONENT BY DECOMPOSITION --\n # ------------------------------------------------------------\n\n # Calculate global score of fit for comparison with future fits with different\n # component counts\n\n # Begin iterative loop, each time trialing the incorporation of a new component\n while self.ncomps <= self.fit_pars['max_comp_count']:\n log_message(msg='FITTING {} COMPONENT'.format(self.ncomps),\n symbol='*', surround=True)\n\n all_results = []\n all_scores = []\n\n # Iteratively try subdividing each previous component\n # target_comp is the component we will split into two.\n # This will make a total of ncomps (the target comp split into 2,\n # plus the remaining components from prev_result['comps']\n for i, target_comp in enumerate(prev_result['comps']):\n div_label = chr(ord('A') + i)\n run_dir = self.rdir + '{}/{}/'.format(self.ncomps, div_label)\n log_message(msg='Subdividing stage {}'.format(div_label),\n symbol='+', surround=True)\n mkpath(run_dir)\n\n self.fit_pars['init_comps'] = self.build_init_comps(\n prev_result['comps'], split_comp_ix=i,\n prev_med_and_spans=prev_result['med_and_spans'],\n memb_probs = prev_result['memb_probs'])\n\n result = self.run_em_unless_loadable(run_dir)\n all_results.append(result)\n\n score = 
self.calc_score(result['comps'], result['memb_probs'])\n all_scores.append(score)\n\n logging.info(\n 'Decomposition {} finished with \\nBIC: {}\\nlnlike: {}\\n'\n 'lnpost: {}'.format(\n div_label, all_scores[-1]['bic'],\n all_scores[-1]['lnlike'], all_scores[-1]['lnpost'],\n ))\n\n # identify the best performing decomposition\n all_bics = [score['bic'] for score in all_scores]\n best_split_ix = np.nanargmin(all_bics)\n\n new_result = all_results[best_split_ix]\n new_score = all_scores[best_split_ix]\n\n self.iter_end_log(best_split_ix, prev_result=prev_result, new_result=new_result)\n\n # Check if the fit has improved\n self.log_score_comparison(new=new_score,\n prev=prev_score)\n if new_score['bic'] < prev_score['bic']:\n prev_score = new_score\n prev_result = new_result\n\n self.ncomps += 1\n log_message(msg=\"Commencing {} component fit on {}{}\".format(\n self.ncomps, self.ncomps - 1,\n chr(ord('A') + best_split_ix)), symbol='+'\n )\n else:\n # WRITING THE FINAL RESULTS INTO FILES\n logging.info(\"... saving previous fit as best fit to data\")\n self.Component.store_raw_components(self.rdir + self.final_comps_file,\n prev_result['comps'])\n np.save(self.rdir + self.final_med_and_spans_file, prev_result['med_and_spans'])\n np.save(self.rdir + self.final_memb_probs_file, prev_result['memb_probs'])\n np.save(self.rdir + 'final_likelihood_post_and_bic',\n prev_score)\n\n\n # Save components in fits file\n tabcomps = self.Component.convert_components_array_into_astropy_table(prev_result['comps'])\n \n if self.fit_pars['overwrite_fits']:\n tabcomps.write(os.path.join(self.rdir, 'final_comps_%d.fits'%len(prev_result['comps'])), overwrite=self.fit_pars['overwrite_fits'])\n else:\n filename_comps_fits_random = os.path.join(self.rdir, 'final_comps_%d_%s.fits'%(len(prev_result['comps']), str(uuid.uuid4().hex)))\n tabcomps.write(filename_comps_fits_random, overwrite=self.fit_pars['overwrite_fits'])\n\n # Save membership fits file\n try:\n if self.fit_pars['overwrite_fits']:\n tabletool.construct_an_astropy_table_with_gaia_ids_and_membership_probabilities(self.fit_pars['data_table'], prev_result['memb_probs'], prev_result['comps'], os.path.join(self.rdir, 'final_memberships_%d.fits'%len(prev_result['comps'])), get_background_overlaps=True, stellar_id_colname = self.fit_pars['stellar_id_colname'], overwrite_fits = self.fit_pars['overwrite_fits'])\n else:\n filename_memb_probs_fits_random = os.path.join(self.rdir, 'final_memberships_%d_%s.fits'%(len(prev_result['comps']), str(uuid.uuid4().hex)))\n tabletool.construct_an_astropy_table_with_gaia_ids_and_membership_probabilities(self.fit_pars['data_table'], prev_result['memb_probs'], prev_result['comps'], filename_memb_probs_fits_random, get_background_overlaps=True, stellar_id_colname = self.fit_pars['stellar_id_colname'], overwrite_fits = self.fit_pars['overwrite_fits'])\n except:\n logging.info(\"[WARNING] Couldn't print membership.fits file. Check column id.\")\n\n self.log_final_log(prev_result, prev_score)\n break\n\n logging.info(\"Best fit:\\n{}\".format(\n [group.get_pars() for group in prev_result['comps']]))\n\n if self.ncomps >= self.fit_pars['max_comp_count']:\n log_message(msg='REACHED MAX COMP LIMIT', symbol='+',\n surround=True)\n\n return prev_result, prev_score\n",
"\"\"\"\nPrimary entry point for preparing and converting data for\ninput into Chronostar\n\"\"\"\nfrom __future__ import print_function, division, unicode_literals\n\nfrom astropy.table import Table\nfrom datetime import datetime\nimport logging\nimport numpy as np\nimport os.path\n\nfrom . import tabletool\nfrom . import readparam\nfrom . import expectmax\n\nDEFAULT_PARS = {\n 'input_file':'',\n 'convert_astrometry':True,\n\n 'astr_main_colnames':None,\n 'astr_error_colnames':None,\n 'astr_corr_colnames':None,\n\n 'cart_main_colnames':None,\n 'cart_error_colnames':None,\n 'cart_corr_colnames':None,\n\n# 'modernise_colnames':False, # If table has \"historic\" column names,\n# # like dX, dY,... c_XY, .. etc\n# # then update to X_error, ... corr_X_Y\n\n 'apply_cart_cuts':False,\n 'cut_on_region':False,\n 'pos_margin':10.,\n 'vel_margin':2.,\n 'cut_ref_table':None,\n 'convert_ref_table':False,\n 'cut_assoc_name':None,\n 'cut_colname':None,\n 'cut_on_bounds':False,\n 'cut_bound_min':None,\n 'cut_bound_max':None,\n\n 'calc_overlaps':False,\n 'bg_ref_table':'',\n 'bg_main_colnames':None,\n 'bg_col_name':'background_log_overlap',\n 'par_log_file':'data_pars.log',\n\n 'overwrite_datafile':False,\n 'output_file':None,\n\n 'return_data_table':True,\n}\n\ndef get_region(ref_table, assoc_name=None,\n pos_margin=10., vel_margin=2.,\n scale_margin=None, mg_colname=None):\n \"\"\"\n Get a 6D box surrounding a set of stars identified in a fits table,\n with the entry `assoc_name` in the column `mg_colname`.\n\n Reference table should already have data in LSR-centric cartesian\n coordinates with colnames {'X', 'Y', ... 'W'}\n (as assumed by tabletool.build_data_dict), but does not need to\n have covariance matrix elements.\n\n Parameters\n ----------\n ref_table: str or astropy.table.Table {None}\n filename to fits table, e.g. the BANYAN reference table\n assoc_name: str {None}\n Name of the association as listed in the `mg_colname` column of\n provided table. If left as None, then all entries in table are\n used.\n E.g. if ref_table is the BANYAN, then accepted\n moving group names are:\n {'118 Tau', '32 Orionis', 'AB Doradus', 'Carina', 'Carina-Near',\n 'Columba', 'Coma Ber', 'Corona Australis', 'Hyades', 'IC 2391',\n 'IC 2602', 'Lower Centaurus-Crux', 'Octans', 'Platais 8',\n 'Pleiades', 'TW Hya', 'Taurus', 'Tucana-Horologium',\n 'Upper Centaurus Lupus', 'Upper CrA', 'Upper Scorpius',\n 'Ursa Major', 'beta Pictoris', 'chi{ 1 For (Alessi 13)',\n 'epsilon Cha', 'eta Cha', 'rho Ophiuci'}\n pos_margin: float {30.}\n Margin in position space around known members from which new candidate\n members are included\n vel_margin: float {5.}\n Margin in velocity space around known members from which new candidate\n members are included\n mg_colname: str {'Moving group'}\n The column name of the moving group\n\n Returns\n -------\n box_lower_bounds: [6] float array\n The lower bounds of the 6D box [X,Y,Z,U,V,W]\n box_upper_bounds: [6] float array\n The upper bounds of the 6D box [X,Y,Z,U,V,W]\n \"\"\"\n\n ## Commenting this out for now. Usage by me (Tim) should be the same\n ## as everyone else. i.e. 
no hardcoded filenames for convenience.\n # if gagne_reference_data is None:\n # gagne_reference_data =\\\n # '/home/tcrun/chronostar/data/gagne_bonafide_full_kinematics_with_lit_and_best_radial_velocity' \\\n # '_comb_binars_with_banyan_radec.fits'\n\n if mg_colname is None:\n mg_colname = 'Moving group'\n\n # If reference table is provided as str, convert to table\n if type(ref_table) is str:\n ref_table = Table.read(ref_table)\n\n # Extract all stars\n if assoc_name is None:\n subtable = ref_table\n else:\n if assoc_name not in set(ref_table[mg_colname]):\n raise UserWarning(\n 'Association name must be one of:\\n{}\\nReceived: \"{}\"'.format(\n list(set(ref_table[mg_colname])), assoc_name\n ))\n subtable = ref_table[np.where(ref_table[mg_colname] == assoc_name)]\n logging.info('Initial membership list has {} members'.format(len(subtable)))\n\n star_means = tabletool.build_data_dict_from_table(subtable, only_means=True,\n cartesian=True)\n\n data_upper_bound = np.nanmax(star_means, axis=0)\n data_lower_bound = np.nanmin(star_means, axis=0)\n logging.info('Stars span from {} to {}'.format(\n np.round(data_lower_bound),\n np.round(data_upper_bound)\n ))\n\n # First try and scale box margins by provided scale margin.\n # scale_margin of 1 would double total span (1 + 1)\n if scale_margin is not None:\n data_span = data_upper_bound - data_lower_bound\n box_margin = 0.5 * scale_margin * data_span\n\n # Set up boundaries of box that span double the association\n box_lower_bound = data_lower_bound - box_margin\n box_upper_bound = data_upper_bound + box_margin\n\n # Set margin based on provided (or default) constant amounts\n else:\n data_margin = np.array(3*[pos_margin] + 3*[vel_margin])\n box_lower_bound = data_lower_bound - data_margin\n box_upper_bound = data_upper_bound + data_margin\n\n logging.info('Range extended.\\nLower: {}\\nUpper: {}'.format(\n np.round(box_lower_bound),\n np.round(box_upper_bound)\n ))\n\n return box_lower_bound, box_upper_bound\n\n\ndef prepare_data(custom_pars):\n \"\"\"\n Entry point for complete data preparation.\n\n Set parameters as desired in either a .par file or as a dictionary.\n This function will go through the main data file (as named in\n `data_pars`) and apply all requested data conversions.\n\n Key functionalities include:\n - convert astrometry measurements, errors and (opt.) correlations\n into cartesian means, standard devs and correlations\n - calculate background overlaps of stars with typical Gaia 6D\n phase-space densities\n - Apply a phase-space data cut based on a cartesian region\n\n Parameters\n ----------\n data_pars : dict -or- filename\n Parameters that govern behaviour of prepare_data. 
Exhaustive\n list of options is included in main README.md.\n data_pars can be provided as a dictionary, or as a filename with\n [key] = [value] pairs, to be parsed by `readparam.readParam`\n\n Returns\n -------\n data_table [opt.]: astropy.Table object\n\n Notes\n -----\n TODO: update background overlaps to allow for multiprocessing\n TODO: test functionality of overlap calculations\n TODO: Implement initialising synethetic datasets?\n TODO: Implement various input checks\n TODO: Allow for checkpoint saves after each stage\n TODO: Add a logging.log output\n \"\"\"\n if type(custom_pars) is str:\n custom_pars = readparam.readParam(custom_pars, default_pars=DEFAULT_PARS)\n data_pars = dict(DEFAULT_PARS)\n data_pars.update(custom_pars)\n\n readparam.log_used_pars(data_pars, default_pars=DEFAULT_PARS)\n\n # Input quality checks\n # --------------------------------------------------\n # -- INPUT PARAMETER QUALITY CHECKS --------------\n # --------------------------------------------------\n # Check if output_file is viable\n if data_pars['output_file'] == '' or data_pars['output_file'] is None:\n logging.info('No output file provided, so result will not be '\n 'saved to file.')\n if not data_pars['return_data_table']:\n raise UserWarning('Neither an output file was provided, nor '\n '`return_data_table` set to True. No way to '\n 'return the result!')\n # If output_file is provided, but overwrite is not set, ensure file\n # doesn't already exist.\n elif (not data_pars['overwrite_datafile'] and\n os.path.isfile(data_pars['output_file'])):\n raise UserWarning('Output file exists, yet you have not set'\n ' `overwrite_data = True` in the input parameters.')\n\n # Prevent users from overwriting an input file if data cuts are\n # being applied. Note: if future cuts are implemented, extend this\n # condition\n if data_pars['apply_cart_cuts']:\n try:\n assert data_pars['input_file'] != data_pars['output_file']\n except:\n raise UserWarning('You have set `input_file` to be the same as '\n '`output_file`, but are applying data cuts. '\n 'This would overwrite your original data file '\n 'with a subset. 
For safety, choose a different '\n 'output file name.')\n\n # If applying cartesian cuts, ensure either cut_on_region or\n # cut_on_bounds has necessary parameters set.\n if data_pars['apply_cart_cuts']:\n if data_pars['cut_on_region']:\n try:\n if type(data_pars['cut_ref_table']) is str:\n assert os.path.isfile(data_pars['cut_ref_table'])\n elif type(data_pars['cut_ref_table']) == Table:\n pass\n else:\n raise TypeError\n except (AssertionError, TypeError):\n raise UserWarning('You have set `cut_on_region`, but there is'\n ' an issue with the provided table '\n ' cut_ref_table`.')\n elif data_pars['cut_on_bounds']:\n print('here')\n try:\n assert len(data_pars['cut_bound_min']) == 6\n assert len(data_pars['cut_bound_max']) == 6\n except (TypeError, AssertionError):\n raise UserWarning('You have set `cut_on_bounds` yet there'\n ' is an issue with your provided cartesian'\n ' boundaries.')\n else:\n UserWarning('If setting `apply_cart_cuts` to True, then'\n ' either `cut_on_region` or `cut_on_bounds` must'\n ' also be set.')\n\n\n # If calculating background, ensure all required info is provided\n if data_pars['calc_overlaps']:\n try:\n if type(data_pars['bg_ref_table']) is str:\n assert os.path.isfile(data_pars['bg_ref_table'])\n else:\n assert type(data_pars['bg_ref_table']) is Table\n except (AssertionError, TypeError):\n raise UserWarning('You have set `calc_overlaps`, but there is'\n ' an issue with the provided table '\n ' bg_ref_table`.')\n\n # Establish what column names are\n try:\n data_table = Table.read(data_pars['input_file'])\n except AttributeError:\n data_table = data_pars['input_file']\n\n if data_pars['convert_astrometry']:\n # --------------------------------------------------\n # -- CONVERT ASTROMETRY INTO CARTESIAN -----------\n # --------------------------------------------------\n data_table = tabletool.convert_table_astro2cart(\n table=data_table,\n astr_main_colnames=data_pars['astr_main_colnames'],\n astr_error_colnames=data_pars['astr_error_colnames'],\n astr_corr_colnames=data_pars['astr_corr_colnames'],\n cart_main_colnames=data_pars['cart_main_colnames'],\n cart_error_colnames=data_pars['cart_error_colnames'],\n cart_corr_colnames=data_pars['cart_corr_colnames'],\n return_table=True,\n )\n\n\n if data_pars['apply_cart_cuts']:\n # --------------------------------------------------\n # -- APPLY DATA CUTS IN CARTESIAN SPACE ----------\n # --------------------------------------------------\n # First try and form region around a subset of reference\n # stars.\n if data_pars['cut_on_region']:\n bounds_min, bounds_max = get_region(\n ref_table=data_pars['cut_ref_table'],\n assoc_name=data_pars['cut_assoc_name'],\n mg_colname=data_pars['cut_colname'],\n pos_margin=data_pars['pos_margin'],\n vel_margin=data_pars['vel_margin'],\n )\n # Otherwise, use some mins and maxs from the pars file\n elif data_pars['cut_on_bounds']:\n bounds_min = np.array(data_pars['cut_bound_min'])\n bounds_max = np.array(data_pars['cut_bound_max'])\n else:\n UserWarning('If setting `apply_cart_cuts` to True, then'\n ' either `cut_on_region` or `cut_on_bounds` must'\n ' also be set.')\n\n input_means = tabletool.build_data_dict_from_table(\n table=data_table,\n main_colnames=data_pars['cart_main_colnames'],\n only_means=True,\n )\n cart_cut_mask = np.where(\n np.all(input_means > bounds_min, axis=1)\n & np.all(input_means < bounds_max, axis=1)\n )\n data_table = data_table[cart_cut_mask]\n\n\n if data_pars['calc_overlaps']:\n # --------------------------------------------------\n # -- CALCULATE 
BACKGROUND OVERLAPS ---------------\n # --------------------------------------------------\n # Only accessing the main column names\n bg_star_means = tabletool.build_data_dict_from_table(\n table=data_pars['bg_ref_table'],\n main_colnames=data_pars['bg_main_colnames'],\n only_means=True,\n )\n input_data_dict = tabletool.build_data_dict_from_table(\n table=data_table,\n main_colnames=data_pars['cart_main_colnames'],\n error_colnames=data_pars['cart_error_colnames'],\n corr_colnames=data_pars['cart_corr_colnames'],\n )\n\n #TODO: A parallelised version of this exists, incorporate it?\n #TODO: Check database for precomputed bgoverlaps\n ln_bg_ols = expectmax.get_background_overlaps_with_covariances(\n background_means=bg_star_means,\n star_means=input_data_dict['means'],\n star_covs=input_data_dict['covs'],\n )\n\n tabletool.insert_column(table=data_table,\n col_data=ln_bg_ols,\n col_name=data_pars['bg_col_name'],\n )\n\n # Store output. Since this everything above is so computationally\n # expensive, if writing to the prescribed output fails, make sure\n # the result is stored somewhere.\n try:\n data_table.write(data_pars['output_file'],\n overwrite=data_pars['overwrite_datafile'])\n except:\n emergency_filename = 'emergency_data_save_{:.0f}.fits'.format(\n datetime.timestamp(datetime.now())\n )\n data_table.write(emergency_filename, overwrite=True)\n print(\"COULDN'T SAVE TO DESIGNATED OUTPUT FILE.\\n\"\n \"Managed an emergency save to {}\".format(emergency_filename))\n\n if data_pars['return_data_table']:\n return data_table\n\n"
] |
[
[
"numpy.logical_not",
"numpy.logical_or",
"numpy.array",
"numpy.isnan",
"numpy.triu_indices",
"numpy.diagonal",
"numpy.eye",
"numpy.where",
"numpy.einsum",
"numpy.vstack"
],
[
"numpy.round",
"numpy.load",
"numpy.save",
"numpy.argmax",
"numpy.all",
"numpy.nanargmin"
],
[
"numpy.array",
"numpy.round",
"numpy.nanmin",
"numpy.where",
"numpy.all",
"numpy.nanmax"
]
] |
Morisset/python-workshop
|
[
"ec8b0c4f08a24833e53a22f6b52566a08715c9d0"
] |
[
"Day_2_Software_engineering_best_practices/solutions/04_modules/spectra_analysis/regression.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nSpectra analysis utilities for regression\n\n\n\"\"\"\n\nimport numpy as np\n\nfrom sklearn.decomposition import PCA\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.linear_model import RidgeCV\nfrom sklearn.pipeline import make_pipeline\n\nfrom .plotting import plot_regression\n \n\ndef regression_experiment(X_train, X_test, y_train, y_test):\n \"\"\"Perform regression experiment.\n \n Build a pipeline using PCA and either a Ridge\n or a RandomForestRegressor model.\n \n Parameters\n ----------\n X_train : pandas DataFrame, shape (n_spectra, n_freq_points)\n DataFrame containing training Raman spectra.\n \n X_test : pandas DataFrame, shape (n_spectra, n_freq_points)\n DataFrame containing testing Raman spectra.\n \n y_training : pandas Serie, shape (n_spectra,)\n Serie containing the training concentrations acting as targets.\n \n y_testing : pandas Serie, shape (n_spectra,)\n Serie containing the testing concentrations acting as targets.\n \n Returns\n -------\n None\n \n \"\"\"\n for reg in [RidgeCV(), RandomForestRegressor(random_state=0)]:\n pipeline = make_pipeline(PCA(n_components=100), reg)\n y_pred = pipeline.fit(X_train, y_train).predict(X_test)\n plot_regression(y_test, y_pred,\n 'Regression using {}'.format(reg.__class__.__name__))\n \n \ndef fit_params(data):\n \"\"\"Compute statistics for robustly scale data.\n \n Compute the median and the variance, i.e. the difference\n between the 75th and 25th percentiles.\n These statistics are used later to scale data.\n \n Parameters\n ----------\n data : pandas DataFrame, shape (n_spectra, n_freq_point)\n DataFrame containing all Raman spectra.\n \n Returns\n -------\n median : ndarray, shape (n_freq_point,)\n Median for each wavelength.\n \n variance : ndarray, shape (n_freq_point,)\n Variance (difference between the 75th and 25th\n percentiles) for each wavelength.\n \n \"\"\"\n median = np.median(data, axis=0)\n percentile_25 = np.percentile(data, 25, axis=0)\n percentile_75 = np.percentile(data, 75, axis=0)\n return median, (percentile_75 - percentile_25)\n\n \ndef transform(data, median, var_25_75):\n \"\"\"Scale data using robust estimators.\n \n Scale the data by subtracting the median and dividing by the\n variance, i.e. the difference between the 75th and 25th percentiles.\n \n Parameters\n ----------\n data : pandas DataFrame, shape (n_spectra, n_freq_point)\n DataFrame containing all Raman spectra.\n \n median : ndarray, shape (n_freq_point,)\n Median for each wavelength.\n \n var_25_75 : ndarray, shape (n_freq_point,)\n Variance (difference between the 75th and 25th\n percentiles) for each wavelength.\n \n Returns\n -------\n data_scaled : pandas DataFrame, shape (n_spectra, n_freq_point)\n DataFrame containing all scaled Raman spectra.\n \n \"\"\"\n return (data - median) / var_25_75\n"
] |
[
[
"sklearn.linear_model.RidgeCV",
"numpy.median",
"numpy.percentile",
"sklearn.ensemble.RandomForestRegressor",
"sklearn.decomposition.PCA"
]
] |
YukiHata-ITS/uda_c3_3D_Object_Detection
|
[
"f831f7098e0c37586199b1ef355f5880e4d6ee16"
] |
[
"student/objdet_detect.py"
] |
[
"# ---------------------------------------------------------------------\n# Project \"Track 3D-Objects Over Time\"\n# Copyright (C) 2020, Dr. Antje Muntzinger / Dr. Andreas Haja.\n#\n# Purpose of this file : Detect 3D objects in lidar point clouds using deep learning\n#\n# You should have received a copy of the Udacity license together with this program.\n#\n# https://www.udacity.com/course/self-driving-car-engineer-nanodegree--nd013\n# ----------------------------------------------------------------------\n#\n\n# general package imports\nimport numpy as np\nimport torch\nfrom easydict import EasyDict as edict\n\n# add project directory to python path to enable relative imports\nimport os\nimport sys\nPACKAGE_PARENT = '..'\nSCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))\nsys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT)))\n\n# model-related\nfrom tools.objdet_models.resnet.models import fpn_resnet\nfrom tools.objdet_models.resnet.utils.evaluation_utils import decode, post_processing \n\nfrom tools.objdet_models.darknet.models.darknet2pytorch import Darknet as darknet\nfrom tools.objdet_models.darknet.utils.evaluation_utils import post_processing_v2\n\n#[S3_EX1 addition]\nimport argparse\nimport math\nfrom tools.objdet_models.resnet.utils.torch_utils import _sigmoid\n\ndef parse_test_configs(configs):\n parser = argparse.ArgumentParser(description='Testing config for the Implementation')\n parser.add_argument('--saved_fn', type=str, default='fpn_resnet_18', metavar='FN',\n help='The name using for saving logs, models,...')\n parser.add_argument('-a', '--arch', type=str, default='fpn_resnet_18', metavar='ARCH',\n help='The name of the model architecture')\n parser.add_argument('--pretrained_path', type=str,\n default='../checkpoints/fpn_resnet_18/fpn_resnet_18_epoch_300.pth', metavar='PATH',\n help='the path of the pretrained checkpoint')\n parser.add_argument('--K', type=int, default=50,\n help='the number of top K')\n parser.add_argument('--no_cuda', action='store_true',\n help='If true, cuda is not used.')\n parser.add_argument('--gpu_idx', default=0, type=int,\n help='GPU index to use.')\n parser.add_argument('--num_samples', type=int, default=None,\n help='Take a subset of the dataset to run and debug')\n parser.add_argument('--num_workers', type=int, default=1,\n help='Number of threads for loading data')\n parser.add_argument('--batch_size', type=int, default=1,\n help='mini-batch size (default: 4)')\n parser.add_argument('--peak_thresh', type=float, default=0.2)\n parser.add_argument('--save_test_output', action='store_true',\n help='If true, the output image of the testing phase will be saved')\n parser.add_argument('--output_format', type=str, default='image', metavar='PATH',\n help='the type of the test output (support image or video)')\n parser.add_argument('--output_video_fn', type=str, default='out_fpn_resnet_18', metavar='PATH',\n help='the video filename if the output format is video')\n parser.add_argument('--output-width', type=int, default=608,\n help='the width of showing output, the height maybe vary')\n\n# configs = edict(vars(parser.parse_args()))\n \n configs.pin_memory = True\n configs.distributed = False # For testing on 1 GPU only\n\n configs.input_size = (608, 608)\n configs.hm_size = (152, 152)\n configs.down_ratio = 4\n configs.max_objects = 50\n\n configs.imagenet_pretrained = False\n configs.head_conv = 64\n configs.num_classes = 3\n configs.num_center_offset = 2\n configs.num_z = 
1\n configs.num_dim = 3\n configs.num_direction = 2 # sin, cos\n\n configs.heads = {\n 'hm_cen': configs.num_classes,\n 'cen_offset': configs.num_center_offset,\n 'direction': configs.num_direction,\n 'z_coor': configs.num_z,\n 'dim': configs.num_dim\n }\n configs.num_input_features = 4\n\n ##### Added to resolve a build error\n configs.arch = 'fpn_resnet'\n configs.k = 50\n configs.conf_thresh = 0.5 # unknown\n configs.min_iou = 0.5 # provisional\n #####\n\n ####################################################################\n ##############Dataset, Checkpoints, and results dir configs#########\n ####################################################################\n# configs.root_dir = '../'\n# configs.dataset_dir = os.path.join(configs.root_dir, 'dataset', 'kitti')\n\n# if configs.save_test_output:\n# configs.results_dir = os.path.join(configs.root_dir, 'results', configs.saved_fn)\n# make_folder(configs.results_dir)\n\n return configs\n\n# load model-related parameters into an edict\ndef load_configs_model(model_name='darknet', configs=None):\n\n # init config file, if none has been passed\n if configs==None:\n configs = edict() \n\n # get parent directory of this file to enable relative paths\n curr_path = os.path.dirname(os.path.realpath(__file__))\n parent_path = configs.model_path = os.path.abspath(os.path.join(curr_path, os.pardir)) \n \n # set parameters according to model type\n if model_name == \"darknet\":\n configs.model_path = os.path.join(parent_path, 'tools', 'objdet_models', 'darknet')\n configs.pretrained_filename = os.path.join(configs.model_path, 'pretrained', 'complex_yolov4_mse_loss.pth')\n configs.arch = 'darknet'\n configs.batch_size = 4\n configs.cfgfile = os.path.join(configs.model_path, 'config', 'complex_yolov4.cfg')\n configs.conf_thresh = 0.5\n configs.distributed = False\n configs.img_size = 608\n configs.nms_thresh = 0.4\n configs.num_samples = None\n configs.num_workers = 4\n configs.pin_memory = True\n configs.use_giou_loss = False\n\n elif model_name == 'fpn_resnet':\n ####### ID_S3_EX1-3 START ####### \n #######\n print(\"student task ID_S3_EX1-3\")\n\n configs = parse_test_configs(configs)\n configs.model_path = os.path.join(parent_path, 'tools', 'objdet_models', 'resnet')\n configs.pretrained_filename = os.path.join(configs.model_path, 'pretrained', 'fpn_resnet_18_epoch_300.pth')\n configs.pretrained_path = os.path.join(configs.model_path, 'pretrained', 'fpn_resnet_18_epoch_300.pth')\n \n #######\n ####### ID_S3_EX1-3 END ####### \n\n else:\n raise ValueError(\"Error: Invalid model name\")\n\n # GPU vs. 
CPU\n configs.no_cuda = True # if true, cuda is not used\n configs.gpu_idx = 0 # GPU index to use.\n configs.device = torch.device('cpu' if configs.no_cuda else 'cuda:{}'.format(configs.gpu_idx))\n\n return configs\n\n\n# load all object-detection parameters into an edict\ndef load_configs(model_name='fpn_resnet', configs=None):\n\n # init config file, if none has been passed\n if configs==None:\n configs = edict() \n\n # birds-eye view (bev) parameters\n configs.lim_x = [0, 50] # detection range in m\n configs.lim_y = [-25, 25]\n configs.lim_z = [-1, 3]\n configs.lim_r = [0, 1.0] # reflected lidar intensity\n configs.bev_width = 608 # pixel resolution of bev image\n configs.bev_height = 608 \n\n # add model-dependent parameters\n configs = load_configs_model(model_name, configs)\n\n # visualization parameters\n configs.output_width = 608 # width of result image (height may vary)\n configs.obj_colors = [[0, 255, 255], [0, 0, 255], [255, 0, 0]] # 'Pedestrian': 0, 'Car': 1, 'Cyclist': 2\n \n configs.min_iou = 0.5 # provisional\n \n return configs\n\n\n# create model according to selected model type\ndef create_model(configs):\n\n # check for availability of model file\n assert os.path.isfile(configs.pretrained_filename), \"No file at {}\".format(configs.pretrained_filename)\n\n # create model depending on architecture name\n if (configs.arch == 'darknet') and (configs.cfgfile is not None):\n print('using darknet')\n model = darknet(cfgfile=configs.cfgfile, use_giou_loss=configs.use_giou_loss) \n \n elif 'fpn_resnet' in configs.arch:\n print('using ResNet architecture with feature pyramid')\n \n ####### ID_S3_EX1-4 START ####### \n #######\n print(\"student task ID_S3_EX1-4\")\n ##### File: SFA3D/blob/master/sfa/models/fpn_resnet.py \n ##### Function: get_pose_net(num_layers, heads, head_conv, imagenet_pretrained), return: model [type:class'PoseResNet']\n num_layers = 18 # taken from the file name\n model = fpn_resnet.get_pose_net(num_layers, configs.heads, configs.head_conv, configs.imagenet_pretrained) \n\n #######\n ####### ID_S3_EX1-4 END ####### \n \n else:\n assert False, 'Undefined model backbone'\n\n # load model weights\n model.load_state_dict(torch.load(configs.pretrained_filename, map_location='cpu'))\n print('Loaded weights from {}\\n'.format(configs.pretrained_filename))\n\n # set model to evaluation state\n configs.device = torch.device('cpu' if configs.no_cuda else 'cuda:{}'.format(configs.gpu_idx))\n model = model.to(device=configs.device) # load model to either cpu or gpu\n model.eval() \n\n return model\n\n# detect trained objects in birds-eye view\n# Detect trained objects in the bird's-eye view\ndef detect_objects(input_bev_maps, model, configs):\n\n # deactivate autograd engine during test to reduce memory usage and speed up computations\n # Deactivate the autograd engine during testing to reduce memory usage and speed up computation\n with torch.no_grad(): \n\n # perform inference\n # Run inference\n outputs = model(input_bev_maps)\n\n # decode model output into target object format\n # Decode the model output into the target object format\n if 'darknet' in configs.arch:\n\n # perform post-processing\n # Perform post-processing\n output_post = post_processing_v2(outputs, conf_thresh=configs.conf_thresh, nms_thresh=configs.nms_thresh) \n detections = []\n for sample_i in range(len(output_post)):\n if output_post[sample_i] is None:\n continue\n detection = output_post[sample_i]\n for obj in detection:\n x, y, w, l, im, re, _, _, _ = obj\n yaw = np.arctan2(im, re)\n detections.append([1, x, y, 0.0, 1.50, w, l, yaw]) \n\n elif 'fpn_resnet' in configs.arch:\n # decode output and perform post-processing\n \n 
####### ID_S3_EX1-5 START ####### \n #######\n print(\"student task ID_S3_EX1-5\")\n ##### Reference: SFA3D/blob/master/sfa/test.py, line 134\n outputs['hm_cen'] = _sigmoid(outputs['hm_cen'])\n outputs['cen_offset'] = _sigmoid(outputs['cen_offset'])\n # detections size (batch_size, K, 10)\n detections = decode(outputs['hm_cen'], outputs['cen_offset'], outputs['direction'], outputs['z_coor'],\n outputs['dim'], K=configs.k)\n detections = detections.cpu().numpy().astype(np.float32)\n# detections = post_processing(detections, configs.num_classes, configs.down_ratio, configs.peak_thresh)\n detections = post_processing(detections, configs)\n\n detections = detections[0][1] \n #######\n ####### ID_S3_EX1-5 END ####### \n\n ####### ID_S3_EX2 START ####### \n #######\n # Extract 3d bounding boxes from model response\n # Extract 3D bounding boxes from the model response\n ##### detection format: [1, x, y, z, h, w, l, yaw]; the leading 1 is TYPE_VEHICLE\n print(\"student task ID_S3_EX2\")\n objects = [] \n \n ## step 1 : check whether there are any detections\n ## Step 1: check whether there are any detections\n print(\"detections\",detections)\n if detections[1] is not None:\n \n ## step 2 : loop over all detections\n ## Step 2: loop over all detections\n for a_detection in detections:\n _, bev_x, bev_y, z, h, bev_w, bev_l, yaw = a_detection\n \n ## step 3 : perform the conversion using the limits for x, y and z set in the configs structure\n ## Step 3: perform the conversion using the x, y and z limits set in the configs structure\n x = bev_y / configs.bev_height * (configs.lim_x[1] - configs.lim_x[0])\n y = bev_x / configs.bev_width * (configs.lim_y[1] - configs.lim_y[0]) - (configs.lim_y[1] - configs.lim_y[0])/2.0 \n w = bev_w / configs.bev_width * (configs.lim_y[1] - configs.lim_y[0]) \n l = bev_l / configs.bev_height * (configs.lim_x[1] - configs.lim_x[0])\n\n ## step 4 : append the current object to the 'objects' array\n ## Step 4: append the current object to the 'objects' array\n objects.append([1, x, y, z, h, w, l, yaw])\n \n #######\n ####### ID_S3_EX2 START ####### \n \n return objects \n\n"
] |
[
[
"torch.no_grad",
"numpy.arctan2",
"torch.load"
]
] |
joesingo/tom_education
|
[
"9bf9ea3d465f83040e4618ce89efbab2a087b2fa"
] |
[
"tom_education/tests.py"
] |
[
"from datetime import datetime\nfrom io import BytesIO, StringIO\nimport json\nimport os\nfrom unittest.mock import patch\nimport tempfile\n\nfrom astropy.io import fits\nfrom django import forms\nfrom django.core import mail\nfrom django.core.exceptions import ValidationError\nfrom django.core.files.uploadedfile import File\nfrom django.core.management import call_command\nfrom django.conf import settings\nfrom django.db import transaction\nfrom django.db.models.query import QuerySet\nfrom django.urls import reverse\nfrom django.test import SimpleTestCase, TestCase, override_settings\nfrom django.contrib.auth.models import User\nfrom guardian.shortcuts import assign_perm\nimport imageio\nimport numpy as np\nfrom tom_dataproducts.models import DataProduct, ReducedDatum, DataProductGroup\nfrom tom_targets.models import Target\nfrom tom_observations.models import ObservationRecord\nfrom tom_observations.tests.factories import ObservingRecordFactory\nfrom tom_observations.tests.utils import FakeFacility, FakeFacilityForm\n\nfrom tom_education.forms import DataProductActionForm, GalleryForm\nfrom tom_education.facilities import EducationLCOForm\nfrom tom_education.models import (\n ASYNC_STATUS_CREATED,\n ASYNC_STATUS_FAILED,\n ASYNC_STATUS_PENDING,\n AsyncError,\n AsyncProcess,\n crop_image,\n InvalidPipelineError,\n ObservationAlert,\n ObservationTemplate,\n PipelineProcess,\n PipelineOutput,\n TIMELAPSE_GIF,\n TIMELAPSE_MP4,\n TIMELAPSE_WEBM,\n TimelapsePipeline,\n)\nfrom tom_education.templatetags.tom_education_extras import dataproduct_selection_buttons\nfrom tom_education.tasks import run_pipeline\n\n\nclass FakeTemplateFacilityForm(FakeFacilityForm):\n # Add some extra fields so we can check that the correct field is used as\n # the identifier\n extra_field = forms.CharField()\n another_extra_field = forms.IntegerField()\n\n def get_extra_context(self):\n return {'extra_variable_from_form': 'hello'}\n\n\nclass FakeTemplateFacility(FakeFacility):\n name = 'TemplateFake'\n\n def get_form(self, *args):\n return FakeTemplateFacilityForm\n\n\nclass AnotherFakeFacility(FakeFacility):\n name = 'AnotherFake'\n\n def get_form(self, *args):\n return FakeTemplateFacilityForm\n\n\nFAKE_FACILITIES = [\n 'tom_education.tests.FakeTemplateFacility',\n 'tom_education.tests.AnotherFakeFacility',\n]\n\n\ndef write_fits_image_file(data, date=None):\n primary_hdu = fits.PrimaryHDU()\n primary_hdu.header['XTENSION'] = 'IMAGE'\n if date:\n primary_hdu.header['DATE-OBS'] = date.isoformat()\n img = fits.hdu.compressed.CompImageHDU(data)\n hdul = fits.HDUList([primary_hdu, img])\n buf = BytesIO()\n hdul.writeto(buf)\n return buf\n\n\nclass TestDataHandler:\n \"\"\"\n Small class to handle creating and deleting temporary directories to use as\n the `MEDIA_ROOT` setting for data files created during tests.\n\n Provides a `media_root` property which returns the path to the temp dir.\n \"\"\"\n def __init__(self):\n self.tmpdir = None\n self.create()\n\n def create(self):\n if not self.tmpdir:\n self.tmpdir = tempfile.TemporaryDirectory()\n\n def delete(self):\n self.tmpdir.cleanup()\n self.tmpdir = None\n\n @property\n def media_root(self):\n return self.tmpdir.name\n\n\nTEST_DATA_HANDLER = TestDataHandler()\n\n\n@override_settings(MEDIA_ROOT=TEST_DATA_HANDLER.media_root)\nclass TomEducationTestCase(TestCase):\n \"\"\"\n Base class for tom_education tests\n \"\"\"\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n TEST_DATA_HANDLER.create()\n\n @classmethod\n def tearDownClass(cls):\n 
super().tearDownClass()\n TEST_DATA_HANDLER.delete()\n\n\n@override_settings(TOM_FACILITY_CLASSES=FAKE_FACILITIES)\n@patch('tom_education.models.ObservationTemplate.get_identifier_field', return_value='test_input')\n@patch('tom_education.views.TemplatedObservationCreateView.supported_facilities', ('TemplateFake',))\nclass ObservationTemplateTestCase(TomEducationTestCase):\n facility = 'TemplateFake'\n\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n cls.user = User.objects.create(username='someuser', password='somepass', is_staff=True)\n cls.non_staff = User.objects.create(username='another', password='aaa')\n cls.target = Target.objects.create(name='my target')\n\n def setUp(self):\n super().setUp()\n self.client.force_login(self.user)\n\n def get_base_url(self, facility=None):\n # Return URL for create form without target ID\n facility = facility or self.facility\n return reverse('tom_education:create_obs', kwargs={'facility': facility})\n\n def get_url(self, target, facility=None):\n return '{}?target_id={}'.format(self.get_base_url(facility), target.pk)\n\n def test_existing_templates_shown(self, mock):\n template = ObservationTemplate.objects.create(\n name='mytemplate',\n target=self.target,\n facility=self.facility,\n fields='{\"one\": \"two\"}'\n )\n # Make another template for a different facility: should not be shown\n template2 = ObservationTemplate.objects.create(\n name='other-template',\n target=self.target,\n facility='made up facility',\n fields='{\"1\": \"2\"}'\n )\n\n response = self.client.get(self.get_url(self.target))\n self.assertEqual(response.status_code, 200)\n\n self.assertIn(b'mytemplate', response.content)\n self.assertNotIn(b'other-template', response.content)\n\n # Go to create page for a different target: template should not be\n # shown\n target2 = Target.objects.create(name='another')\n response2 = self.client.get(self.get_url(target2))\n self.assertEqual(response2.status_code, 200)\n self.assertNotIn(b'mytemplate', response2.content)\n\n def test_create_button(self, mock):\n response = self.client.get(self.get_url(self.target))\n self.assertEqual(response.status_code, 200)\n self.assertIn(b'create-template', response.content)\n self.assertIn(b'Create new template', response.content)\n\n # Should not be present if facility not supported\n response2 = self.client.get(self.get_url(self.target, facility='AnotherFake'))\n self.assertEqual(response2.status_code, 200)\n self.assertNotIn(b'create-template', response2.content)\n self.assertNotIn(b'Create new template', response2.content)\n\n # Should not be present as non-staff\n self.client.force_login(self.non_staff)\n response3 = self.client.get(self.get_url(self.target))\n self.assertEqual(response3.status_code, 200)\n self.assertNotIn(b'create-template', response3.content)\n self.assertNotIn(b'Create new template', response3.content)\n\n def test_create_template(self, mock):\n self.assertEqual(ObservationTemplate.objects.all().count(), 0)\n fields = {\n 'test_input': 'some-name',\n 'extra_field': 'this is some extra text',\n 'another_extra_field': 4,\n 'target_id': self.target.pk,\n 'facility': self.facility,\n 'observation_type': '',\n }\n post_params = dict(fields, **{\n 'create-template': 'yes please'\n })\n\n # Should not be able to POST as non-staff user\n self.client.force_login(self.non_staff)\n response = self.client.post(self.get_base_url(), post_params)\n self.assertEqual(response.status_code, 403)\n\n # Should not be able to POST if facility is not supported\n 
self.client.force_login(self.user)\n wrong_facility = dict(post_params, facility='AnotherFake')\n response2 = self.client.post(self.get_base_url(facility='AnotherFake'), wrong_facility)\n self.assertEqual(response2.status_code, 403)\n\n # Should be able to create as staff user for a valid facility\n response3 = self.client.post(self.get_base_url(), post_params)\n self.assertEqual(response3.status_code, 302)\n self.assertEqual(response3.url, self.get_url(self.target) + '&template_id=1')\n\n # ObservationTemplate object should have been created\n self.assertEqual(ObservationTemplate.objects.all().count(), 1)\n template = ObservationTemplate.objects.all()[0]\n\n self.assertEqual(template.name, 'some-name')\n self.assertEqual(template.target, self.target)\n self.assertEqual(template.facility, self.facility)\n self.assertEqual(json.loads(template.fields), fields)\n\n @patch('tom_education.models.observation_template.datetime')\n def test_instantiate_template(self, dt_mock, _):\n dt_mock.now.return_value = datetime(\n year=2019, month=1, day=2, hour=3, minute=4, second=5, microsecond=6\n )\n\n template = ObservationTemplate.objects.create(\n name='mytemplate',\n target=self.target,\n facility=self.facility,\n fields='{\"test_input\": \"mytemplate\", \"extra_field\": \"someextravalue\", '\n '\"another_extra_field\": 5}'\n )\n url = self.get_url(self.target) + '&template_id=' + str(template.pk)\n\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n initial = response.context['form'].initial\n self.assertEqual(initial['test_input'], 'mytemplate-2019-01-02-030405')\n self.assertEqual(initial['extra_field'], 'someextravalue')\n self.assertEqual(initial['another_extra_field'], 5)\n\n def test_extra_form_context(self, mock):\n response = self.client.get(self.get_url(self.target))\n self.assertIn('extra_variable_from_form', response.context)\n\n\n# The following test causes an error if run in a DB transaction since it\n# causes an IntegrityError, after which no more DB queries can be performed.\n# To work around this, put the test in its own SimpleTestCase so that no\n# transaction is used\n@override_settings(TOM_FACILITY_CLASSES=FAKE_FACILITIES)\n@patch('tom_education.models.ObservationTemplate.get_identifier_field', return_value='test_input')\n@patch('tom_education.views.TemplatedObservationCreateView.supported_facilities', ('TemplateFake',))\nclass InvalidObservationTemplateNameTestCase(SimpleTestCase):\n databases = '__all__'\n\n def setUp(self):\n super().setUp()\n\n def test_invalid_template_name(self, mock):\n user = User.objects.create(username='someuser', password='somepass', is_staff=True)\n self.client.force_login(user)\n\n target = Target.objects.create(name='my target')\n template = ObservationTemplate.objects.create(\n name=\"cool-template-name\",\n target=target,\n facility='TemplateFake',\n fields='...'\n )\n\n url = reverse('tom_education:create_obs', kwargs={'facility': 'TemplateFake'})\n response = self.client.post(url, {\n 'test_input': 'cool-template-name',\n 'extra_field': 'blah',\n 'another_extra_field': 4,\n 'target_id': target.pk,\n 'facility': 'TemplateFake',\n 'create-template': 'yep'\n })\n self.assertEqual(response.status_code, 200)\n\n err_msg = 'Template name \"cool-template-name\" already in use'\n self.assertIn(err_msg, response.context['form'].errors['__all__'])\n\n # Double check that no template was created\n temp_count = ObservationTemplate.objects.all().count()\n self.assertEqual(temp_count, 
1)\n\n\n@override_settings(TOM_FACILITY_CLASSES=['tom_observations.tests.utils.FakeFacility'])\nclass DataProductTestCase(TomEducationTestCase):\n \"\"\"\n Class providing a setUpClass method which creates a target, observation\n record and several FITS data products\n \"\"\"\n # Shape for dummy FITS files created in setUpClass\n test_fits_shape = (500, 50)\n\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n\n cls.target = Target.objects.create(name='my target')\n cls.observation_record = ObservingRecordFactory.create(\n target_id=cls.target.id,\n facility=FakeFacility.name,\n parameters='{}'\n )\n\n # Create some FITS image files and DataProducts from them\n cls.prods = []\n\n dates = [ # Note: dates are not in order\n datetime(year=2019, month=1, day=2, hour=3, minute=4),\n datetime(year=2019, month=1, day=2, hour=3, minute=5),\n datetime(year=2019, month=1, day=2, hour=3, minute=7),\n datetime(year=2019, month=1, day=2, hour=3, minute=6)\n ]\n # Create dummy image data. Make sure data is not constant to avoid\n # warnings from fits2image\n # TODO: consider using real fits files here...\n cls.image_data = np.ones(cls.test_fits_shape, dtype=np.float)\n cls.image_data[20, :] = np.linspace(1, 100, num=50)\n\n for i, date in enumerate(dates):\n product_id = 'test{}'.format(i)\n prod = DataProduct.objects.create(\n product_id=product_id,\n target=cls.target,\n observation_record=cls.observation_record,\n )\n buf = write_fits_image_file(cls.image_data, date)\n prod.data.save('{}.fits.fz'.format(product_id), File(buf), save=True)\n cls.prods.append(prod)\n\n # Save pk to class for convinience\n setattr(cls, 'pk{}'.format(i), str(prod.pk))\n\n\nclass TargetDataViewTestCase(DataProductTestCase):\n def setUp(self):\n super().setUp()\n self.user = User.objects.create_user(username='test', email='test@example.com')\n self.client.force_login(self.user)\n assign_perm('tom_targets.view_target', self.user, self.target)\n self.url = reverse('tom_education:target_data', kwargs={'pk': self.target.pk})\n\n def test_selection_buttons(self):\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)\n self.assertIn(b'Select all', response.content)\n self.assertIn(b'Select reduced', response.content)\n self.assertIn(b'Deselect all', response.content)\n\n def test_data_product_group_selection(self):\n group1 = DataProductGroup.objects.create(name='First group')\n group2 = DataProductGroup.objects.create(name='Second group')\n group3 = DataProductGroup.objects.create(name='Third group')\n self.prods[0].group.add(group1)\n self.prods[1].group.add(group1, group2)\n self.prods[2].group.add(group2)\n self.prods[0].save()\n self.prods[1].save()\n self.prods[2].save()\n\n # First test the inclusion tag which provides the list of groups for\n # the page\n ctx = {'target': self.target}\n self.assertNotIn('data_product_groups', dataproduct_selection_buttons(ctx, False))\n button_context = dataproduct_selection_buttons(ctx, True)\n self.assertIn('data_product_groups', button_context)\n # Group 3 should not be included, since no DPs for this target are part\n # of it\n self.assertEqual(len(button_context['data_product_groups']), 2)\n self.assertEqual(set(button_context['data_product_groups']), {group1, group2})\n\n # Test the view and the rendered HTML\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)\n self.assertIn(b'Select group', response.content)\n self.assertIn(b'First group', response.content)\n self.assertIn(b'Second group', 
response.content)\n self.assertIn(b'dpgroup-1', response.content)\n self.assertIn(b'dpgroup-2', response.content)\n\n def test_null_product_id(self):\n \"\"\"\n The data product action buttons should still work with products with\n None product_id (unfortunately, such data products may exist -- e.g.\n ones created by a user uploading a file)\n \"\"\"\n dp1 = DataProduct.objects.create(product_id='hello', target=self.target)\n dp2 = DataProduct.objects.create(target=self.target)\n\n dp1.data.save('file1', File(BytesIO()))\n dp1.data.save('file2', File(BytesIO()))\n\n response = self.client.post(self.url, data={\n 'action': 'view_gallery',\n dp1.pk: 'on',\n dp2.pk: 'on'\n })\n\n\ndef mock_fits_to_jpg(inputfiles, outputfile, **kwargs):\n f = open(outputfile, 'wb')\n f.close()\n return True\n\n@override_settings(TOM_EDUCATION_TIMELAPSE_SETTINGS={\n 'format': 'gif', 'fps': 10, 'size': 500, 'crop_scale': 0.5\n})\nclass TimelapseTestCase(DataProductTestCase):\n def setUp(self):\n super().setUp()\n self.user = User.objects.create_user(username='test', email='test@example.com')\n self.client.force_login(self.user)\n assign_perm('tom_targets.view_target', self.user, self.target)\n\n def create_timelapse_pipeline(self, products, **kwargs):\n pipeline = TimelapsePipeline.objects.create(\n identifier='test_{}'.format(datetime.now().isoformat()),\n target=self.target,\n **kwargs\n )\n pipeline.input_files.add(*products)\n pipeline.save()\n return pipeline\n\n # Methods to check a buffer for file signatures.\n # See https://www.garykessler.net/library/file_sigs.html\n def assert_gif_data(self, data):\n data.seek(0)\n self.assertEqual(data.read(6), b'GIF89a')\n\n def assert_mp4_data(self, data):\n data.seek(4)\n self.assertEqual(data.read(8), b'ftypisom')\n\n def assert_webm_data(self, data):\n data.seek(0)\n self.assertEqual(data.read(4), b'\\x1a\\x45\\xdf\\xa3')\n\n @override_settings(TOM_EDUCATION_TIMELAPSE_SETTINGS={'format': TIMELAPSE_GIF, 'fps': 16})\n @patch('tom_education.models.pipelines.datetime')\n def test_create_timelapse_form(self, dt_mock):\n \"\"\"\n Test the view and form, and check that the timelapse is created\n successfully\n \"\"\"\n d = datetime(\n year=2019, month=1, day=2, hour=3, minute=4, second=5, microsecond=6\n )\n dt_mock.now.return_value = d\n dt_mock.fromisoformat.return_value = d\n\n # GET page and check form is in the context\n url = reverse('tom_education:target_data', kwargs={'pk': self.target.pk})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n self.assertIn('dataproducts_form', response.context)\n self.assertIsInstance(response.context['dataproducts_form'], DataProductActionForm)\n\n pre_tlpipe_count = TimelapsePipeline.objects.count()\n self.assertEqual(pre_tlpipe_count, 0)\n self.assertFalse(DataProduct.objects.filter(data_product_type=settings.DATA_PRODUCT_TYPES['timelapse'][0]).exists())\n\n # POST form\n response2 = self.client.post(url, {\n 'action': 'pipeline',\n 'pipeline_name': 'Timelapse',\n self.pk0: 'on',\n self.pk3: 'on',\n self.pk2: 'on',\n })\n # Should get JSON response\n self.assertEqual(response2.status_code, 200)\n self.assertEqual(response2.json(), {'ok': True})\n\n # TimelapsePipeline object should have been created\n post_tlpipe_count = TimelapsePipeline.objects.count()\n self.assertEqual(post_tlpipe_count, pre_tlpipe_count + 1)\n pipe = TimelapsePipeline.objects.last()\n self.assertIn('Processing frame 1/3', pipe.logs)\n self.assertIn('Processing frame 2/3', pipe.logs)\n self.assertIn('Processing frame 3/3', 
pipe.logs)\n\n # DataProduct with timelapse tag should have been created\n tls = DataProduct.objects.filter(data_product_type=settings.DATA_PRODUCT_TYPES['timelapse'][0])\n self.assertTrue(tls.exists())\n dp = tls.first()\n\n # Check the fields are correct\n self.assertEqual(dp.target, self.target)\n self.assertEqual(dp.observation_record, None)\n self.assertEqual(dp.data_product_type, settings.DATA_PRODUCT_TYPES['timelapse'][0])\n expected_filename = 'tl_{}_20190102030405_t.gif'.format(self.target.pk)\n self.assertEqual(dp.product_id, expected_filename)\n self.assertTrue(os.path.basename(dp.data.name), expected_filename)\n\n # Check the timelapse data\n self.assert_gif_data(dp.data.file)\n\n def test_empty_form(self):\n form = DataProductActionForm(target=self.target, data={})\n self.assertFalse(form.is_valid())\n\n form2 = DataProductActionForm(target=self.target, data={'action': 'blah'})\n self.assertFalse(form2.is_valid())\n\n form3 = DataProductActionForm(target=self.target, data={self.pk0: 'on', 'action': 'blah'})\n self.assertTrue(form3.is_valid())\n\n def test_fits_file_sorting(self):\n correct_order = [self.prods[0], self.prods[1], self.prods[3], self.prods[2]]\n pipeline = self.create_timelapse_pipeline(self.prods)\n self.assertEqual(pipeline.sorted_frames(), correct_order)\n\n def test_multiple_observations(self):\n \"\"\"\n Should be able to create a timelapse of data from several observations\n \"\"\"\n other_obs = ObservingRecordFactory.create(\n target_id=self.target.id,\n facility=FakeFacility.name,\n parameters='{}'\n )\n other_obs_prod = DataProduct.objects.create(\n product_id='different observation',\n target=self.target,\n observation_record=other_obs,\n data=self.prods[0].data.name\n )\n pipeline = self.create_timelapse_pipeline([\n self.prods[0], self.prods[1], other_obs_prod\n ])\n pipeline.write_timelapse(BytesIO(), 'gif', 30, 500)\n\n def test_create_gif(self):\n pipeline = self.create_timelapse_pipeline(self.prods)\n buf = BytesIO()\n pipeline.write_timelapse(buf, fmt='gif')\n self.assert_gif_data(buf)\n\n # Check the number of frames is correct\n buf.seek(0)\n frames = imageio.mimread(buf)\n self.assertEqual(len(frames), len(self.prods))\n # Check the size of the first frame\n self.assertEqual(frames[0].shape, self.image_data.shape)\n\n def test_create_mp4(self):\n pipeline = self.create_timelapse_pipeline(self.prods)\n buf = BytesIO()\n pipeline.write_timelapse(buf, fmt='mp4')\n self.assert_mp4_data(buf)\n buf.seek(0)\n # Load and check the mp4 with imageio\n frames = imageio.mimread(buf, format='mp4')\n self.assertEqual(len(frames), len(self.prods))\n\n def test_create_webm(self):\n pipeline = self.create_timelapse_pipeline(self.prods)\n buf = BytesIO()\n pipeline.write_timelapse(buf, fmt='webm')\n buf.seek(0)\n self.assert_webm_data(buf)\n\n def test_invalid_fps(self):\n invalid_fpses = (0, -1)\n for fps in invalid_fpses:\n with self.settings(TOM_EDUCATION_TIMELAPSE_SETTINGS={'fps': fps}):\n pip = self.create_timelapse_pipeline(self.prods)\n with self.assertRaises(AsyncError):\n pip.run()\n\n @patch('tom_education.models.TimelapsePipeline.FITS_DATE_FIELD', new='hello')\n def test_no_observation_date_view(self):\n \"\"\"\n Check we get the expected error when a FITS file does not contain the\n header for the date of the observation. 
This is achieved by patching\n the field name and setting it to 'hello'\n \"\"\"\n pipeline = self.create_timelapse_pipeline(self.prods)\n with self.assertRaises(AsyncError):\n pipeline.write_timelapse(BytesIO())\n\n @override_settings(TOM_EDUCATION_TIMELAPSE_SETTINGS={'format': TIMELAPSE_GIF, 'fps': 16})\n def test_run_pipeline_wrapper(self):\n pipeline = self.create_timelapse_pipeline(self.prods)\n # Cause an 'expected' error by patching date field: should get proper\n # failure message\n with patch('tom_education.models.TimelapsePipeline.FITS_DATE_FIELD', new='hello') as _mock:\n run_pipeline(pipeline.pk, 'Timelapse')\n pipeline.refresh_from_db()\n self.assertEqual(pipeline.status, ASYNC_STATUS_FAILED)\n self.assertTrue(isinstance(pipeline.failure_message, str))\n self.assertIn('could not find observation date', pipeline.failure_message)\n\n # Cause an 'unexpected' error: should get generic failure message\n pipeline2 = self.create_timelapse_pipeline(self.prods)\n with patch('tom_education.models.timelapse.imageio', new='hello') as _mock:\n run_pipeline(pipeline2.pk, 'Timelapse')\n pipeline2.refresh_from_db()\n self.assertEqual(pipeline2.status, ASYNC_STATUS_FAILED)\n self.assertTrue(isinstance(pipeline2.failure_message, str))\n self.assertEqual(pipeline2.failure_message, 'An unexpected error occurred')\n\n # Create a timelapse successfully\n pipeline3 = self.create_timelapse_pipeline(self.prods)\n run_pipeline(pipeline3.pk, 'Timelapse')\n pipeline3.refresh_from_db()\n self.assertEqual(pipeline3.status, ASYNC_STATUS_CREATED)\n self.assertTrue(pipeline3.group)\n dps = pipeline3.group.dataproduct_set.all()\n self.assertTrue(dps.exists())\n self.assert_gif_data(dps.first().data.file)\n\n @override_settings(TOM_EDUCATION_TIMELAPSE_SETTINGS={'format': TIMELAPSE_GIF, 'fps': 16},\n TOM_EDUCATION_TIMELAPSE_GROUP_NAME='timelapsey')\n def test_management_command(self):\n pre_tlpipe_count = TimelapsePipeline.objects.count()\n self.assertEqual(pre_tlpipe_count, 0)\n\n # Make first 3 products in timelapse group, but have the third one a\n # raw file\n group = DataProductGroup.objects.create(name='timelapsey')\n for prod in self.prods[:3]:\n prod.group.add(group)\n prod.save()\n rawfilename = 'somerawfile_{}.e00.fits.fz'.format(datetime.now().strftime('%s'))\n self.prods[2].data.save(rawfilename, File(BytesIO()), save=True)\n\n # Make a product in the group but for a different target: it should\n # not be included in the timelapse\n other_target = Target.objects.create(name='someothertarget')\n other_prod = DataProduct.objects.create(product_id='someotherproduct', target=other_target)\n other_prod.group.add(group)\n other_prod.save()\n\n buf = StringIO()\n call_command('create_timelapse', self.target.pk, stdout=buf)\n\n # Check timelapse pipeline object created\n post_tlpipe_count = TimelapsePipeline.objects.count()\n self.assertEqual(post_tlpipe_count, pre_tlpipe_count + 1)\n\n # Check fields in the pipeline look correct\n pipe = TimelapsePipeline.objects.first()\n self.assertEqual(pipe.target, self.target)\n self.assertEqual(set(pipe.input_files.all()), set(self.prods[:2]))\n\n # Check the timelapse itself\n tls = DataProduct.objects.filter(data_product_type=settings.DATA_PRODUCT_TYPES['timelapse'][0])\n self.assertEqual(tls.count(), 1)\n tl = tls.first()\n self.assert_gif_data(tl.data.file)\n\n # Check the command output\n output = buf.getvalue()\n self.assertTrue(\"Creating timelapse of 2 files for target 'my target'...\" in output)\n self.assertTrue('Created timelapse' in output)\n\n 
@patch('tom_education.models.timelapse.TimelapsePipeline.FITS_DATE_FIELD', 'hello')\n @override_settings(TOM_EDUCATION_TIMELAPSE_SETTINGS={'format': TIMELAPSE_GIF, 'fps': 16},\n TOM_EDUCATION_TIMELAPSE_GROUP_NAME='timelapsey')\n def test_management_command_failure(self):\n group = DataProductGroup.objects.create(name='timelapsey')\n for prod in self.prods[:3]:\n prod.group.add(group)\n prod.save()\n\n buf = StringIO()\n call_command('create_timelapse', self.target.pk, stderr=buf)\n self.assertIn('could not find observation date', buf.getvalue())\n\n def test_management_command_no_dataproducts(self):\n buf = StringIO()\n call_command('create_timelapse', self.target.pk, stdout=buf)\n output = buf.getvalue()\n self.assertTrue('Nothing to do' in output, 'Output was: {}'.format(output))\n self.assertEqual(DataProduct.objects.filter(data_product_type=settings.DATA_PRODUCT_TYPES['timelapse'][0]).count(), 0)\n # The timelapse group should have been created\n self.assertEqual(DataProductGroup.objects.count(), 1)\n\n @patch('tom_education.models.timelapse.normalise_background')\n @patch('tom_education.models.timelapse.imageio.imread', return_value=np.array([[0,0,0],[0,10,0]]))\n @patch('tom_education.models.timelapse.fits_to_jpg', mock_fits_to_jpg)\n def test_background_normalisation(self, im_mock, norm_mock):\n ## TODO: Don't really understand how this test avoid exception in fit2image\n pipeline = self.create_timelapse_pipeline(self.prods)\n\n # With processing, the normalisation method should be called for each\n # frame\n buf = BytesIO()\n pipeline.write_timelapse(buf, normalise_background=True)\n self.assertEqual(norm_mock.call_count, len(self.prods))\n\n # With processing disabled, it shouldn't be called any more times\n buf = BytesIO()\n pipeline.write_timelapse(buf, normalise_background=False)\n self.assertEqual(norm_mock.call_count, len(self.prods))\n\n @override_settings(TOM_EDUCATION_TIMELAPSE_SETTINGS={'crop_scale': 0.8})\n def test_timelapse_cropping(self):\n pipeline = self.create_timelapse_pipeline(self.prods)\n\n buf = BytesIO()\n pipeline.write_timelapse(buf, crop=True)\n buf.seek(0)\n frames = imageio.mimread(buf)\n # Check all frames are the same shape\n shape = frames[0].shape\n self.assertTrue(all(f.shape == shape for f in frames[1:]))\n # Check the shape is as expected\n self.assertEqual(\n shape,\n (int(0.8 * self.test_fits_shape[0]), int(0.8 * self.test_fits_shape[1]))\n )\n\n # Repeat of the above test but with cropping disabled: the shape of the\n # output frames should be identical to the shape of the inputs\n buf = BytesIO()\n pipeline.write_timelapse(buf, crop=False)\n buf.seek(0)\n frames = imageio.mimread(buf)\n shape = frames[0].shape\n self.assertTrue(all(f.shape == shape for f in frames[1:]))\n self.assertEqual(shape, self.test_fits_shape)\n\n def test_cropping(self):\n K = 0.5\n data = np.array([\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, K, K, K, K, 0, 0, 0],\n [0, 0, 0, K, K, K, K, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n ], dtype=np.float)\n buf = write_fits_image_file(data)\n buf.seek(0)\n # hdul = fits.open(buf)\n hdu, hdr = fits.getdata(buf, header=True)\n # For some reason (float errors?) the non-zero values are changed after\n # saving and reloading the FITS file. 
Get the 'new' K to compare the\n # cropped image with\n K2 = np.max(hdu)\n\n hdu,hdr = crop_image(hdu, hdr, scale=0.5)\n\n # Note that size of cropped image is not exactly half; it is off by one\n # due to rounding\n self.assertEqual(hdu.shape, (2, 4))\n self.assertTrue(np.all(hdu == np.full((2, 4), K2)), hdu)\n\n\nclass GalleryTestCase(TomEducationTestCase):\n def setUp(self):\n super().setUp()\n self.url = reverse('tom_education:gallery')\n self.target = Target.objects.create(name='my target')\n self.prods = []\n\n image_data = np.ones((500, 4), dtype=np.float)\n image_data[20, :] = np.array([10, 20, 30, 40])\n for i in range(4):\n product_id = 'test{}'.format(i)\n prod = DataProduct.objects.create(\n product_id=product_id,\n target=self.target\n )\n buf = write_fits_image_file(image_data)\n prod.data.save(product_id, File(buf), save=True)\n self.prods.append(prod)\n setattr(self, 'pk{}'.format(i), str(prod.pk))\n\n def test_no_products(self):\n response = self.client.get(self.url)\n self.assertIn('messages', response.context)\n messages = list(response.context['messages'])\n self.assertEqual(len(messages), 1)\n self.assertEqual(str(messages[0]), 'No data products provided')\n self.assertNotIn('show_form', response.context)\n\n def test_context(self):\n pks = ','.join(map(str, [self.prods[0].pk, self.prods[2].pk]))\n response = self.client.get(self.url + '?product_pks={}'.format(pks))\n\n self.assertIn('form', response.context)\n form = response.context['form']\n self.assertTrue(isinstance(form, GalleryForm))\n self.assertEqual(form.product_pks, {str(self.prods[0].pk), str(self.prods[2].pk)})\n\n self.assertIn('product_pks', response.context)\n self.assertEqual(response.context['product_pks'], pks)\n self.assertIn('products', response.context)\n self.assertEqual(response.context['products'], {self.prods[0], self.prods[2]})\n\n def test_post(self):\n mygroup = DataProductGroup.objects.create(name='mygroup')\n\n response = self.client.post(self.url, {\n 'product_pks': ','.join([str(p.pk) for p in self.prods]),\n 'group': mygroup.pk,\n self.pk0: 'on',\n self.pk1: 'on',\n })\n\n # Products should have been added to the group\n for prod in self.prods[:2]:\n self.assertEqual(set(prod.group.all()), {mygroup})\n # Check no other products were added\n for prod in self.prods[2:]:\n self.assertEqual(set(prod.group.all()), set([]))\n\n # Should be redirected to group detail page\n self.assertEqual(response.status_code, 302)\n expected_url = '/dataproducts/data/group/{}/'.format(mygroup.pk)\n self.assertEqual(response.url, expected_url)\n # Check a success message is present\n response2 = self.client.get(expected_url)\n self.assertIn('messages', response2.context)\n messages = list(response2.context['messages'])\n self.assertEqual(len(messages), 1)\n self.assertEqual(str(messages[0]), 'Added 2 data products to group \\'mygroup\\'')\n\n\nclass AsyncProcessTestCase(TomEducationTestCase):\n @patch('tom_education.models.async_process.datetime')\n def test_terminal_timestamp(self, dt_mock):\n somedate = datetime(\n year=2019, month=1, day=2, hour=3, minute=4, second=5, microsecond=6\n )\n dt_mock.now.return_value = somedate\n\n proc = AsyncProcess.objects.create(identifier='blah')\n self.assertTrue(proc.terminal_timestamp is None)\n\n # Timestamp should be set automatically when saving in a terminal state\n proc.status = ASYNC_STATUS_FAILED\n proc.save()\n self.assertEqual(proc.terminal_timestamp, somedate)\n\n\nclass AsyncStatusApiTestCase(TomEducationTestCase):\n @patch('django.utils.timezone.now')\n 
@patch('tom_education.models.async_process.datetime')\n @patch('tom_education.views.datetime')\n def test_api(self, views_dt_mock, models_dt_mock, django_mock):\n terminal_time = datetime(year=2019, month=1, day=2, hour=3, minute=4, second=5, microsecond=6)\n current_time = datetime(year=2050, month=1, day=1, hour=1, minute=1, second=1, microsecond=1)\n create_time1 = datetime(year=1970, month=1, day=1, hour=1, minute=1, second=1, microsecond=1)\n create_time2 = datetime(year=1971, month=1, day=1, hour=1, minute=1, second=1, microsecond=1)\n terminal_timestamp = terminal_time.timestamp()\n current_timestamp = current_time.timestamp()\n create_timestamp1 = create_time1.timestamp()\n create_timestamp2 = create_time2.timestamp()\n\n models_dt_mock.now.return_value = terminal_time\n views_dt_mock.now.return_value = current_time\n\n django_mock.return_value = create_time1\n target = Target.objects.create(name='my target')\n proc = AsyncProcess.objects.create(identifier='hello', target=target)\n # Make a failed process with a different creation time\n # Have it an PipelineProcess to check 'view_url' is provided\n django_mock.return_value = create_time2\n failed_proc = PipelineProcess.objects.create(\n identifier='ohno',\n target=target,\n status=ASYNC_STATUS_FAILED,\n failure_message='oops'\n )\n url = reverse('tom_education:async_process_status_api', kwargs={'target': target.pk})\n\n # Construct the dicts representing processes expected in the JSON\n # response (excluding fields that will change)\n proc_dict = {\n 'process_type': 'AsyncProcess',\n 'identifier': 'hello',\n 'created': create_timestamp1,\n 'terminal_timestamp': None,\n 'view_url': None,\n 'failure_message': None,\n }\n failed_proc_dict = {\n 'process_type': 'PipelineProcess',\n 'identifier': 'ohno',\n 'created': create_timestamp2,\n 'status': 'failed',\n 'failure_message': 'oops',\n 'terminal_timestamp': terminal_timestamp,\n 'view_url': reverse('tom_education:pipeline_detail', kwargs={'pk': failed_proc.pk})\n }\n\n response1 = self.client.get(url)\n self.assertEqual(response1.status_code, 200)\n self.assertEqual(response1.json(), {\n 'timestamp': current_timestamp,\n 'processes': [failed_proc_dict, dict(proc_dict, status='pending')]\n })\n\n proc.status = ASYNC_STATUS_CREATED\n proc.save()\n response2 = self.client.get(url)\n self.assertEqual(response2.status_code, 200)\n self.assertEqual(response2.json(), {\n 'timestamp': current_timestamp,\n 'processes': [\n failed_proc_dict,\n dict(proc_dict, status='created', terminal_timestamp=terminal_timestamp)\n ],\n })\n\n proc.status = ASYNC_STATUS_FAILED\n proc.save()\n response3 = self.client.get(url)\n self.assertEqual(response3.status_code, 200)\n self.assertEqual(response3.json(), {\n 'timestamp': current_timestamp,\n 'processes': [\n failed_proc_dict,\n dict(proc_dict, status='failed', terminal_timestamp=terminal_timestamp,\n failure_message=None)\n ]\n })\n\n # Bad target PK should give 404\n response4 = self.client.get(reverse('tom_education:async_process_status_api', kwargs={'target': 100000}))\n self.assertEqual(response4.status_code, 404)\n self.assertEqual(response4.json(), {'detail': 'Not found.'})\n\n\nclass FakePipeline(PipelineProcess):\n short_name = 'fakepip'\n\n class Meta:\n proxy = True\n\n def do_pipeline(self, tmpdir, **kwargs):\n self.log(\"doing the thing\")\n file1 = tmpdir / 'file1.csv'\n file2 = tmpdir / 'file2.hs'\n file3 = tmpdir / 'file3.png'\n file1.write_text('hello')\n file2.write_text('goodbye')\n file3.write_text('hello again')\n self.log(\"and 
another thing\")\n return [\n (file1, DataProduct),\n (file2, ReducedDatum, 'image_file'),\n PipelineOutput(path=file3, output_type=DataProduct, data_product_type='image_file')\n ]\n\n\nclass FakePipelineWithFlags(FakePipeline):\n flags = {\n 'myflag': {'default': False, 'long_name': 'myflag'},\n 'default_true': {'default': True, 'long_name': 'Default True'},\n 'default_false': {'default': False, 'long_name': 'Default False'},\n }\n\n class Meta:\n proxy = True\n # Create method to pass flags to, so we can mock it and check the correct\n # flags were passed\n\n def log_flags(self, flags):\n pass\n\n def do_pipeline(self, tmpdir, **flags):\n self.log_flags(flags)\n return super().do_pipeline(tmpdir)\n\n\nclass FakePipelineBadFlags(FakePipeline):\n flags = 4\n\n class Meta:\n proxy = True\n\n\nclass PipelineTestCase(TomEducationTestCase):\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n target_name = 't{}'.format(datetime.now().timestamp())\n cls.target = Target.objects.create(name=target_name)\n cls.prods = [DataProduct.objects.create(product_id=f'test_{i}', target=cls.target)\n for i in range(4)]\n cls.pks = [str(prod.pk) for prod in cls.prods]\n for prod in cls.prods:\n fn = f'{prod.product_id}_file.tar.gz'\n prod.data.save(fn, File(BytesIO()))\n\n def setUp(self):\n super().setUp()\n self.user = User.objects.create_user(username='test', email='test@example.com')\n self.client.force_login(self.user)\n assign_perm('tom_targets.view_target', self.user, self.target)\n\n def test_no_target(self):\n proc = FakePipeline.objects.create(identifier='notarget', target=None)\n proc.input_files.add(*self.prods)\n with self.assertRaises(AsyncError):\n proc.run()\n\n def test_no_input_files(self):\n proc = FakePipeline.objects.create(identifier='notarget', target=self.target)\n with self.assertRaises(AsyncError):\n proc.run()\n\n @patch('tom_education.models.pipelines.datetime')\n def test_save_outputs(self, dt_mock):\n dt_mock.now.return_value = datetime(\n year=1970, month=1, day=1, hour=0, minute=0, second=17\n )\n\n proc = FakePipeline.objects.create(identifier='someprocess', target=self.target)\n proc.input_files.add(*self.prods)\n proc.save()\n\n pre_dp_count = DataProduct.objects.count()\n pre_reduced_count = ReducedDatum.objects.count()\n pre_group_count = DataProductGroup.objects.count()\n self.assertEqual(pre_group_count, 0)\n\n proc.run()\n\n post_dp_count = DataProduct.objects.count()\n post_reduced_count = ReducedDatum.objects.count()\n post_group_count = DataProductGroup.objects.count()\n self.assertEqual(post_dp_count, pre_dp_count + 2)\n self.assertEqual(post_reduced_count, pre_reduced_count + 1)\n self.assertEqual(post_group_count, pre_group_count + 1)\n\n self.assertTrue(proc.group is not None)\n self.assertEqual(proc.group.name, 'someprocess_outputs')\n self.assertEqual(proc.group.dataproduct_set.count(), 2)\n\n # Output names and contents come from FakePipeline.do_pipeline\n file1_dp = DataProduct.objects.get(product_id='someprocess_file1.csv')\n file3_dp = DataProduct.objects.get(product_id='someprocess_file3.png')\n self.assertEqual(file1_dp.data.read(), b'hello')\n self.assertEqual(file3_dp.data.read(), b'hello again')\n\n self.assertEqual(file1_dp.data_product_type, '')\n self.assertEqual(file3_dp.data_product_type, 'image_file')\n\n file2_rd = ReducedDatum.objects.get(source_name='someprocess_file2.hs')\n self.assertEqual(file2_rd.target, self.target)\n self.assertEqual(file2_rd.data_type, 'image_file')\n self.assertEqual(file2_rd.timestamp.timestamp(), 17)\n 
self.assertEqual(file2_rd.value, 'goodbye')\n self.assertEqual(file2_rd.source_location, '')\n\n def _test_no_data_products(self):\n # **** This seems to have a problem because dataproduct is required **\n # If outputs are only reduced data, a data product group should not be\n # created\n class NoDataProductPipeline(PipelineProcess):\n class Meta:\n proxy = True\n\n def do_pipeline(pself, tmpdir):\n outfile = tmpdir / 'somefile.csv'\n outfile.write_text('this is a csv')\n return [(outfile, ReducedDatum)]\n\n pre_group_count = DataProductGroup.objects.count()\n pre_reduced_count = DataProductGroup.objects.count()\n\n proc = NoDataProductPipeline.objects.create(identifier='someprocess', target=self.target)\n proc.input_files.add(*self.prods)\n proc.save()\n proc.run()\n\n post_group_count = DataProductGroup.objects.count()\n post_reduced_count = ReducedDatum.objects.count()\n # No group should have been created\n self.assertEqual(post_group_count, pre_group_count)\n # ReducedDatum should still have been created\n self.assertEqual(post_reduced_count, pre_reduced_count + 1)\n\n def test_invalid_output_type(self):\n class InvalidOutputTypePipeline(PipelineProcess):\n class Meta:\n proxy = True\n\n def do_pipeline(pself, tmpdir):\n outfile = tmpdir / 'somefile.csv'\n outfile.write_text('this is a csv')\n return [(outfile, 'cheese', 'sometag')]\n\n proc = InvalidOutputTypePipeline.objects.create(identifier='someprocess', target=self.target)\n proc.input_files.add(*self.prods)\n proc.save()\n with self.assertRaises(AsyncError) as ex_info:\n proc.run()\n self.assertEqual(\"Invalid output type 'cheese'\", str(ex_info.exception))\n\n def test_logs(self):\n proc = FakePipeline.objects.create(identifier='someprocess', target=self.target)\n proc.input_files.add(*self.prods)\n proc.save()\n proc.run()\n # Message comes from FakePipeline\n self.assertEqual(proc.logs, 'doing the thing\\nand another thing\\n')\n\n def test_update_status(self):\n class StatusTestPipeline(PipelineProcess):\n class Meta:\n proxy = True\n\n def do_pipeline(pself, tmpdir):\n with pself.update_status('doing something important'):\n self.assertEqual(pself.status, 'doing something important')\n return []\n\n proc = StatusTestPipeline.objects.create(identifier='someprocess', target=self.target)\n proc.input_files.add(*self.prods)\n proc.save()\n proc.run()\n\n def test_view(self):\n proc_with_target = FakePipeline.objects.create(identifier='someprocess', target=self.target)\n url = reverse('tom_education:pipeline_detail', kwargs={'pk': proc_with_target.pk})\n target_url = reverse('tom_education:target_detail', kwargs={'pk': self.target.pk})\n response = self.client.get(url)\n self.assertIn('target_url', response.context)\n self.assertEqual(response.context['target_url'], target_url)\n\n @patch('django.utils.timezone.now')\n @patch('tom_education.models.async_process.datetime')\n def test_api(self, async_mock, django_mock):\n async_mock.now.return_value = datetime(\n year=1970, month=1, day=1, hour=0, minute=6, second=0, microsecond=0\n )\n django_mock.return_value = datetime(\n year=1970, month=1, day=1, hour=0, minute=5, second=0, microsecond=0\n )\n\n proc = FakePipeline.objects.create(\n target=self.target,\n identifier='someprocess',\n status='somestatus'\n )\n proc.input_files.add(*self.prods)\n proc.save()\n\n url = reverse('tom_education:pipeline_api', kwargs={'pk': proc.pk})\n view_url = reverse('tom_education:pipeline_detail', kwargs={'pk': proc.pk})\n response = self.client.get(url)\n 
self.assertEqual(response.status_code, 200)\n self.assertEqual(response.json(), {\n 'identifier': 'someprocess',\n 'created': 300,\n 'status': 'somestatus',\n 'logs': '',\n 'terminal_timestamp': None,\n 'failure_message': None,\n 'view_url': view_url,\n 'group_name': None,\n 'group_url': None,\n })\n\n proc.run()\n group_url = reverse('tom_dataproducts:group-detail', kwargs={'pk': proc.group.pk})\n response2 = self.client.get(url)\n self.assertEqual(response2.status_code, 200)\n self.assertEqual(response2.json(), {\n 'identifier': 'someprocess',\n 'created': 300,\n 'status': ASYNC_STATUS_CREATED,\n 'terminal_timestamp': 360,\n 'failure_message': None,\n 'view_url': view_url,\n 'group_url': group_url,\n 'group_name': 'someprocess_outputs',\n 'logs': proc.logs\n })\n\n # Failure message should be included if process failed\n proc.status = ASYNC_STATUS_FAILED\n proc.failure_message = 'something went wrong'\n proc.save()\n response3 = self.client.get(url)\n self.assertEqual(response3.status_code, 200)\n self.assertEqual(response3.json(), {\n 'identifier': 'someprocess',\n 'created': 300,\n 'status': ASYNC_STATUS_FAILED,\n 'terminal_timestamp': 360,\n 'failure_message': 'something went wrong',\n 'view_url': view_url,\n 'group_url': group_url,\n 'group_name': 'someprocess_outputs',\n 'logs': proc.logs\n })\n\n # Bad PK should give 404\n response4 = self.client.get(reverse('tom_education:pipeline_api', kwargs={'pk': 100000}))\n self.assertEqual(response4.status_code, 404)\n self.assertEqual(response4.json(), {'detail': 'Not found.'})\n\n @patch('tom_education.tests.FakePipelineWithFlags.log_flags')\n @patch('tom_education.models.pipelines.datetime')\n def test_form(self, dt_mock, flags_mock):\n \"\"\"In the target data view\"\"\"\n url = reverse('tom_education:target_data', kwargs={'pk': self.target.pk})\n test_settings = {\n 'mypip': 'tom_education.tests.FakePipeline',\n 'withflags': 'tom_education.tests.FakePipelineWithFlags'\n }\n with self.settings(TOM_EDUCATION_PIPELINES=test_settings):\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n self.assertIn('pipeline_names', response.context)\n self.assertEqual(response.context['pipeline_names'], ['mypip', 'withflags'])\n self.assertIn('pipeline_flags', response.context)\n self.assertEqual(response.context['pipeline_flags'], {\n 'withflags': FakePipelineWithFlags.flags\n })\n\n expect_400_data = [\n # missing pipeline name\n {'action': 'pipeline'},\n # invalid pipeline name\n {'action': 'pipeline', 'pipeline_name': 'blah'},\n ]\n for data in expect_400_data:\n resp = self.client.post(url, dict(data, **{self.pks[0]: 'on'}))\n self.assertEqual(resp.status_code, 400, data)\n\n # Give valid pipeline name and check it was created and run\n dt_mock.now.return_value = datetime(year=1980, month=1, day=1)\n response2 = self.client.post(url, {\n 'action': 'pipeline', 'pipeline_name': 'mypip', self.pks[0]: 'on'\n })\n self.assertEqual(response2.status_code, 200)\n # Check process was made\n proc = PipelineProcess.objects.filter(identifier__startswith='fakepip').first()\n self.assertTrue(proc is not None)\n # Check outputs\n self.assertTrue(proc.group is not None)\n self.assertEqual(proc.group.dataproduct_set.count(), 2)\n # Shouldn't be any flags\n self.assertEqual(proc.flags_json, None)\n\n # POST with flags and check they were passed do the pipeline\n # correctly\n dt_mock.now.return_value = datetime(year=1981, month=1, day=1)\n response3 = self.client.post(url, {\n 'action': 'pipeline', 'pipeline_name': 'withflags', 
self.pks[0]: 'on',\n 'pipeline_flag_myflag': 'on',\n 'pipeline_flag_default_false': 'on',\n 'pipeline_flag_bogus': 'on', # unexpected flag name should not cause problems\n })\n expected_flags = {'myflag': True, 'default_true': False, 'default_false': True}\n proc = PipelineProcess.objects.filter(identifier__contains='1981').get()\n self.assertEqual(proc.flags_json, json.dumps(expected_flags))\n flags_mock.assert_called_with(expected_flags)\n\n def test_invalid_pipelines(self):\n invalid_settings = [\n # Bad import paths\n {'mypip': 'blah'},\n {'mypip': 'fakepackage.blah'},\n {'mypip': 'tom_education.blah'},\n # Path to an object which is not a PipelineProcess subclass\n {'mypip': 'datetime.datetime'},\n # Class with invalid flags\n {'mypip': 'tom_education.tests.FakePipelineBadFlags'},\n ]\n\n url = reverse('tom_education:target_data', kwargs={'pk': self.target.pk})\n for invalid in invalid_settings:\n with self.settings(TOM_EDUCATION_PIPELINES=invalid):\n with self.assertRaises(InvalidPipelineError):\n self.client.get(url)\n\n def test_validate_flags(self):\n invalid = [\n # Wrong type\n 4, 'hello', [],\n # Missing default\n {'name': {'long_name': 'hello'}},\n # Missing long name\n {'name': {'default': False}},\n # Whitespace in name\n {'name with spaces': {'default': False, 'long_name': 'hello'}},\n ]\n for flags in invalid:\n with self.assertRaises(AssertionError):\n PipelineProcess.validate_flags(flags)\n\n # Should not raise an exception\n PipelineProcess.validate_flags(FakePipeline.flags)\n PipelineProcess.validate_flags(FakePipelineWithFlags.flags)\n\n def test_allowed_suffixes(self):\n tar_prod = DataProduct.objects.create(product_id='tar_prod', target=self.target)\n tar_prod.data.save('myarchive.tar', File(BytesIO()))\n\n proc = FakePipeline.objects.create(identifier='proc', target=self.target)\n proc.input_files.add(*self.prods, tar_prod)\n proc.save()\n\n with patch('tom_education.tests.FakePipeline.allowed_suffixes', ['.tar', '.7z']):\n with self.assertRaises(AsyncError) as ex_info:\n proc.run()\n err_msg = str(ex_info.exception)\n self.assertIn('_file.tar.gz', err_msg) # filename of the offending file\n self.assertIn('.tar, .7z', err_msg) # allowed suffixes\n\n with patch('tom_education.tests.FakePipeline.allowed_suffixes', ['.tar', '.tar.gz']):\n proc.run()\n\n\ndef mock_write_timelapse(_self, outfile, *args, **kwargs):\n pass\n\n\nclass TargetDetailApiTestCase(TomEducationTestCase):\n def setUp(self):\n super().setUp()\n write_timelapse_method = 'tom_education.models.timelapse.TimelapsePipeline.write_timelapse'\n\n now = datetime.now().timestamp()\n self.target_name = f'target_{now}'\n self.target = Target(\n name=self.target_name,\n ra=1.2345\n )\n self.target.save(extras={'extrafield': 'extravalue'})\n\n # Make some non-timelapse data products for the target\n data_products = [\n # (product_id, filename)\n ('fits1', 'somefile.fits'),\n ('fits2', 'somefile.fits.fz'),\n ('png', 'somefile.png'),\n ('no extension', 'randomfile'),\n ('not a real timelapse', 'timelapse.sh'),\n ]\n for product_id, filename in data_products:\n dp = DataProduct.objects.create(product_id=product_id, target=self.target)\n dp.data.save(filename, File(BytesIO()))\n\n # Make a non-timelapse pipeline process\n other_pipeline = PipelineProcess.objects.create(\n identifier='this_is_not_a_timelapse',\n target=self.target\n )\n other_pipeline.input_files.add(DataProduct.objects.first())\n other_pipeline.save()\n\n # Create GIF and WebM timelapses with 2 and 1 frames respectively\n dp1 = 
DataProduct.objects.create(product_id='dp1', target=self.target)\n dp2 = DataProduct.objects.create(product_id='dp2', target=self.target)\n dp1.data.save('frame1.fits.fz', File(BytesIO()))\n dp2.data.save('frame2.fits.fz', File(BytesIO()))\n\n tl_gif_pipeline = TimelapsePipeline.objects.create(\n identifier='gif_tl',\n target=self.target\n )\n tl_gif_pipeline.input_files.add(dp1, dp2)\n with patch(write_timelapse_method, mock_write_timelapse):\n with self.settings(TOM_EDUCATION_TIMELAPSE_SETTINGS={'format': 'gif'}):\n tl_gif_pipeline.run()\n\n self.gif_creation = tl_gif_pipeline.terminal_timestamp\n self.gif_url = tl_gif_pipeline.group.dataproduct_set.first().data.url\n\n tl_webm_pipeline = TimelapsePipeline.objects.create(\n identifier='webm_tl',\n target=self.target\n )\n tl_webm_pipeline.input_files.add(dp1)\n with patch(write_timelapse_method, mock_write_timelapse):\n with self.settings(TOM_EDUCATION_TIMELAPSE_SETTINGS={'format': 'webm'}):\n tl_webm_pipeline.run()\n\n self.webm_creation = tl_webm_pipeline.terminal_timestamp\n self.webm_url = tl_webm_pipeline.group.dataproduct_set.first().data.url\n\n # Make some timelapses pipelines in a non-terminal state: should not be\n # included in API response\n tl_failed = TimelapsePipeline.objects.create(\n identifier='tl_failed',\n target=self.target,\n status=ASYNC_STATUS_FAILED\n )\n tl_pending = TimelapsePipeline.objects.create(\n identifier='tl_pending',\n target=self.target,\n status=ASYNC_STATUS_PENDING\n )\n\n @override_settings(EXTRA_FIELDS=[{'name': 'extrafield', 'type': 'string'}])\n def test_api(self):\n self.maxDiff = None\n\n url = reverse('tom_education:target_api', kwargs={'pk': self.target.pk})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.json(), {\n 'target': {\n 'name': self.target_name,\n # Note: ra should be missing\n 'extra_fields': {'extrafield': 'extravalue'},\n },\n # Should be sorted: most recent first\n 'timelapses': [{\n 'name': 'webm_tl_t.webm',\n 'format': 'webm',\n 'url': self.webm_url,\n 'frames': 1,\n 'created': self.webm_creation.timestamp()\n }, {\n 'name': 'gif_tl_t.gif',\n 'format': 'gif',\n 'url': self.gif_url,\n 'frames': 2,\n 'created': self.gif_creation.timestamp()\n }]\n })\n\n url_404 = reverse('tom_education:target_api', kwargs={'pk': 1000000})\n response_404 = self.client.get(url_404)\n self.assertEqual(response_404.status_code, 404)\n self.assertEqual(response_404.json(), {'detail': 'Not found.'})\n\n\n@override_settings(TOM_FACILITY_CLASSES=FAKE_FACILITIES)\n@patch('tom_education.models.ObservationTemplate.get_identifier_field', return_value='test_input')\n@patch('tom_education.views.TemplatedObservationCreateView.supported_facilities', ('TemplateFake',))\n@patch('tom_education.views.ObservationAlertApiCreateView.throttle_scope', '')\nclass ObservationAlertApiTestCase(TomEducationTestCase):\n def setUp(self):\n super().setUp()\n self.target = Target.objects.create(name='my target')\n self.template = ObservationTemplate.objects.create(\n name='mytemplate',\n target=self.target,\n facility='TemplateFake',\n fields='{\"test_input\": \"mytemplate\", \"extra_field\": \"somevalue\", \"another_extra_field\": 17}'\n )\n\n @patch('tom_education.models.observation_template.datetime')\n def test_create(self, dt_mock, _mock):\n dt_mock.now.return_value = datetime(\n year=2019, month=1, day=2, hour=3, minute=4, second=5, microsecond=6\n )\n url = reverse('tom_education:observe_api')\n response = self.client.post(url, {\n 'target': self.target.pk,\n 
'template_name': self.template.name,\n 'facility': 'TemplateFake',\n 'overrides': {'extra_field': 'hello'},\n 'email': 'someone@somesite.org',\n }, content_type='application/json')\n self.assertEqual(response.status_code, 201)\n\n # Check observation and alert were created\n self.assertEqual(ObservationRecord.objects.count(), 1)\n self.assertEqual(ObservationAlert.objects.count(), 1)\n\n ob = ObservationRecord.objects.first()\n alert = ObservationAlert.objects.first()\n\n self.assertEqual(ob.target, self.target)\n self.assertEqual(ob.facility, 'TemplateFake')\n self.assertEqual(json.loads(ob.parameters), {\n 'target_id': self.target.pk,\n 'facility': 'TemplateFake',\n 'test_input': 'mytemplate-2019-01-02-030405',\n 'extra_field': 'hello',\n 'another_extra_field': 17,\n 'observation_type': '',\n })\n\n self.assertEqual(alert.observation, ob)\n self.assertEqual(alert.email, 'someone@somesite.org')\n\n def test_no_overrides(self, _mock):\n url = reverse('tom_education:observe_api')\n response = self.client.post(url, {\n 'target': self.target.pk,\n 'template_name': self.template.name,\n 'facility': 'TemplateFake',\n 'email': 'someone@somesite.org',\n }, content_type='application/json')\n self.assertEqual(response.status_code, 201)\n self.assertEqual(ObservationAlert.objects.count(), 1)\n\n def test_invalid_target(self, _mock):\n url = reverse('tom_education:observe_api')\n response = self.client.post(url, {\n 'target': 10000000000,\n 'template_name': self.template.name,\n 'facility': 'TemplateFake',\n 'email': 'someone@somesite.org',\n }, content_type='application/json')\n self.assertEqual(response.status_code, 404)\n self.assertEqual(response.json(), {\n 'detail': 'Target not found.'\n })\n\n def test_invalid_facility(self, _mock):\n url = reverse('tom_education:observe_api')\n response = self.client.post(url, {\n 'target': self.target.pk,\n 'template_name': self.template.name,\n 'facility': 'the facility you were looking for does not exist',\n 'email': 'someone@somesite.org',\n }, content_type='application/json')\n self.assertEqual(response.status_code, 404)\n self.assertEqual(response.json(), {\n 'detail': 'Facility not found.'\n })\n\n def test_invalid_template(self, _mock):\n url = reverse('tom_education:observe_api')\n response = self.client.post(url, {\n 'target': self.target.pk,\n 'template_name': 'tempo',\n 'facility': 'TemplateFake',\n 'email': 'someone@somesite.org',\n }, content_type='application/json')\n self.assertEqual(response.status_code, 404)\n self.assertEqual(response.json(), {\n 'detail': \"Template 'tempo' not found for target 'my target' and facility 'TemplateFake'\"\n })\n\n def test_invalid_form(self, _mock):\n # Check that form validation is called, and that errors are passed back\n # in the API response\n url = reverse('tom_education:observe_api')\n response = self.client.post(url, {\n 'target': self.target.pk,\n 'template_name': self.template.name,\n 'facility': 'TemplateFake',\n 'email': 'someone@somesite.org',\n 'overrides': {'another_extra_field': 'not an integer'},\n }, content_type='application/json')\n self.assertEqual(response.status_code, 400)\n self.assertEqual(response.json(), {\n 'another_extra_field': ['Enter a whole number.']\n })\n\n\n@override_settings(TOM_FACILITY_CLASSES=FAKE_FACILITIES)\n@patch('tom_education.tests.FakeTemplateFacility.save_data_products')\n@patch('tom_education.models.TimelapsePipeline.write_timelapse', mock_write_timelapse)\n@override_settings(TOM_EDUCATION_FROM_EMAIL_ADDRESS='tom@toolkit.edu')\nclass 
ProcessObservationAlertsTestCase(TomEducationTestCase):\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n target_ident = 'target_{}'.format(datetime.now().strftime('%s'))\n cls.target = Target.objects.create(name='my target')\n cls.ob = ObservingRecordFactory.create(\n target_id=cls.target.pk,\n facility=FakeTemplateFacility.name,\n status='not even started'\n )\n cls.dp1 = DataProduct.objects.create(product_id='dp1', target=cls.target)\n cls.dp2 = DataProduct.objects.create(product_id='dp2', target=cls.target)\n cls.dp3 = DataProduct.objects.create(product_id='dp3', target=cls.target)\n cls.dp1.data.save('img1.fits.fz', File(BytesIO()))\n cls.dp2.data.save('img2.fits.fz', File(BytesIO()))\n # Create a non-FITS file\n cls.dp3.data.save('img3.png', File(BytesIO()))\n\n def test_status_and_data_products_updated(self, save_dp_mock):\n alert = ObservationAlert.objects.create(observation=self.ob, email='someone@somesite.org')\n call_command('process_observation_alerts')\n save_dp_mock.assert_called_once_with(alert.observation)\n alert.refresh_from_db()\n self.assertEqual(alert.observation.status, 'COMPLETED')\n\n def test_non_alert_observation_not_updated(self, save_dp_mock):\n non_alert_ob = ObservingRecordFactory.create(\n target_id=self.target.pk,\n facility=FakeTemplateFacility.name,\n status='not even started'\n )\n call_command('process_observation_alerts')\n save_dp_mock.assert_not_called()\n non_alert_ob.refresh_from_db()\n self.assertEqual(non_alert_ob.status, 'not even started')\n\n @patch('tom_education.models.TimelapsePipeline.create_timestamped',\n wraps=TimelapsePipeline.create_timestamped)\n def test_timelapse_created(self, pipeline_mock, save_dp_mock):\n alert = ObservationAlert.objects.create(\n observation=self.ob, email='someone@somesite.org'\n )\n call_command('process_observation_alerts')\n self.assertEqual(TimelapsePipeline.objects.count(), 1)\n self.assertEqual(DataProduct.objects.filter(data_product_type=settings.DATA_PRODUCT_TYPES['timelapse'][0]).count(), 1)\n\n # Check method to create timelapse was called with the correct\n # arguments\n pipeline_mock.assert_called_once()\n args, _ = pipeline_mock.call_args\n self.assertEqual(len(args), 2)\n self.assertEqual(args[0], self.target)\n self.assertIsInstance(args[1], QuerySet)\n self.assertEqual(set(args[1].all()), {self.dp1, self.dp2})\n\n def test_old_timelapses_deleted(self, save_dp_mock):\n alert = ObservationAlert.objects.create(observation=self.ob, email='someone@somesite.org')\n tl = DataProduct.objects.create(\n target=self.target, product_id='mytimelapse', data_product_type=settings.DATA_PRODUCT_TYPES['timelapse'][0]\n )\n other_dp = DataProduct.objects.create(\n target=self.target, product_id='notatimelapse'\n )\n call_command('process_observation_alerts')\n # Old timelapse and its data should have been deleted\n self.assertEqual(DataProduct.objects.filter(pk=tl.pk).count(), 0)\n self.assertFalse(tl.data)\n # Other data product should not have been deleted\n self.assertEqual(DataProduct.objects.filter(pk=other_dp.pk).count(), 1)\n # Should be one (new) timelapse\n self.assertEqual(DataProduct.objects.filter(data_product_type=settings.DATA_PRODUCT_TYPES['timelapse'][0]).count(), 1)\n\n @patch('tom_education.models.TimelapsePipeline.create_timestamped',\n wraps=TimelapsePipeline.create_timestamped)\n def test_multiple_alerts_single_target(self, pipeline_mock, save_dp_mock):\n # Create two alerts for the same observation: the target should only\n # have one new timelapse created\n alert1 = 
ObservationAlert.objects.create(observation=self.ob, email='someone@somesite.org')\n alert2 = ObservationAlert.objects.create(observation=self.ob, email='someoneelse@somesite.org')\n call_command('process_observation_alerts')\n pipeline_mock.assert_called_once()\n\n @patch('tom_education.models.TimelapsePipeline.create_timestamped',\n wraps=TimelapsePipeline.create_timestamped)\n def test_exclude_raw_data(self, pipeline_mock, save_dp_mock):\n raw_dp = DataProduct.objects.create(product_id='raw', target=self.target)\n raw_dp.data.save('rawfile.e00.fits.fz', File(BytesIO()))\n alert = ObservationAlert.objects.create(observation=self.ob, email='someone@somesite.org')\n call_command('process_observation_alerts')\n\n pipeline_mock.assert_called_once()\n args, _ = pipeline_mock.call_args\n # Raw file should not be included\n self.assertEqual(set(args[1].all()), {self.dp1, self.dp2})\n\n def test_emails_sent(self, save_dp_mock):\n alert = ObservationAlert.objects.create(observation=self.ob, email='someone@somesite.org')\n call_command('process_observation_alerts')\n\n self.assertEqual(len(mail.outbox), 1)\n msg = mail.outbox[0]\n self.assertEqual(msg.to, ['someone@somesite.org'])\n self.assertIn('Observation', msg.subject)\n self.assertIn('observation', msg.body)\n\n @override_settings()\n def test_no_from_email_address(self, save_dp_mock):\n # Unset from email add setting: should get an error message\n del settings.TOM_EDUCATION_FROM_EMAIL_ADDRESS\n alert = ObservationAlert.objects.create(observation=self.ob, email='someone@somesite.org')\n buf = StringIO()\n call_command('process_observation_alerts', stderr=buf)\n self.assertIn(\"TOM_EDUCATION_FROM_EMAIL_ADDRESS not set\", buf.getvalue())\n\n\nclass DataProductDeleteMultipleViewTestCase(DataProductTestCase):\n def setUp(self):\n super().setUp()\n self.user = User.objects.create_user(username='test', email='test@example.com')\n self.client.force_login(self.user)\n assign_perm('tom_targets.view_target', self.user, self.target)\n self.url = reverse('tom_education:target_data', kwargs={'pk': self.target.pk})\n self.num_products = DataProduct.objects.count()\n\n def test_user_not_logged_in(self):\n self.client.logout()\n base_url = reverse('tom_education:delete_dataproducts')\n url = base_url + '?product_pks=' + ','.join(str(prod.pk) for prod in self.prods)\n response = self.client.get(url)\n # Should be redirected to login\n self.assertTrue(response.url.startswith(reverse('login') + '?'))\n # No DPs should have been deleted\n self.assertEqual(DataProduct.objects.count(), self.num_products)\n\n self.client.force_login(self.user)\n\n def test_confirmation_page(self):\n base_url = reverse('tom_education:delete_dataproducts')\n url = base_url + '?product_pks=' + ','.join(str(prod.pk) for prod in self.prods)\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n # Check the filenames of the to-be-deleted products are displayed\n for prod in self.prods:\n self.assertIn(prod.data.name.encode(), response.content)\n # 'next' URL should be included in the form\n self.assertIn(b'<input type=\"hidden\" name=\"next\"', response.content)\n # Products should not have actually been deleted yet\n self.assertEqual(DataProduct.objects.count(), self.num_products)\n\n def test_delete(self):\n base_url = reverse('tom_education:delete_dataproducts')\n response = self.client.post(base_url, {\n 'next': 'mycoolwebsite.net',\n 'product_pks': ','.join(map(str, [self.prods[0].pk, self.prods[2].pk]))\n })\n self.assertEqual(response.status_code, 
302)\n self.assertEqual(response.url, 'mycoolwebsite.net')\n self.assertEqual(set(DataProduct.objects.all()), {self.prods[1], self.prods[3]})\n\n # Success message should be present on next page\n response2 = self.client.get('/')\n self.assertIn('messages', response2.context)\n messages = list(response2.context['messages'])\n self.assertEqual(len(messages), 1)\n self.assertEqual(str(messages[0]), 'Deleted 2 data products')\n\n\ndef mock_instruments(_self):\n return {\n 'myinstr': {\n 'type': 'IMAGE',\n 'class': '2M0',\n 'name': 'test instrument',\n 'optical_elements': {\n 'filters': [\n {'code': 'redfilter', 'name': 'RED', 'schedulable': True},\n {'code': 'greenfilter', 'name': 'GREEN', 'schedulable': True},\n {'code': 'bluefilter', 'name': 'BLUE', 'schedulable': True},\n ]\n }\n }\n }\n\n\ndef mock_proposals(_self):\n return [('myprop', 'some proposal')]\n\n\n@patch('tom_education.facilities.EducationLCOForm._get_instruments', mock_instruments)\n@patch('tom_education.facilities.EducationLCOForm.proposal_choices', mock_proposals)\n@patch('tom_education.facilities.EducationLCOFacility.validate_observation', return_value=None)\n@patch('tom_education.facilities.EducationLCOFacility.submit_observation', return_value=[1234])\nclass EducationLCOFacilityTestCase(TomEducationTestCase):\n def setUp(self):\n super().setUp()\n self.target = Target.objects.create(name='my target')\n self.url = reverse('tom_education:create_obs', kwargs={'facility': 'LCO'})\n self.user = User.objects.create_user(username='test', email='test@example.com')\n self.client.force_login(self.user)\n\n # Base form data excluding filter/exposure fields\n self.base_form_data = {\n 'target_id': self.target.pk,\n 'facility': 'LCO',\n 'name': 'someobs',\n 'proposal': 'myprop',\n 'ipp_value': '1.05',\n 'observation_mode': 'NORMAL',\n 'max_airmass': '1.6',\n 'start': '2000-01-01',\n 'end': '2001-01-01',\n 'instrument_type': 'myinstr'\n }\n\n def test_multiple_instrument_configurations(self, _validate_mock, submit_mock):\n response = self.client.post(self.url, data={\n **self.base_form_data,\n 'redfilter_exposure_count': '1',\n 'redfilter_exposure_time': '2',\n\n 'bluefilter_exposure_count': '3',\n 'bluefilter_exposure_time': '4',\n })\n # Check payload was submitted with correct-looking args\n submit_mock.assert_called_once()\n args, kwargs = submit_mock.call_args\n self.assertEqual(len(args), 1)\n self.assertEqual(kwargs, {})\n # Get submitted payload and check instrument configurations were\n # correct\n (payload,) = args\n configs = payload['requests'][0]['configurations']\n self.assertEqual(len(configs), 2)\n instr_configs = [config['instrument_configs'][0] for config in configs]\n red_instr_config = {\n 'exposure_count': 1,\n 'exposure_time': 2,\n 'optical_elements': {'filter': 'redfilter'}\n }\n blue_instr_config = {\n 'exposure_count': 3,\n 'exposure_time': 4,\n 'optical_elements': {'filter': 'bluefilter'}\n }\n self.assertIn(red_instr_config, instr_configs)\n self.assertIn(blue_instr_config, instr_configs)\n\n def test_exposure_settings(self, _validate_mock, submit_mock):\n # Check that we get an error if no filters are specified\n response = self.client.post(self.url, data=self.base_form_data)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.context['form'].errors['__all__'], ['No filters selected'])\n\n # Check that we get an error if just time or just count are given\n response = self.client.post(self.url, data={\n **self.base_form_data,\n # Omit count for red\n 'redfilter_exposure_time': '2',\n # 
Omit time for blue\n 'bluefilter_exposure_count': '3',\n\n # Specify green properly\n 'greenfilter_exposure_time': '20',\n 'greenfilter_exposure_count': '19',\n })\n self.assertEqual(response.status_code, 200)\n expected_msgs = {\n \"Exposure count missing for filter 'RED'\",\n \"Exposure time missing for filter 'BLUE'\"\n }\n self.assertEqual(set(response.context['form'].errors['__all__']), expected_msgs)\n\n def test_instrument_filter_info(self, _validate_mock, _submit_mock):\n # Construct dict that looks like a response from the LCO instruments API\n instr_response = {\n 'instr1': {\n 'optical_elements': {\n 'filters': [\n {'code': 'a1', 'schedulable': False},\n {'code': 'a2', 'schedulable': True},\n ],\n 'slits': [\n {'code': 'a3', 'schedulable': True},\n ]\n }\n },\n 'instr2': {\n 'optical_elements': {\n 'slits': [\n {'code': 'b1', 'schedulable': True},\n ]\n }\n }\n }\n expected = {\n 'instr1': ['a2', 'a3'],\n 'instr2': ['b1'],\n }\n got = EducationLCOForm.get_schedulable_codes(instr_response)\n self.assertEqual(got, expected)\n\n\nclass EducationTargetViewsTestCase(TomEducationTestCase):\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n cls.user = User.objects.create_user(username='test', email='test@example.com')\n cls.target = Target.objects.create(name='my target', type=Target.NON_SIDEREAL)\n assign_perm('tom_targets.change_target', cls.user, cls.target)\n\n def setUp(self):\n super().setUp()\n self.client.force_login(self.user)\n\n @override_settings(EXTRA_FIELDS=[{'name': 'strextra', 'type': 'string'},\n {'name': 'numericextra', 'type': 'number'}])\n def test_create_view(self):\n\n create_url = reverse('tom_education:target_create') + '?type={}'.format(Target.NON_SIDEREAL)\n update_url = reverse('tom_education:target_update', kwargs={'pk': self.target.pk})\n\n for url in (create_url, update_url):\n response = self.client.get(url)\n self.assertIn('non_sidereal_fields', response.context)\n field_info = json.loads(response.context['non_sidereal_fields'])\n self.assertEqual(set(field_info.keys()), {'base_fields', 'scheme_fields'})\n # Check declared, extra and core required fields are in base_fields\n self.assertTrue(\n {'groups', 'strextra', 'numericextra', 'scheme'} <= set(field_info['base_fields'])\n )\n self.assertIsInstance(field_info['scheme_fields'], dict)\n"
] |
[
[
"numpy.max",
"numpy.full",
"numpy.array",
"numpy.ones",
"numpy.linspace"
]
] |
kevinyang8/deep-learning-models
|
[
"271ddfa106c99bc131023e8e159c1b8cc903fe0e",
"271ddfa106c99bc131023e8e159c1b8cc903fe0e"
] |
[
"models/vision/detection/awsdet/models/bbox_heads/cascade_head.py",
"models/vision/classification/resnet.py"
] |
[
"from .. import builder\nfrom ..registry import HEADS\nfrom awsdet.core.bbox import bbox_target, transforms\nfrom awsdet.models.losses import losses\nimport tensorflow as tf\n\n\n\n@HEADS.register_module\nclass CascadeHead(tf.keras.Model): \n def __init__(self, \n num_stages=3,\n stage_loss_weights=[1, 0.5, 0.25],\n iou_thresholds=[0.5, 0.6, 0.7],\n reg_class_agnostic=True,\n num_classes=None,\n bbox_roi_extractor=None,\n bbox_head=None,\n **kwargs):\n super(CascadeHead, self).__init__(**kwargs)\n\n assert len(stage_loss_weights) == num_stages\n assert len(bbox_head) == num_stages\n assert len(iou_thresholds) == num_stages\n assert reg_class_agnostic or num_classes\n\n self.num_stages = num_stages\n self.stage_loss_weights = stage_loss_weights\n\n if bbox_roi_extractor is not None and bbox_head is not None:\n self.bbox_roi_extractor = builder.build_roi_extractor(bbox_roi_extractor)\n self.bbox_heads = [builder.build_head(head) for head in bbox_head]\n self.reg_class_agnostic = reg_class_agnostic # used for build targets\n self.num_classes = num_classes\n self.bbox_targets = []\n for iou, bbox_head in zip(iou_thresholds, self.bbox_heads):\n target = bbox_target.ProposalTarget(\n target_means=bbox_head.target_means,\n target_stds=bbox_head.target_stds, \n num_rcnn_deltas=512,\n positive_fraction=0.25,\n pos_iou_thr=iou,\n neg_iou_thr=0.1,\n reg_class_agnostic=self.reg_class_agnostic,\n num_classes=1 if reg_class_agnostic else self.num_classes)\n self.bbox_targets.append(target)\n \n @tf.function(experimental_relax_shapes=True)\n def call(self, inputs, training=True):\n '''\n Args\n ---\n proposals_list: List of Tensors of shape [num_proposals, (ymin, xmin, ymax, xmax)]\n num_proposals=levels * proposals per level. levels refer to FPN levels. \n Length of list is the batch size\n gt_boxes: Tensor of shape [batch_size, 4]\n gt_class_ids: Tensor of shape [batch_size]\n img_metas: Tensor of shape [11]\n rcnn_feature_maps: List of outputs from the FPN\n '''\n if training:\n proposals_list, rcnn_feature_maps, gt_boxes, \\\n gt_class_ids, img_metas = inputs\n else:\n proposals_list, rcnn_feature_maps, img_metas = inputs\n batch_size = img_metas.shape[0]\n loss_dict = {}\n for i in range(self.num_stages):\n if i == 0:\n rois_list = proposals_list\n if training:\n rois_list, rcnn_target_matches, rcnn_target_deltas, inside_weights, \\\n outside_weights = self.bbox_targets[i].build_targets( \\\n rois_list, gt_boxes, gt_class_ids, img_metas) \n pooled_regions_list = self.bbox_roi_extractor(\n (rois_list, rcnn_feature_maps, img_metas), training=training)\n rcnn_class_logits, rcnn_probs, rcnn_deltas = self.bbox_heads[i](pooled_regions_list, training=training)\n if training:\n loss_dict['rcnn_class_loss_stage_{}'.format(i)] = losses.rcnn_class_loss(rcnn_class_logits, \n rcnn_target_matches) * self.stage_loss_weights[i]\n \n loss_dict['rcnn_box_loss_stage_{}'.format(i)] = losses.rcnn_bbox_loss(rcnn_deltas,\n rcnn_target_deltas, \n inside_weights, \n outside_weights) * self.stage_loss_weights[i]\n roi_shapes = [tf.shape(i)[0] for i in rois_list]\n refinements = tf.split(rcnn_deltas, roi_shapes)\n new_rois = []\n if i<(self.num_stages-1):\n for j in range(batch_size):\n new_rois.append(tf.stop_gradient(transforms.delta2bbox(rois_list[j], refinements[j],\n target_means=self.bbox_heads[i].target_means, \\\n target_stds=self.bbox_heads[i].target_stds)))\n rois_list = new_rois\n if training:\n return loss_dict\n else:\n detections_list = self.bbox_heads[-1].get_bboxes(rcnn_probs,\n rcnn_deltas,\n rois_list,\n 
img_metas)\n detections_dict = {\n 'bboxes': detections_list[0][0],\n 'labels': detections_list[0][1],\n 'scores': detections_list[0][2]\n }\n return detections_dict\n\n ",
"# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n# SPDX-License-Identifier: Apache-2.0\n# -*- coding: utf-8 -*-\n\"\"\"ResNet, ResNetV2, and ResNeXt models for Keras.\n\n# Reference papers\n\n- [Deep Residual Learning for Image Recognition]\n (https://arxiv.org/abs/1512.03385) (CVPR 2016 Best Paper Award)\n- [Identity Mappings in Deep Residual Networks]\n (https://arxiv.org/abs/1603.05027) (ECCV 2016)\n- [Aggregated Residual Transformations for Deep Neural Networks]\n (https://arxiv.org/abs/1611.05431) (CVPR 2017)\n\n# Reference implementations\n\n- [TensorNets]\n (https://github.com/taehoonlee/tensornets/blob/master/tensornets/resnets.py)\n- [Caffe ResNet]\n (https://github.com/KaimingHe/deep-residual-networks/tree/master/prototxt)\n- [Torch ResNetV2]\n (https://github.com/facebook/fb.resnet.torch/blob/master/models/preresnet.lua)\n- [Torch ResNeXt]\n (https://github.com/facebookresearch/ResNeXt/blob/master/models/resnext.lua)\n\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport tensorflow as tf\nimport os\nimport numpy as np\n\nlayers = tf.keras.layers\n\nBASE_WEIGHTS_PATH = (\n 'https://github.com/keras-team/keras-applications/'\n 'releases/download/resnet/')\nWEIGHTS_HASHES = {\n 'resnet50': ('2cb95161c43110f7111970584f804107',\n '4d473c1dd8becc155b73f8504c6f6626'),\n 'resnet101': ('f1aeb4b969a6efcfb50fad2f0c20cfc5',\n '88cf7a10940856eca736dc7b7e228a21'),\n 'resnet152': ('100835be76be38e30d865e96f2aaae62',\n 'ee4c566cf9a93f14d82f913c2dc6dd0c'),\n 'resnet50v2': ('3ef43a0b657b3be2300d5770ece849e0',\n 'fac2f116257151a9d068a22e544a4917'),\n 'resnet101v2': ('6343647c601c52e1368623803854d971',\n 'c0ed64b8031c3730f411d2eb4eea35b5'),\n 'resnet152v2': ('a49b44d1979771252814e80f8ec446f9',\n 'ed17cf2e0169df9d443503ef94b23b33'),\n 'resnext50': ('67a5b30d522ed92f75a1f16eef299d1a',\n '62527c363bdd9ec598bed41947b379fc'),\n 'resnext101': ('34fb605428fcc7aa4d62f44404c11509',\n '0f678c91647380debd923963594981b3')\n}\n\n\ndef block1(x, filters, kernel_size=3, stride=1, conv_shortcut=True, \n image_data_format='channels_last', name=None, trainable=True,\n weight_decay=0.0001):\n \"\"\"A residual block.\n\n # Arguments\n x: input tensor.\n filters: integer, filters of the bottleneck layer.\n kernel_size: default 3, kernel size of the bottleneck layer.\n stride: default 1, stride of the first layer.\n conv_shortcut: default True, use convolution shortcut if True,\n otherwise identity shortcut.\n name: string, block label.\n\n # Returns\n Output tensor for the residual block.\n \"\"\"\n bn_axis = 3 if image_data_format == 'channels_last' else 1\n\n if conv_shortcut is True:\n shortcut = layers.Conv2D(4 * filters, 1, strides=stride, use_bias=False, padding='SAME',\n kernel_initializer='he_normal',\n kernel_regularizer=tf.keras.regularizers.l2(weight_decay),\n name=name + '_0_conv')(x)\n shortcut = layers.BatchNormalization(axis=bn_axis, epsilon=1e-5, momentum=0.9,\n name=name + '_0_bn')(shortcut)\n else:\n shortcut = x\n\n x = layers.Conv2D(filters, 1, strides=1, use_bias=False, padding='SAME',\n kernel_initializer='he_normal',\n kernel_regularizer=tf.keras.regularizers.l2(weight_decay),\n name=name + '_1_conv')(x)\n x = layers.BatchNormalization(axis=bn_axis, epsilon=1e-5, momentum=0.9,\n name=name + '_1_bn')(x)\n x = layers.Activation('relu', name=name + '_1_relu')(x)\n\n x = layers.Conv2D(filters, kernel_size, strides=stride, padding='SAME',\n use_bias=False,\n kernel_initializer='he_normal',\n 
kernel_regularizer=tf.keras.regularizers.l2(weight_decay),\n name=name + '_2_conv')(x)\n\n x = layers.BatchNormalization(axis=bn_axis, epsilon=1e-5, momentum=0.9, \n name=name + '_2_bn')(x)\n\n x = layers.Activation('relu', name=name + '_2_relu')(x)\n\n x = layers.Conv2D(4 * filters, 1, use_bias=False, padding='SAME',\n kernel_initializer='he_normal',\n kernel_regularizer=tf.keras.regularizers.l2(weight_decay),\n name=name + '_3_conv')(x)\n\n x = layers.BatchNormalization(axis=bn_axis, epsilon=1e-5, momentum=0.9,\n gamma_initializer='zeros', # https://arxiv.org/pdf/1706.02677.pdf\n name=name + '_3_bn')(x)\n\n x = layers.Add(name=name + '_add')([shortcut, x])\n x = layers.Activation('relu', name=name + '_out')(x)\n return x\n\n\ndef stack1(x, filters, blocks, stride1=2, name=None, trainable=True, weight_decay=0.0001):\n \"\"\"A set of stacked residual blocks.\n\n # Arguments\n x: input tensor.\n filters: integer, filters of the bottleneck layer in a block.\n blocks: integer, blocks in the stacked blocks.\n stride1: default 2, stride of the first layer in the first block.\n name: string, stack label.\n\n # Returns\n Output tensor for the stacked blocks.\n \"\"\"\n x = block1(x, filters, stride=stride1, name=name + '_block1', trainable=trainable, weight_decay=weight_decay)\n for i in range(2, blocks + 1):\n x = block1(x, filters, conv_shortcut=False, name=name + '_block' + str(i), trainable=trainable, weight_decay=weight_decay)\n return x\n\n\ndef block2(x, filters, kernel_size=3, stride=1,\n conv_shortcut=False, name=None, image_data_format='channels_last',\n trainable=True, weight_decay=0.0001):\n \"\"\"A residual block.\n\n # Arguments\n x: input tensor.\n filters: integer, filters of the bottleneck layer.\n kernel_size: default 3, kernel size of the bottleneck layer.\n stride: default 1, stride of the first layer.\n conv_shortcut: default False, use convolution shortcut if True,\n otherwise identity shortcut.\n name: string, block label.\n\n # Returns\n Output tensor for the residual block.\n \"\"\"\n bn_axis = 3 if image_data_format == 'channels_last' else 1\n preact = layers.BatchNormalization(axis=bn_axis, epsilon=1e-5, momentum=0.9, name=name + '_preact_bn')(x)\n preact = layers.Activation('relu', name=name + '_preact_relu')(preact)\n\n if conv_shortcut is True:\n shortcut = layers.Conv2D(4 * filters, 1, strides=stride, use_bias=False, padding='SAME',\n kernel_initializer='he_normal',\n kernel_regularizer=tf.keras.regularizers.l2(weight_decay),\n name=name + '_0_conv')(preact)\n else:\n shortcut = layers.AveragePooling2D(1, strides=stride)(x) if stride > 1 else x\n\n x = layers.Conv2D(filters, 1, strides=1, use_bias=False, padding='SAME',\n kernel_initializer='he_normal',\n kernel_regularizer=tf.keras.regularizers.l2(weight_decay),\n name=name + '_1_conv')(preact)\n x = layers.BatchNormalization(axis=bn_axis, epsilon=1e-5, momentum=0.9, name=name + '_1_bn')(x)\n x = layers.Activation('relu', name=name + '_1_relu')(x)\n\n x = layers.Conv2D(filters, kernel_size, strides=stride, padding='SAME', use_bias=False,\n kernel_initializer='he_normal',\n kernel_regularizer=tf.keras.regularizers.l2(weight_decay),\n name=name + '_2_conv')(x)\n x = layers.BatchNormalization(axis=bn_axis, epsilon=1e-5, momentum=0.9, name=name + '_2_bn')(x)\n x = layers.Activation('relu', name=name + '_2_relu')(x)\n\n x = layers.Conv2D(4 * filters, 1, use_bias=False, padding='SAME',\n kernel_initializer='he_normal',\n kernel_regularizer=tf.keras.regularizers.l2(weight_decay),\n name=name + '_3_conv')(x)\n x = 
layers.Add(name=name + '_out')([shortcut, x])\n return x\n\n\ndef stack2(x, filters, blocks, stride1=2, name=None, trainable=True, weight_decay=0.0001):\n \"\"\"A set of stacked residual blocks.\n\n # Arguments\n x: input tensor.\n filters: integer, filters of the bottleneck layer in a block.\n blocks: integer, blocks in the stacked blocks.\n stride1: default 2, stride of the first layer in the first block.\n name: string, stack label.\n\n # Returns\n Output tensor for the stacked blocks.\n \"\"\"\n x = block2(x, filters, conv_shortcut=True, name=name + '_block1', trainable=trainable, weight_decay=weight_decay) \n for i in range(2, blocks):\n x = block2(x, filters, name=name + '_block' + str(i))\n x = block2(x, filters, stride=stride1, name=name + '_block' + str(blocks), trainable=trainable, weight_decay=weight_decay)\n return x\n\n\ndef block3(x, filters, kernel_size=3, stride=1, groups=32, image_data_format='channels_last',\n conv_shortcut=True, name=None):\n \"\"\"A residual block.\n\n # Arguments\n x: input tensor.\n filters: integer, filters of the bottleneck layer.\n kernel_size: default 3, kernel size of the bottleneck layer.\n stride: default 1, stride of the first layer.\n groups: default 32, group size for grouped convolution.\n conv_shortcut: default True, use convolution shortcut if True,\n otherwise identity shortcut.\n name: string, block label.\n\n # Returns\n Output tensor for the residual block.\n \"\"\"\n bn_axis = 3 if image_data_format == 'channels_last' else 1\n\n if conv_shortcut is True:\n shortcut = layers.Conv2D((64 // groups) * filters, 1, strides=stride,\n use_bias=False, name=name + '_0_conv')(x)\n shortcut = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5,\n name=name + '_0_bn')(shortcut)\n else:\n shortcut = x\n\n x = layers.Conv2D(filters, 1, use_bias=False, name=name + '_1_conv')(x)\n x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5,\n name=name + '_1_bn', trainable=False)(x)\n x = layers.Activation('relu', name=name + '_1_relu')(x)\n\n c = filters // groups\n x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name=name + '_2_pad')(x)\n x = layers.DepthwiseConv2D(kernel_size, strides=stride, depth_multiplier=c,\n use_bias=False, name=name + '_2_conv')(x)\n kernel = np.zeros((1, 1, filters * c, filters), dtype=np.float32)\n for i in range(filters):\n start = (i // c) * c * c + i % c\n end = start + c * c\n kernel[:, :, start:end:c, i] = 1.\n x = layers.Conv2D(filters, 1, use_bias=False, trainable=False,\n kernel_initializer={'class_name': 'Constant',\n 'config': {'value': kernel}},\n name=name + '_2_gconv')(x)\n x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5,\n name=name + '_2_bn', trainable=False)(x)\n x = layers.Activation('relu', name=name + '_2_relu')(x)\n\n x = layers.Conv2D((64 // groups) * filters, 1,\n use_bias=False, name=name + '_3_conv')(x)\n x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5,\n name=name + '_3_bn', trainable=False)(x)\n\n x = layers.Add(name=name + '_add')([shortcut, x])\n x = layers.Activation('relu', name=name + '_out')(x)\n return x\n\n\ndef stack3(x, filters, blocks, stride1=2, groups=32, name=None):\n \"\"\"A set of stacked residual blocks.\n\n # Arguments\n x: input tensor.\n filters: integer, filters of the bottleneck layer in a block.\n blocks: integer, blocks in the stacked blocks.\n stride1: default 2, stride of the first layer in the first block.\n groups: default 32, group size for grouped convolution.\n name: string, stack label.\n\n # Returns\n Output tensor for the 
stacked blocks.\n \"\"\"\n x = block3(x, filters, stride=stride1, groups=groups, name=name + '_block1')\n for i in range(2, blocks + 1):\n x = block3(x, filters, groups=groups, conv_shortcut=False,\n name=name + '_block' + str(i))\n return x\n\n\ndef ResNet(stack_fn,\n preact,\n use_bias,\n model_name='resnet',\n include_top=True,\n weights='imagenet',\n input_tensor=None,\n input_shape=None,\n pooling=None,\n classes=1000,\n weight_decay=0.0001,\n is_training=False,\n image_data_format='channels_last',\n **kwargs):\n \"\"\"Instantiates the ResNet, ResNetV2, and ResNeXt architecture.\n\n Optionally loads weights pre-trained on ImageNet.\n Note that the data format convention used by the model is\n the one specified in your Keras config at `~/.keras/keras.json`.\n\n # Arguments\n stack_fn: a function that returns output tensor for the\n stacked residual blocks.\n preact: whether to use pre-activation or not\n (True for ResNetV2, False for ResNet and ResNeXt).\n use_bias: whether to use biases for convolutional layers or not #FIXME:\n model_name: string, model name.\n include_top: whether to include the fully-connected\n layer at the top of the network.\n weights: one of `None` (random initialization),\n 'imagenet' (pre-training on ImageNet),\n or the path to the weights file to be loaded.\n input_tensor: optional Keras tensor\n (i.e. output of `layers.Input()`)\n to use as image input for the model.\n input_shape: optional shape tuple, only to be specified\n if `include_top` is False (otherwise the input shape\n has to be `(224, 224, 3)` (with `channels_last` data format)\n or `(3, 224, 224)` (with `channels_first` data format).\n It should have exactly 3 inputs channels.\n pooling: optional pooling mode for feature extraction\n when `include_top` is `False`.\n - `None` means that the output of the model will be\n the 4D tensor output of the\n last convolutional layer.\n - `avg` means that global average pooling\n will be applied to the output of the\n last convolutional layer, and thus\n the output of the model will be a 2D tensor.\n - `max` means that global max pooling will\n be applied.\n classes: optional number of classes to classify images\n into, only to be specified if `include_top` is True, and\n if no `weights` argument is specified.\n\n # Returns\n A Keras model instance.\n\n # Raises\n ValueError: in case of invalid argument for `weights`,\n or invalid input shape.\n \"\"\"\n if not (weights in {'imagenet', None} or os.path.exists(weights)):\n raise ValueError('The `weights` argument should be either '\n '`None` (random initialization), `imagenet` '\n '(pre-training on ImageNet), '\n 'or the path to the weights file to be loaded.')\n\n if weights == 'imagenet' and include_top and classes != 1000:\n raise ValueError('If using `weights` as `\"imagenet\"` with `include_top`'\n ' as true, `classes` should be 1000')\n\n input_shape = (None, None, 3) # 224, 224, 3 for resnet\n\n if input_tensor is None:\n img_input = layers.Input(shape=input_shape)\n else:\n if not backend.is_keras_tensor(input_tensor): # FIXME: dead code\n img_input = layers.Input(tensor=input_tensor, shape=input_shape)\n else:\n img_input = input_tensor\n bn_axis = 3 if image_data_format == 'channels_last' else 1\n\n x = layers.ZeroPadding2D(padding=((3, 3), (3, 3)), name='conv1_pad')(img_input)\n x = layers.Conv2D(64, 7, strides=2, use_bias=False, name='conv1_conv', \n kernel_regularizer=tf.keras.regularizers.l2(weight_decay),\n kernel_initializer='he_normal')(x)\n\n if preact is False:\n x = 
layers.BatchNormalization(axis=bn_axis, epsilon=1e-5, momentum=0.9, name='conv1_bn')(x)\n x = layers.Activation('relu', name='conv1_relu')(x)\n\n x = layers.MaxPooling2D(3, strides=2, padding='SAME', name='pool1_pool')(x)\n\n x = stack_fn(x)\n\n if preact is True:\n x = layers.BatchNormalization(axis=bn_axis, epsilon=1e-5, momentum=0.9, name='post_bn')(x)\n x = layers.Activation('relu', name='post_relu')(x)\n\n if include_top:\n x = layers.GlobalAveragePooling2D(name='avg_pool')(x)\n x = layers.Dense(classes,\n kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.01),\n kernel_regularizer=tf.keras.regularizers.l2(weight_decay), name='logits')(x)\n x = layers.Activation('softmax', dtype='float32')(x)\n else:\n if pooling == 'avg':\n x = layers.GlobalAveragePooling2D(name='avg_pool')(x)\n elif pooling == 'max':\n x = layers.GlobalMaxPooling2D(name='max_pool')(x)\n\n # Ensure that the model takes into account\n # any potential predecessors of `input_tensor`.\n if input_tensor is not None:\n inputs = keras_utils.get_source_inputs(input_tensor)\n else:\n inputs = img_input\n\n # Create model.\n model = tf.keras.Model(inputs, x, name=model_name)\n\n # Load weights.\n if (weights == 'imagenet') and (model_name in WEIGHTS_HASHES):\n if include_top:\n file_name = model_name + '_weights_tf_dim_ordering_tf_kernels.h5'\n file_hash = WEIGHTS_HASHES[model_name][0]\n else:\n file_name = model_name + '_weights_tf_dim_ordering_tf_kernels_notop.h5'\n file_hash = WEIGHTS_HASHES[model_name][1]\n weights_path = keras_utils.get_file(file_name,\n BASE_WEIGHTS_PATH + file_name,\n cache_subdir='models',\n file_hash=file_hash)\n by_name = True if 'resnext' in model_name else False\n model.load_weights(weights_path, by_name=by_name)\n elif weights is not None:\n model.load_weights(weights)\n\n return model\n\n\ndef ResNet50(include_top=True,\n weights='imagenet',\n input_tensor=None,\n input_shape=None,\n pooling=None,\n classes=1000,\n weight_decay=0.0001,\n **kwargs):\n def stack_fn(x):\n x = stack1(x, 64, 3, stride1=1, name='conv2', weight_decay=weight_decay)\n x = stack1(x, 128, 4, name='conv3', weight_decay=weight_decay)\n x = stack1(x, 256, 6, name='conv4', weight_decay=weight_decay)\n x = stack1(x, 512, 3, name='conv5', weight_decay=weight_decay)\n return x\n return ResNet(stack_fn, False, True, 'resnet50',\n include_top, weights,\n input_tensor, input_shape,\n pooling, classes, weight_decay,\n **kwargs)\n\n\ndef ResNet101(include_top=True,\n weights='imagenet',\n input_tensor=None,\n input_shape=None,\n pooling=None,\n classes=1000,\n **kwargs):\n def stack_fn(x):\n x = stack1(x, 64, 3, stride1=1, name='conv2')\n x = stack1(x, 128, 4, name='conv3')\n x = stack1(x, 256, 23, name='conv4')\n x = stack1(x, 512, 3, name='conv5')\n return x\n return ResNet(stack_fn, False, True, 'resnet101',\n include_top, weights,\n input_tensor, input_shape,\n pooling, classes,\n **kwargs)\n\n\ndef ResNet152(include_top=True,\n weights='imagenet',\n input_tensor=None,\n input_shape=None,\n pooling=None,\n classes=1000,\n **kwargs):\n def stack_fn(x):\n x = stack1(x, 64, 3, stride1=1, name='conv2')\n x = stack1(x, 128, 8, name='conv3')\n x = stack1(x, 256, 36, name='conv4')\n x = stack1(x, 512, 3, name='conv5')\n return x\n return ResNet(stack_fn, False, True, 'resnet152',\n include_top, weights,\n input_tensor, input_shape,\n pooling, classes,\n **kwargs)\n\n\ndef ResNet50V2(include_top=True,\n weights='imagenet',\n input_tensor=None,\n input_shape=None,\n pooling=None,\n classes=1000,\n weight_decay=0.0001,\n 
**kwargs):\n def stack_fn(x):\n x = stack2(x, 64, 3, name='conv2', weight_decay=weight_decay)\n x = stack2(x, 128, 4, name='conv3', weight_decay=weight_decay)\n x = stack2(x, 256, 6, name='conv4', weight_decay=weight_decay)\n x = stack2(x, 512, 3, stride1=1, name='conv5', weight_decay=weight_decay)\n return x\n return ResNet(stack_fn, True, True, 'resnet50v2',\n include_top, weights,\n input_tensor, input_shape,\n pooling, classes, weight_decay,\n **kwargs)\n\n\ndef ResNet101V2(include_top=True,\n weights='imagenet',\n input_tensor=None,\n input_shape=None,\n pooling=None,\n classes=1000,\n **kwargs):\n def stack_fn(x):\n x = stack2(x, 64, 3, name='conv2')\n x = stack2(x, 128, 4, name='conv3')\n x = stack2(x, 256, 23, name='conv4')\n x = stack2(x, 512, 3, stride1=1, name='conv5')\n return x\n return ResNet(stack_fn, True, True, 'resnet101v2',\n include_top, weights,\n input_tensor, input_shape,\n pooling, classes,\n **kwargs)\n\n\ndef ResNet152V2(include_top=True,\n weights='imagenet',\n input_tensor=None,\n input_shape=None,\n pooling=None,\n classes=1000,\n **kwargs):\n def stack_fn(x):\n x = stack2(x, 64, 3, name='conv2')\n x = stack2(x, 128, 8, name='conv3')\n x = stack2(x, 256, 36, name='conv4')\n x = stack2(x, 512, 3, stride1=1, name='conv5')\n return x\n return ResNet(stack_fn, True, True, 'resnet152v2',\n include_top, weights,\n input_tensor, input_shape,\n pooling, classes,\n **kwargs)\n\n\ndef ResNeXt50(include_top=True,\n weights='imagenet',\n input_tensor=None,\n input_shape=None,\n pooling=None,\n classes=1000,\n **kwargs):\n def stack_fn(x):\n x = stack3(x, 128, 3, stride1=1, name='conv2')\n x = stack3(x, 256, 4, name='conv3')\n x = stack3(x, 512, 6, name='conv4')\n x = stack3(x, 1024, 3, name='conv5')\n return x\n return ResNet(stack_fn, False, False, 'resnext50',\n include_top, weights,\n input_tensor, input_shape,\n pooling, classes,\n **kwargs)\n\n\ndef ResNeXt101(include_top=True,\n weights='imagenet',\n input_tensor=None,\n input_shape=None,\n pooling=None,\n classes=1000,\n **kwargs):\n def stack_fn(x):\n x = stack3(x, 128, 3, stride1=1, name='conv2')\n x = stack3(x, 256, 4, name='conv3')\n x = stack3(x, 512, 23, name='conv4')\n x = stack3(x, 1024, 3, name='conv5')\n return x\n return ResNet(stack_fn, False, False, 'resnext101',\n include_top, weights,\n input_tensor, input_shape,\n pooling, classes,\n **kwargs)\n\n\nsetattr(ResNet50, '__doc__', ResNet.__doc__)\nsetattr(ResNet101, '__doc__', ResNet.__doc__)\nsetattr(ResNet152, '__doc__', ResNet.__doc__)\nsetattr(ResNet50V2, '__doc__', ResNet.__doc__)\nsetattr(ResNet101V2, '__doc__', ResNet.__doc__)\nsetattr(ResNet152V2, '__doc__', ResNet.__doc__)\nsetattr(ResNeXt50, '__doc__', ResNet.__doc__)\nsetattr(ResNeXt101, '__doc__', ResNet.__doc__)\n\n"
] |
[
[
"tensorflow.split",
"tensorflow.shape",
"tensorflow.function"
],
[
"tensorflow.keras.Model",
"tensorflow.keras.initializers.TruncatedNormal",
"tensorflow.keras.regularizers.l2",
"numpy.zeros"
]
] |
sourabbapusridhar/master-thesis
|
[
"7ca0b36230dcab2e37a9501bcb519390d68b3b26"
] |
[
"train.py"
] |
[
"# Implementation of Training\n\nimport torch\nimport argparse\nimport numpy as np\nimport sys\nimport collections\nfrom trainer import Trainer\nimport model.loss as lossModule\nfrom utils import prepare_device\nimport model.metric as metricModule\nimport torch.nn.functional as F\nfrom parse_config import ConfigParser\nimport model.social_stgcnn as architectureModule\nimport data.datasets.custom_dataset as customDataset\n\n# Fix random seeds for reproducibility\nSEED = 123\ntorch.manual_seed(SEED)\ntorch.backends.cudnn.deterministic = True\ntorch.set_default_dtype(torch.double)\ntorch.backends.cudnn.benchmark = False\ntorch.autograd.set_detect_anomaly(True)\nnp.random.seed(SEED)\n\ndef main(configuration):\n \"\"\"\n Entry point for training the experiment.\n\n Parameters\n ----------\n configuration : dict\n User defined configuration for training the experiment\n\n Returns\n -------\n None\n \"\"\"\n\n epoch_range = 1\n savePeriod = 1\n filename = \"saved models/Model 2/checkpoint.pth\"\n print(\"Getting graph dataset... \")\n\n dataset = configuration.initialize_object(\"dataset\", customDataset)\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n model = configuration.initialize_object(\"model\", architectureModule).to(device)\n dataset.to_device(device)\n optimizer = torch.optim.Adam(model.parameters(), lr=0.0001)# , weight_decay=5e-4)\n\n trainingDataset, validationDataset = dataset.split_dataset(validationSplit=0.2)\n\n print(\"Start training...\")\n for idx_data, (video_name, data) in enumerate(trainingDataset.items()):\n sys.stdout.write(\"\\nTrainging {}, Video: {}/{}, Number of frames:{}\"\n .format(video_name, idx_data+1, len(trainingDataset.keys()), len(data)))\n model.train()\n for epoch in range(epoch_range):\n if epoch_range > 1: sys.stdout.write(\"\\nEpoch: {}/{}\".format(epoch+1, epoch_range))\n total_loss = 0\n correct = 0\n total = 0\n for time_frame, frame in enumerate(data):\n optimizer.zero_grad()\n out = model(frame.cuda(), device)\n y = torch.cat([frame.y.cuda(), torch.ones(size=[out.shape[0]-frame.y.shape[0],\n frame.y.shape[1]], device=device)*2], dim=0)\n\n loss = torch.mean((out - y) ** 2)\n total_loss += loss\n loss.backward()\n optimizer.step()\n out = torch.round(out)\n correct = correct + torch.sub(out, y).numel() - torch.count_nonzero(torch.sub(out, y))\n total = total + torch.sub(out, y).numel()\n accuracy = correct / total\n sys.stdout.write(\", MSE: {:.4f}, Accuracy: {:.4f}\".format(total_loss, accuracy))\n\n #if epoch % savePeriod == 0:\n # torch.save(model.state_dict(), filename.format(idx_data+1, epoch))\n\n print(\"Saving Model....\")\n torch.save(model.state_dict(), filename)\n\n \"\"\"\n model.eval()\n correct_each_prediction = [0, 0, 0]\n total_each_prediction = [0, 0, 0]\n print(\"\\nCalculating final accuracy...\")\n for idx_video, (_, video) in enumerate(validationDataset.items()):\n sys.stdout.write(\"\\rTesting video {}/{}\".format(idx_video+1, len(validationDataset.keys())))\n sys.stdout.flush()\n for idx_frame, frame in enumerate(video):\n pred = torch.round(model(frame, device))\n y = torch.cat([frame.y.cuda(),\n torch.ones(size=[pred.shape[0]-frame.y.shape[0],\n frame.y.shape[1]], device=device)*2], dim=0)\n comparison = torch.sub(pred, y)\n correct_each_prediction = [pred + comparison[:, it].numel() -\n torch.count_nonzero(comparison[:, it])\n for it, pred in enumerate(correct_each_prediction)]\n\n total_each_prediction = [pred + comparison[:, it].numel()\n for it, pred in enumerate(total_each_prediction)]\n\n 
total = sum(total_each_prediction)\n correct = sum(correct_each_prediction)\n accuracy = correct / total\n accuracy_each_prediction = [correct_each_prediction[it] / tot\n for it, tot in enumerate(total_each_prediction)]\n\n print('Final accuracy frames: {:.4f}'.format(accuracy))\n print('Final accuracy for specific frame prediction: \\n '\n '15 frames: {:.4f}, 30 frames: {:.4f}, 45 frames: {:.4f}'\n .format(accuracy_each_prediction[2], accuracy_each_prediction[1], accuracy_each_prediction[0]))\n \"\"\"\n \n '''\n print(\"Validation...\")\n validationDataLoader = dataLoader.split_validation()\n\n\n # Build Model Architecture and print to console\n print(\"Build Model Architecture and print to console\")\n model = configuration.initialize_object(\"architecture\", architectureModule)\n logger.info(model)\n\n # Prepare for (multi-device) GPU training\n device, deviceIds = prepare_device(configuration[\"numberOfGpus\"])\n model = model.to(device)\n if len(deviceIds) > 1:\n model = torch.nn.DataParallel(model, device_ids = deviceIds)\n\n # Get function handles of loss and metrics\n criterion = getattr(lossModule, configuration[\"loss\"])\n metrics = [getattr(metricModule, individualMetric) for individualMetric in configuration[\"metrics\"]]\n\n # Build Optimizer, Learning Rate Scheduler and delete every lines containing lr_scheduler for disabling scheduler\n trainiableParameters = filter(lambda p: p.requires_grad, model.parameters())\n optimizer = configuration.initialize_object(\"optimizer\", torch.optim, trainiableParameters)\n learningRateScheduler = configuration.initialize_object(\"learningRateScheduler\", torch.optim.lr_scheduler, optimizer)\n\n trainer = Trainer(model, criterion, metrics, optimizer,\n configuration=configuration,\n device=device,\n dataLoader=dataLoader,\n validationDataLoader=validationDataLoader,\n learningRateScheduler=learningRateScheduler)\n\n trainer.train()\n '''\n\nif __name__ == \"__main__\":\n args = argparse.ArgumentParser(description=\"Script to train Graph Neural Network\")\n args.add_argument(\"-c\", \"--config\", default=None, type=str, help=\"Path to the configuration file (Default: None)\")\n args.add_argument(\"-r\", \"--resume\", default=None, type=str, help=\"Path to the latest checkpoint (Default: None)\")\n args.add_argument(\"-d\", \"--device\", default=None, type=str, help=\"Index of the GPU used (Default: None)\")\n\n\n configuration = ConfigParser.from_args(args)\n main(configuration)\n"
] |
[
[
"torch.round",
"numpy.random.seed",
"torch.sub",
"torch.autograd.set_detect_anomaly",
"torch.ones",
"torch.manual_seed",
"torch.cuda.is_available",
"torch.set_default_dtype",
"torch.mean"
]
] |
SyrekGMR/Gradient-Descent-Optimisation
|
[
"a3b891108a8b4260d64eb8dfd7958fe686150dc6",
"a3b891108a8b4260d64eb8dfd7958fe686150dc6"
] |
[
"Evaluate.py",
"Optimisers/KFAC.py"
] |
[
"import torch\n\ndef evaluate(model, device, test_data_loader):\n model.eval()\n correct = 0\n total = 0\n\n with torch.no_grad():\n for images, labels in test_data_loader:\n images = images.to(device)\n labels = labels.to(device)\n\n output = model.forward(images)\n output = torch.max(output, 1)[-1]\n correct += torch.sum(output == labels).item()\n\n total += len(output)\n\n return correct/total\n",
"import math\n\nimport torch\nimport torch.optim as optim\nfrom torch import nn\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass KFACOptimizer(optim.Optimizer):\n def __init__(self,\n model,\n lr=0.001,\n momentum=0.9,\n stat_decay=0.95,\n damping=0.001,\n kl_clip=0.001,\n weight_decay=0,\n TCov=10,\n TInv=100,\n batch_averaged=True):\n if lr < 0.0:\n raise ValueError(\"Invalid learning rate: {}\".format(lr))\n if momentum < 0.0:\n raise ValueError(\"Invalid momentum value: {}\".format(momentum))\n if weight_decay < 0.0:\n raise ValueError(\"Invalid weight_decay value: {}\".format(weight_decay))\n defaults = dict(lr=lr, momentum=momentum, damping=damping,\n weight_decay=weight_decay)\n # TODO (CW): KFAC optimizer now only support model as input\n super(KFACOptimizer, self).__init__(model.parameters(), defaults)\n self.CovAHandler = ComputeCovA()\n self.CovGHandler = ComputeCovG()\n self.batch_averaged = batch_averaged\n\n self.known_modules = {'Linear', 'Conv2d'}\n\n self.modules = []\n self.grad_outputs = {}\n\n self.model = model\n self._prepare_model()\n\n self.steps = 0\n\n self.m_aa, self.m_gg = {}, {}\n self.Q_a, self.Q_g = {}, {}\n self.d_a, self.d_g = {}, {}\n self.stat_decay = stat_decay\n\n self.kl_clip = kl_clip\n self.TCov = TCov\n self.TInv = TInv\n\n def _save_input(self, module, input):\n if torch.is_grad_enabled() and self.steps % self.TCov == 0:\n aa = self.CovAHandler(input[0].data, module)\n # Initialize buffers\n if self.steps == 0:\n self.m_aa[module] = torch.diag(aa.new(aa.size(0)).fill_(1))\n update_running_stat(aa, self.m_aa[module], self.stat_decay)\n\n def _save_grad_output(self, module, grad_input, grad_output):\n # Accumulate statistics for Fisher matrices\n if self.acc_stats and self.steps % self.TCov == 0:\n gg = self.CovGHandler(grad_output[0].data, module, self.batch_averaged)\n # Initialize buffers\n if self.steps == 0:\n self.m_gg[module] = torch.diag(gg.new(gg.size(0)).fill_(1))\n update_running_stat(gg, self.m_gg[module], self.stat_decay)\n\n def _prepare_model(self):\n count = 0\n print(self.model)\n print(\"=> We keep following layers in KFAC. \")\n for module in self.model.modules():\n classname = module.__class__.__name__\n # print('=> We keep following layers in KFAC. <=')\n if classname in self.known_modules:\n self.modules.append(module)\n module.register_forward_pre_hook(self._save_input)\n module.register_backward_hook(self._save_grad_output)\n print('(%s): %s' % (count, module))\n count += 1\n\n def _update_inv(self, m):\n \"\"\"Do eigen decomposition for computing inverse of the ~ fisher.\n :param m: The layer\n :return: no returns.\n \"\"\"\n eps = 1e-10 # for numerical stability\n self.d_a[m], self.Q_a[m] = torch.symeig(\n self.m_aa[m], eigenvectors=True)\n self.d_g[m], self.Q_g[m] = torch.symeig(\n self.m_gg[m], eigenvectors=True)\n\n self.d_a[m].mul_((self.d_a[m] > eps).float())\n self.d_g[m].mul_((self.d_g[m] > eps).float())\n\n @staticmethod\n def _get_matrix_form_grad(m, classname):\n \"\"\"\n :param m: the layer\n :param classname: the class name of the layer\n :return: a matrix form of the gradient. 
it should be a [output_dim, input_dim] matrix.\n \"\"\"\n if classname == 'Conv2d':\n p_grad_mat = m.weight.grad.data.view(m.weight.grad.data.size(0), -1) # n_filters * (in_c * kw * kh)\n else:\n p_grad_mat = m.weight.grad.data\n if m.bias is not None:\n p_grad_mat = torch.cat([p_grad_mat, m.bias.grad.data.view(-1, 1)], 1)\n return p_grad_mat\n\n def _get_natural_grad(self, m, p_grad_mat, damping):\n \"\"\"\n :param m: the layer\n :param p_grad_mat: the gradients in matrix form\n :return: a list of gradients w.r.t to the parameters in `m`\n \"\"\"\n # p_grad_mat is of output_dim * input_dim\n # inv((ss')) p_grad_mat inv(aa') = [ Q_g (1/R_g) Q_g^T ] @ p_grad_mat @ [Q_a (1/R_a) Q_a^T]\n v1 = self.Q_g[m].t() @ p_grad_mat @ self.Q_a[m]\n v2 = v1 / (self.d_g[m].unsqueeze(1) * self.d_a[m].unsqueeze(0) + damping)\n v = self.Q_g[m] @ v2 @ self.Q_a[m].t()\n if m.bias is not None:\n # we always put gradient w.r.t weight in [0]\n # and w.r.t bias in [1]\n v = [v[:, :-1], v[:, -1:]]\n v[0] = v[0].view(m.weight.grad.data.size())\n v[1] = v[1].view(m.bias.grad.data.size())\n else:\n v = [v.view(m.weight.grad.data.size())]\n\n return v\n\n def _kl_clip_and_update_grad(self, updates, lr):\n # do kl clip\n vg_sum = 0\n for m in self.modules:\n v = updates[m]\n vg_sum += (v[0] * m.weight.grad.data * lr ** 2).sum().item()\n if m.bias is not None:\n vg_sum += (v[1] * m.bias.grad.data * lr ** 2).sum().item()\n nu = min(1.0, math.sqrt(self.kl_clip / vg_sum))\n\n for m in self.modules:\n v = updates[m]\n m.weight.grad.data.copy_(v[0])\n m.weight.grad.data.mul_(nu)\n if m.bias is not None:\n m.bias.grad.data.copy_(v[1])\n m.bias.grad.data.mul_(nu)\n\n def _step(self, closure):\n # FIXME (CW): Modified based on SGD (removed nestrov and dampening in momentum.)\n # FIXME (CW): 1. no nesterov, 2. 
buf.mul_(momentum).add_(1 <del> - dampening </del>, d_p)\n for group in self.param_groups:\n weight_decay = group['weight_decay']\n momentum = group['momentum']\n\n for p in group['params']:\n if p.grad is None:\n continue\n d_p = p.grad.data\n if weight_decay != 0 and self.steps >= 20 * self.TCov:\n d_p.add_(weight_decay, p.data)\n if momentum != 0:\n param_state = self.state[p]\n if 'momentum_buffer' not in param_state:\n buf = param_state['momentum_buffer'] = torch.zeros_like(p.data)\n buf.mul_(momentum).add_(d_p)\n else:\n buf = param_state['momentum_buffer']\n buf.mul_(momentum).add_(1, d_p)\n d_p = buf\n\n p.data.add_(-group['lr'], d_p)\n\n def step(self, closure=None):\n # FIXME(CW): temporal fix for compatibility with Official LR scheduler.\n group = self.param_groups[0]\n lr = group['lr']\n damping = group['damping']\n updates = {}\n for m in self.modules:\n classname = m.__class__.__name__\n if self.steps % self.TInv == 0:\n self._update_inv(m)\n p_grad_mat = self._get_matrix_form_grad(m, classname)\n v = self._get_natural_grad(m, p_grad_mat, damping)\n updates[m] = v\n self._kl_clip_and_update_grad(updates, lr)\n\n self._step(closure)\n self.steps += 1\n\nclass ComputeCovA:\n\n @classmethod\n def compute_cov_a(cls, a, layer):\n return cls.__call__(a, layer)\n\n @classmethod\n def __call__(cls, a, layer):\n if isinstance(layer, nn.Linear):\n cov_a = cls.linear(a, layer)\n elif isinstance(layer, nn.Conv2d):\n cov_a = cls.conv2d(a, layer)\n else:\n # FIXME(CW): for extension to other layers.\n # raise NotImplementedError\n cov_a = None\n\n return cov_a\n\n @staticmethod\n def conv2d(a, layer):\n batch_size = a.size(0)\n a = _extract_patches(a, layer.kernel_size, layer.stride, layer.padding)\n spatial_size = a.size(1) * a.size(2)\n a = a.view(-1, a.size(-1))\n if layer.bias is not None:\n a = torch.cat([a, a.new(a.size(0), 1).fill_(1)], 1)\n a = a/spatial_size\n # FIXME(CW): do we need to divide the output feature map's size?\n return a.t() @ (a / batch_size)\n\n @staticmethod\n def linear(a, layer):\n # a: batch_size * in_dim\n batch_size = a.size(0)\n if layer.bias is not None:\n a = torch.cat([a, a.new(a.size(0), 1).fill_(1)], 1)\n return a.t() @ (a / batch_size)\n\n\nclass ComputeCovG:\n\n @classmethod\n def compute_cov_g(cls, g, layer, batch_averaged=False):\n \"\"\"\n :param g: gradient\n :param layer: the corresponding layer\n :param batch_averaged: if the gradient is already averaged with the batch size?\n :return:\n \"\"\"\n # batch_size = g.size(0)\n return cls.__call__(g, layer, batch_averaged)\n\n @classmethod\n def __call__(cls, g, layer, batch_averaged):\n if isinstance(layer, nn.Conv2d):\n cov_g = cls.conv2d(g, layer, batch_averaged)\n elif isinstance(layer, nn.Linear):\n cov_g = cls.linear(g, layer, batch_averaged)\n else:\n cov_g = None\n\n return cov_g\n\n @staticmethod\n def conv2d(g, layer, batch_averaged):\n # g: batch_size * n_filters * out_h * out_w\n # n_filters is actually the output dimension (analogous to Linear layer)\n spatial_size = g.size(2) * g.size(3)\n batch_size = g.shape[0]\n g = g.transpose(1, 2).transpose(2, 3)\n g = try_contiguous(g)\n g = g.view(-1, g.size(-1))\n\n if batch_averaged:\n g = g * batch_size\n g = g * spatial_size\n cov_g = g.t() @ (g / g.size(0))\n\n return cov_g\n\n @staticmethod\n def linear(g, layer, batch_averaged):\n # g: batch_size * out_dim\n batch_size = g.size(0)\n\n if batch_averaged:\n cov_g = g.t() @ (g * batch_size)\n else:\n cov_g = g.t() @ (g / batch_size)\n return cov_g\n\n\ndef update_running_stat(aa, m_aa, 
stat_decay):\n # using inplace operation to save memory!\n m_aa *= stat_decay / (1 - stat_decay)\n m_aa += aa\n m_aa *= (1 - stat_decay)\n \n\n\n\ndef try_contiguous(x):\n if not x.is_contiguous():\n x = x.contiguous()\n\n return x\n\n\ndef _extract_patches(x, kernel_size, stride, padding):\n \"\"\"\n :param x: The input feature maps. (batch_size, in_c, h, w)\n :param kernel_size: the kernel size of the conv filter (tuple of two elements)\n :param stride: the stride of conv operation (tuple of two elements)\n :param padding: number of paddings. be a tuple of two elements\n :return: (batch_size, out_h, out_w, in_c*kh*kw)\n \"\"\"\n if padding[0] + padding[1] > 0:\n x = F.pad(x, (padding[1], padding[1], padding[0],\n padding[0])).data # Actually check dims\n x = x.unfold(2, kernel_size[0], stride[0])\n x = x.unfold(3, kernel_size[1], stride[1])\n x = x.transpose_(1, 2).transpose_(2, 3).contiguous()\n x = x.view(\n x.size(0), x.size(1), x.size(2),\n x.size(3) * x.size(4) * x.size(5))\n return x"
] |
[
[
"torch.sum",
"torch.no_grad",
"torch.max"
],
[
"torch.is_grad_enabled",
"torch.nn.functional.pad",
"torch.symeig",
"torch.zeros_like"
]
] |
TimoKuenstle/timeseries
|
[
"0e55710631f148c27c74b50dd7e960c7d4bf6601"
] |
[
"generate6.py"
] |
[
"# -*- coding: utf-8 -*-\nimport sugartensor as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n\n__author__ = 'njkim@jamonglab.com'\n\n\ntf.sg_verbosity(10)\n\n\n\nbatch_size = 100 \nnum_category = 10 \nnum_cont = 6 \nnum_dim = 30 \n\n\n\n\ntarget_num = tf.placeholder(dtype=tf.sg_intx, shape=batch_size)\n\ntarget_cval_1 = tf.placeholder(dtype=tf.sg_floatx, shape=batch_size)\n\ntarget_cval_2 = tf.placeholder(dtype=tf.sg_floatx, shape=batch_size)\ntarget_cval_3 = tf.placeholder(dtype=tf.sg_floatx, shape=batch_size)\ntarget_cval_4 = tf.placeholder(dtype=tf.sg_floatx, shape=batch_size)\ntarget_cval_5 = tf.placeholder(dtype=tf.sg_floatx, shape=batch_size)\ntarget_cval_6 = tf.placeholder(dtype=tf.sg_floatx, shape=batch_size)\n\n\n\n\nz = (tf.ones(batch_size, dtype=tf.sg_intx) * target_num).sg_one_hot(depth=num_category)\n\n\nz = z.sg_concat(target=[target_cval_1.sg_expand_dims(), target_cval_2.sg_expand_dims(), target_cval_3.sg_expand_dims(), \n target_cval_4.sg_expand_dims(), target_cval_5.sg_expand_dims(), target_cval_6.sg_expand_dims()])\n\n\nz = z.sg_concat(target=tf.random_uniform((batch_size, num_dim-num_cont-num_category)))\n\n\n\n\nwith tf.sg_context(name='generator', size=(4, 1), stride=(2, 1), act='relu', bn=True):\n gen = (z.sg_dense(dim=1024)\n .sg_dense(dim=48*1*128)\n .sg_reshape(shape=(-1, 48, 1, 128))\n .sg_upconv(dim=64)\n .sg_upconv(dim=32)\n .sg_upconv(dim=num_cont, act='sigmoid', bn=False))\n\n\n\ndef run_generator(num, x1, x2, x3, x4, x5, x6, fig_name='sample.png', csv_name='sample.csv'):\n with tf.Session() as sess:\n tf.sg_init(sess)\n \n saver = tf.train.Saver()\n saver.restore(sess, tf.train.latest_checkpoint('asset/train'))\n\n \n imgs = sess.run(gen, {target_num: num,\n target_cval_1: x1,\n target_cval_2: x2,\n target_cval_3: x3,\n target_cval_4: x4,\n target_cval_5: x5,\n target_cval_6: x6})\n print(imgs.shape)\n\n \n _, ax = plt.subplots(10, 10, sharex=True, sharey=True)\n for i in range(10):\n for j in range(10):\n ax[i][j].plot(imgs[i * 10 + j, :, 0])\n pd.DataFrame(imgs[i * 10 + j, :, 0]).to_csv(\"asset/train/\" + csv_name)\n\n \n ax[i][j].set_axis_off()\n plt.savefig('asset/train/' + fig_name, dpi=600)\n tf.sg_info('Sample image saved to \"asset/train/%s\"' % fig_name)\n tf.sg_info('Sample csv saved to \"asset/train/%s\"' % csv_name)\n plt.close()\n\n\n\n\n\nrun_generator(np.random.randint(0, num_category, batch_size),\n np.random.uniform(0, 1, batch_size), np.random.uniform(0, 1, batch_size),\n np.random.uniform(0, 1, batch_size), np.random.uniform(0, 1, batch_size),\n np.random.uniform(0, 1, batch_size), np.random.uniform(0, 1, batch_size),\n fig_name='fake.png', csv_name='fake.csv')\n\nrun_generator(np.arange(num_category).repeat(num_category),\n np.random.uniform(0, 1, batch_size), np.random.uniform(0, 1, batch_size),\n np.random.uniform(0, 1, batch_size), np.random.uniform(0, 1, batch_size),\n np.random.uniform(0, 1, batch_size), np.random.uniform(0, 1, batch_size))\n\n\n\nfor i in range(10):\n run_generator(np.ones(batch_size) * i,\n np.linspace(0, 1, num_category).repeat(num_category),\n np.expand_dims(np.linspace(0, 1, num_category), axis=1).repeat(num_category, axis=1).T.flatten(),\n np.expand_dims(np.linspace(0, 1, num_category), axis=1).repeat(num_category, axis=1).T.flatten(),\n np.expand_dims(np.linspace(0, 1, num_category), axis=1).repeat(num_category, axis=1).T.flatten(),\n np.expand_dims(np.linspace(0, 1, num_category), axis=1).repeat(num_category, axis=1).T.flatten(),\n np.expand_dims(np.linspace(0, 1, 
num_category), axis=1).repeat(num_category, axis=1).T.flatten(),\n fig_name='sample%d.png' % i, csv_name='sample%d.csv' % i)\n"
] |
[
[
"matplotlib.pyplot.savefig",
"pandas.DataFrame",
"numpy.ones",
"matplotlib.pyplot.close",
"matplotlib.pyplot.subplots",
"numpy.random.uniform",
"numpy.random.randint",
"numpy.arange",
"numpy.linspace"
]
] |
shoaibahmed/pl-cnn
|
[
"75f06630c755168771d049b7dbca300a21f27267"
] |
[
"src/tests/svm.py"
] |
[
"import unittest\nimport theano\nimport theano.tensor as T\nimport numpy as np\n\nfrom layers.svm import SVMLayer\n\n\ndef compile_objective():\n\n scores = T.matrix()\n y_truth = T.ivector()\n\n objective, acc = SVMLayer.objective(scores, y_truth)\n\n return theano.function([scores, y_truth], [objective, acc])\n\n\ndef objective_py(scores, y_truth):\n\n objective, acc = 0, 0\n n_samples = scores.shape[0]\n n_classes = scores.shape[1]\n for i in range(n_samples):\n # find maximally violated constraint\n loss_augmented = np.array([scores[i, y] + int(y != y_truth[i])\n for y in range(n_classes)])\n y_star = np.argmax(loss_augmented)\n\n # update metrics\n delta = int(y_truth[i] != y_star)\n acc += int(y_truth[i] == np.argmax(scores[i]))\n objective += delta + scores[i, y_star] - scores[i, y_truth[i]]\n\n return objective, acc\n\n\nclass TestSVM(unittest.TestCase):\n\n def setUp(self):\n\n self.n_samples = 20\n self.n_classes = 100\n self.k = 5\n\n def test_objective_svm(self):\n \"\"\" Test objective function of standard top-1 SVM\n \"\"\"\n\n objective_theano = compile_objective()\n\n scores = np.random.normal(size=(self.n_samples, self.n_classes)) \\\n .astype(np.float32)\n y_truth = np.random.randint(0, self.n_classes, size=self.n_samples) \\\n .astype(np.int32)\n\n objective_1, acc_1 = objective_theano(scores, y_truth)\n objective_2, acc_2 = objective_py(scores, y_truth)\n\n assert np.isclose(objective_1, objective_2)\n assert np.isclose(acc_1, acc_2)\n"
] |
[
[
"numpy.random.normal",
"numpy.random.randint",
"numpy.isclose",
"numpy.argmax"
]
] |
isabella232/flax
|
[
"39a04e82d6f97ef90c59425599018f2b9df8b6ea"
] |
[
"examples/wip/moco/train_moco.py"
] |
[
"# Copyright 2020 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n# Copyright 2020 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Momentum Contrast for Unsupervised Visual Representation Learning.\n\"\"\"\n\nimport ast\nimport functools\nimport time\n\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\n\nfrom flax import jax_utils\nfrom flax import optim\nfrom moco import imagenet_data_source\nfrom moco import model_resnet\nfrom flax.metrics import tensorboard\nimport flax.nn\nfrom flax.training import common_utils\nfrom flax.training import lr_schedule\n\nimport jax\nfrom jax import lax\nimport jax.nn\nimport jax.numpy as jnp\nimport tensorflow as tf\n\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string(\n 'model_dir', default=None,\n help=('Directory to store model data'))\n\nflags.DEFINE_integer(\n 'batch_size', default=2048,\n help=('Batch size for training.'))\n\nflags.DEFINE_integer(\n 'eval_batch_size', default=8192,\n help=('Batch size for evaluation.'))\n\nflags.DEFINE_integer(\n 'num_moco_epochs', default=200,\n help=('Number of MoCo training epochs.'))\n\nflags.DEFINE_integer(\n 'num_clf_epochs', default=100,\n help=('Number of linear classifier training epochs.'))\n\nflags.DEFINE_float(\n 'moco_learning_rate', default=0.03,\n help=('The learning rate for the MoCo optimizer.'))\n\nflags.DEFINE_float(\n 'clf_learning_rate', default=30.0,\n help=('The learning rate for the classifier optimizer.'))\n\nflags.DEFINE_float(\n 'sgd_momentum', default=0.9,\n help=('SGD optimizer momentum.'))\n\nflags.DEFINE_bool(\n 'sgd_nesterov', default=True,\n help=('Use SGD Nesterov momentum.'))\n\nflags.DEFINE_string(\n 'lr_moco_sched_steps', default='[[120, 0.1], [160, 0.01]]',\n help=('MoCo learning rate schedule steps as a Python list; '\n '[[step1_epoch, step1_lr_scale], '\n '[step2_epoch, step2_lr_scale], ...].'))\n\nflags.DEFINE_string(\n 'lr_clf_sched_steps', default='[[60, 0.2], [75, 0.04], [90, 0.008]]',\n help=('Linear classifier learning rate schedule steps as a Python list; '\n '[[step1_epoch, step1_lr_scale], '\n '[step2_epoch, step2_lr_scale], ...].'))\n\nflags.DEFINE_float(\n 'lr_moco_sched_warmup', default=5.0,\n help=('The length of the linear learning rate ramp-up used at the start '\n 'of MoCo training.'))\n\nflags.DEFINE_float(\n 'lr_clf_sched_warmup', default=0.0,\n help=('The length of the linear learning rate ramp-up used at the start '\n 'of linear classifier training.'))\n\nflags.DEFINE_float(\n 
'moco_l2_reg', default=0.0001,\n help=('The amount of L2-regularization to apply while training MoCo.'))\n\nflags.DEFINE_float(\n 'clf_l2_reg', default=0.0,\n help=('The amount of L2-regularization to apply while training linear '\n 'classifier.'))\n\nflags.DEFINE_string(\n 'arch', default='resnet50',\n help=('Network architecture (resnet50, resnet101 or resnet152).'))\n\nflags.DEFINE_float(\n 'moco_momentum', default=0.999,\n help=('MoCo momentum'))\n\nflags.DEFINE_integer(\n 'emb_size', default=128,\n help=('Size of embedding generated by MoCo network during training.'))\n\nflags.DEFINE_float(\n 'moco_temperature', default=0.07,\n help=('Softmax temperature.'))\n\nflags.DEFINE_integer(\n 'dictionary_size', default=65536,\n help=('Size of dictionary of keys used during MoCo training.'))\n\nflags.DEFINE_integer(\n 'rng', default=0,\n help=('Random seed for network initialization and training.'))\n\n\n@functools.partial(jax.jit, static_argnums=(1, 2, 3))\ndef create_model(key, batch_size, image_size, module):\n input_shape = (batch_size, image_size, image_size, 3)\n with flax.nn.stateful() as init_state:\n with flax.nn.stochastic(jax.random.PRNGKey(0)):\n (_, _), initial_params = module.init_by_shape(\n key, [(input_shape, jnp.float32)])\n model = flax.nn.Model(module, initial_params)\n return model, init_state\n\n\n@functools.partial(jax.jit, static_argnums=(1, 2, 3))\ndef create_linear_classifier(key, batch_size, feature_size, num_classes):\n input_shape = (batch_size, feature_size)\n module = flax.nn.Dense.partial(features=num_classes)\n with flax.nn.stateful():\n _, initial_params = module.init_by_shape(\n key, [(input_shape, jnp.float32)])\n model = flax.nn.Model(module, initial_params)\n return model\n\n\ndef cross_entropy_loss(logits, labels):\n log_softmax_logits = jax.nn.log_softmax(logits)\n num_classes = log_softmax_logits.shape[-1]\n one_hot_labels = common_utils.onehot(labels, num_classes)\n return -jnp.mean(jnp.sum(one_hot_labels * log_softmax_logits, axis=-1))\n\n\ndef compute_metrics(logits, labels):\n loss = cross_entropy_loss(logits, labels)\n error_rate = jnp.mean(jnp.argmax(logits, -1) != labels)\n metrics = {\n 'loss': loss,\n 'error_rate': error_rate,\n }\n metrics = common_utils.pmean(metrics)\n return metrics\n\n\ndef compute_train_moco_metrics(moco_loss_per_sample):\n metrics = {\n 'moco_loss': moco_loss_per_sample.mean(),\n }\n metrics = common_utils.pmean(metrics)\n return metrics\n\n\ndef normalize_embeddings(x, eps=1e-6):\n # A note for those who are interested:\n # For some reason I thought it would be beneficial to stop the gradient\n # on the L2 norm. Turns out this is a bad idea and makes the results\n # substantially worse. 
Don't do this.\n l2_norm = jnp.sqrt(jnp.square(x).sum(axis=1, keepdims=True))\n return x / (l2_norm + eps)\n\n\ndef moco_loss(emb_query, emb_key, moco_dictionary, temperature):\n \"\"\"Compute MoCo loss.\n\n Args:\n emb_query: embedding predicted by query network\n emb_key: embedding predicted by key network\n moco_dictionary: dictionary of embeddings from prior epochs\n temperature: softmax temperature\n\n Returns:\n MoCo loss\n \"\"\"\n # Positive logits\n # pos_logits.shape = (n_samples, 1)\n pos_logits = (emb_query * emb_key).sum(axis=1, keepdims=True) / temperature\n\n # Negative logits = (n_samples, n_codes)\n neg_logits = jnp.dot(emb_query, moco_dictionary.T) / temperature\n\n # We now want to:\n # - append pos_logits and neg_logits along axis 1\n # - compute negative log_softmax to get cross-entropy loss\n # - use the cross-entropy of the positive samples (position 0 in axis 1)\n logits = jnp.append(pos_logits, neg_logits, axis=1)\n moco_loss_per_sample = -jax.nn.log_softmax(logits)[:, 0]\n\n return moco_loss_per_sample\n\n\ndef moco_key_step(model_key, state_key, batch):\n \"\"\"MoCo train step part 1; predict embeddings given key network.\n\n We separate our MoCo training step into two parts.\n This first part uses the key network to predict embeddings.\n The samples that are used have to be shuffled to prevent the network\n from cheating using differing batch stats between devices.\n (see https://arxiv.org/abs/1911.05722, sec 3.3)\n\n Args:\n model_key: key network\n state_key: batch stats and state for key network\n batch: batch of samples\n\n Returns:\n embeddings for samples in `batch`\n \"\"\"\n # Average batch stats across devices/hosts\n state_key = common_utils.pmean(state_key)\n\n # emb_key.shape = (n_samples, emb_size)\n x_key = batch['x_key']\n with flax.nn.stateful(state_key) as new_state_key:\n emb_key, _ = model_key(x_key, train=True)\n emb_key = jax.lax.stop_gradient(emb_key)\n emb_key = normalize_embeddings(emb_key)\n return emb_key, new_state_key\n\n\ndef moco_train_step(optimizer_query, state_query, model_key,\n batch, moco_dictionary, n_devices,\n moco_temperature, learning_rate_fn, l2_reg,\n moco_momentum):\n \"\"\"MoCo training step part 2.\n\n Given the keys generated in part 1, part 2\n uses the query network to predict embeddings for the same samples as in\n part 1.\n The MoCo loss encourages the query network to predict an\n embedding that is more similar to the corresponding key network\n embedding than to any of the embeddings in the MoCo dictionary\n (the paper uses the term dictionary).\n\n Args:\n optimizer_query: query network optimizer/model\n state_query: query network state / batch stats\n model_key: key network\n batch: data batch\n moco_dictionary: dictionary of embeddings from key network\n n_devices: number of devices in use\n moco_temperature: softmax temperature for computing MoCo loss\n learning_rate_fn: function fn(step) -> lr that defines learning rate\n schedule\n l2_reg: L2 regularization coefficient\n moco_momentum: MoCo key network momentum parameter\n\n Returns:\n (new_optimizer_query, new_state_query, metrics, model_key, emb_key_all)\n new_optimizer_query: query network optimizer and model after step\n new_state_query: query network state / batch stats after step\n metrics: MoCo training metrics\n model_key: key network model (used to update query network)\n emb_key_all: key network embeddings concatenated across devices\n \"\"\"\n def loss_fn(model_query):\n \"\"\"loss function used for training.\"\"\"\n\n emb_key = 
batch['emb_key']\n x_query = batch['query_image']\n\n # Get predicted embeddings from query network\n with flax.nn.stateful(state_query) as new_state_query:\n emb_query, _ = model_query(x_query, train=True)\n emb_query = normalize_embeddings(emb_query)\n # emb_query.shape = (n_samples, emb_size)\n\n # Compute per-sample MoCo loss\n moco_loss_per_sample = moco_loss(emb_query, emb_key, moco_dictionary,\n moco_temperature)\n loss = moco_loss_per_sample.mean()\n\n # Apply L2 regularization\n if l2_reg > 0:\n weight_penalty_params = jax.tree_leaves(model_query.params)\n weight_l2 = sum([jnp.sum(x ** 2)\n for x in weight_penalty_params\n if x.ndim > 1])\n weight_penalty = l2_reg * 0.5 * weight_l2\n loss = loss + weight_penalty\n\n return loss, (new_state_query, moco_loss_per_sample, emb_key)\n\n step = optimizer_query.state.step\n lr = learning_rate_fn(step)\n new_optimizer_query, _, (new_state_query, moco_loss_per_sample,\n emb_key) = \\\n optimizer_query.optimize(loss_fn, learning_rate=lr)\n\n # Update key network - exponential moving average of query network\n model_key_params = jax.tree_multimap(\n lambda p_k, p_q: p_k * moco_momentum + p_q * (1.0 - moco_momentum),\n model_key.params, new_optimizer_query.target.params\n )\n model_key = model_key.replace(params=model_key_params)\n\n # Compute metrics\n metrics = compute_train_moco_metrics(moco_loss_per_sample)\n metrics['learning_rate'] = lr\n\n # In this step we use `lax.pswapaxes` to concatenate the embeddings\n # generated by the key network *across multiple hosts*\n emb_rep = [n_devices] + [1] * emb_key.ndim\n emb_key = emb_key[None, ...]\n emb_key = jnp.tile(emb_key, emb_rep)\n emb_key_all = lax.pswapaxes(emb_key, 'batch', 0)\n\n # Return the concatenated key embeddings\n return new_optimizer_query, new_state_query, metrics, model_key, emb_key_all\n\n\ndef classifier_train_step(clf_feat_optimizer, model_moco, state_moco,\n batch, learning_rate_fn, l2_reg):\n \"\"\"Linear classifier training step.\"\"\"\n # Average batch stats across devices/hosts\n state_moco = common_utils.pmean(state_moco)\n\n # Get data from batch\n sup_x = batch['image']\n\n # Predict features (ignore embeddings)\n with flax.nn.stateful(state_moco, mutable=False):\n _, features = model_moco(sup_x, train=False)\n features = jax.lax.stop_gradient(features)\n\n def features_loss_fn(model_clf):\n \"\"\"loss function used for training.\"\"\"\n logits = model_clf(features)\n loss = cross_entropy_loss(logits, batch['label'])\n\n if l2_reg > 0:\n weight_penalty_params = jax.tree_leaves(model_clf.params)\n weight_l2 = sum([jnp.sum(x ** 2)\n for x in weight_penalty_params\n if x.ndim > 1])\n weight_penalty = l2_reg * 0.5 * weight_l2\n loss = loss + weight_penalty\n return loss, (logits,)\n\n # Feature classifier\n feat_step = clf_feat_optimizer.state.step\n feat_lr = learning_rate_fn(feat_step)\n new_clf_feat_optimizer, _, (feat_logits,) = clf_feat_optimizer.optimize(\n features_loss_fn, learning_rate=feat_lr)\n\n feat_metrics = compute_metrics(feat_logits, batch['label'])\n feat_metrics['learning_rate'] = feat_lr\n\n return new_clf_feat_optimizer, feat_metrics\n\n\ndef eval_step(model_moco, state_moco, feat_clf_model, batch):\n \"\"\"Linear classifier evaluation step.\"\"\"\n # Average batch stats across devices/hosts\n state_moco = common_utils.pmean(state_moco)\n # Use MoCo network to predict features\n with flax.nn.stateful(state_moco, mutable=False):\n _, features = model_moco(batch['image'], train=False)\n # Use linear model to predict class logits\n feat_logits = 
feat_clf_model(features)\n feat_metrics = compute_metrics(feat_logits, batch['label'])\n return feat_metrics\n\n\ndef train(module,\n model_dir,\n batch_size,\n eval_batch_size,\n num_moco_epochs,\n num_clf_epochs,\n moco_learning_rate,\n clf_learning_rate,\n sgd_momentum=0.9,\n sgd_nesterov=True,\n make_moco_lr_fun=None,\n make_clf_lr_fun=None,\n moco_l2_reg=0.0001,\n clf_l2_reg=0.0,\n feature_size=64*8*4,\n moco_momentum=0.999,\n emb_size=128,\n moco_temperature=0.07,\n dictionary_size=65536,\n run_seed=0):\n \"\"\"Train MoCo model.\"\"\"\n if make_moco_lr_fun is None:\n def make_moco_lr_fun(base_lr, steps_per_epoch): # pylint: disable=function-redefined\n return lr_schedule.create_stepped_learning_rate_schedule(\n base_lr, steps_per_epoch, [[120, 0.1], [160, 0.01]])\n\n if make_clf_lr_fun is None:\n def make_clf_lr_fun(base_lr, steps_per_epoch): # pylint: disable=function-redefined\n return lr_schedule.create_stepped_learning_rate_schedule(\n base_lr, steps_per_epoch, [[60, 0.2], [75, 0.04], [90, 0.008]])\n\n if jax.host_id() == 0:\n summary_writer = tensorboard.SummaryWriter(model_dir)\n else:\n summary_writer = None\n\n #\n #\n # If using more than 1 host, warn the user\n #\n #\n\n if jax.host_count() > 1:\n logging.info('WARNING: the all_to_all collective used by this program is '\n 'not yet supported in multi-host environments')\n\n train_rng = jax.random.PRNGKey(run_seed)\n (init_moco_rng, init_clf_rng, init_dictionary_rng,\n train_rng) = jax.random.split(train_rng, num=4)\n\n if batch_size % jax.device_count() > 0:\n raise ValueError('Train batch size must be divisible by the number '\n 'of devices')\n if eval_batch_size % jax.device_count() > 0:\n raise ValueError('Eval batch size must be divisible by the number '\n 'of devices')\n local_batch_size = batch_size // jax.host_count()\n local_eval_batch_size = eval_batch_size // jax.host_count()\n n_devices = jax.device_count()\n n_local_devices = jax.local_device_count()\n\n device_batch_size = batch_size // n_devices\n\n image_size = 224\n data_source = imagenet_data_source.load_imagenet(\n train_batch_size=local_batch_size,\n eval_batch_size=local_eval_batch_size,\n greyscale_prob=0.1)\n\n n_train = data_source.n_train\n train_moco_ds = data_source.train_moco_ds\n train_clf_ds = data_source.train_clf_ds\n eval_ds = data_source.test_ds\n n_eval = data_source.n_test\n\n logging.info('DATA: |train|=%d, |eval|=%d', data_source.n_train, n_eval)\n\n steps_per_epoch = n_train // batch_size\n steps_per_eval = n_eval // eval_batch_size\n num_moco_steps = steps_per_epoch * num_moco_epochs\n num_clf_steps = steps_per_epoch * num_clf_epochs\n\n logging.info('Loaded dataset')\n\n #\n # Create query model\n #\n model_query, state_query = create_model(\n init_moco_rng, device_batch_size, image_size, module)\n state_query = jax_utils.replicate(state_query)\n\n # Create linear classifier\n feat_model_clf = create_linear_classifier(\n init_clf_rng, device_batch_size, feature_size, data_source.n_classes)\n\n # Randomly initialise dictionary\n moco_dictionary = jax.random.normal(\n init_dictionary_rng, (dictionary_size, emb_size), dtype=jnp.float32)\n moco_dictionary = normalize_embeddings(moco_dictionary)\n logging.info('Built model')\n\n #\n # Create optimizer\n #\n\n optimizer_def = optim.Momentum(learning_rate=moco_learning_rate,\n beta=sgd_momentum, nesterov=sgd_nesterov)\n optimizer_query = optimizer_def.create(model_query)\n optimizer_query = optimizer_query.replicate()\n del model_query # don't keep a copy of the initial model\n\n 
feat_clf_optimizer_def = optim.Momentum(\n learning_rate=clf_learning_rate, beta=sgd_momentum,\n nesterov=sgd_nesterov)\n feat_clf_optimizer = feat_clf_optimizer_def.create(feat_model_clf)\n feat_clf_optimizer = feat_clf_optimizer.replicate()\n logging.info('Built optimizer')\n\n #\n # Learning rate schedule\n #\n\n base_moco_learning_rate = moco_learning_rate * batch_size / 256.\n base_clf_learning_rate = clf_learning_rate * batch_size / 256.\n moco_learning_rate_fn = make_moco_lr_fun(\n base_moco_learning_rate, steps_per_epoch)\n clf_learning_rate_fn = make_clf_lr_fun(\n base_clf_learning_rate, steps_per_epoch)\n\n # The key model is a replica of the query model. Since Flax models are\n # immutable, we can start with the query model\n model_key = optimizer_query.target\n # Replicate batch stats\n state_key = jax.tree_map(lambda x: x, state_query)\n\n # Set up epoch and step counter\n # Load existing checkpoint if available\n moco_epoch = 1\n clf_epoch = 1\n moco_step = 0\n clf_step = 0\n\n #\n # Training and eval functions\n #\n p_moco_key_step = jax.pmap(\n functools.partial(moco_key_step),\n axis_name='batch')\n p_moco_train_step = jax.pmap(\n functools.partial(moco_train_step, n_devices=n_devices,\n moco_temperature=moco_temperature,\n learning_rate_fn=moco_learning_rate_fn,\n l2_reg=moco_l2_reg,\n moco_momentum=moco_momentum),\n axis_name='batch')\n p_classifier_train_step = jax.pmap(\n functools.partial(classifier_train_step,\n learning_rate_fn=clf_learning_rate_fn,\n l2_reg=clf_l2_reg),\n axis_name='batch')\n p_eval_step = jax.pmap(\n functools.partial(eval_step),\n axis_name='batch')\n\n # Create MoCo dataset batch iterator\n train_moco_it = iter(train_moco_ds)\n\n #\n # Training loop\n #\n\n logging.info('Training MoCo...')\n\n epoch_metrics_moco = []\n t1 = time.time()\n while moco_step < num_moco_steps:\n (train_rng, shuffle_rng) = jax.random.split(train_rng, num=2)\n\n batch = next(train_moco_it)\n # TF to NumPy\n batch = jax.tree_map(lambda x: x._numpy(), batch) # pylint: disable=protected-access\n\n # Compute key embeddings\n # We have to shuffle the batch to prevent the network from cheating using\n # batch stats\n shuffle_forward = jax.random.shuffle(\n shuffle_rng, jnp.arange(local_batch_size))\n shuffle_backward = jnp.zeros((local_batch_size,), dtype=int)\n shuffle_backward = jax.ops.index_update(\n shuffle_backward, shuffle_forward, jnp.arange(local_batch_size))\n\n key_batch = dict(x_key=batch['key_image'][shuffle_forward, ...])\n key_batch_sharded = common_utils.shard(key_batch)\n emb_key, state_key = p_moco_key_step(\n model_key, state_key, key_batch_sharded)\n emb_key = emb_key.reshape((-1, emb_size))\n emb_key = emb_key[shuffle_backward, ...]\n\n #\n # Main MoCo training step\n #\n moco_batch = batch.copy()\n moco_batch['emb_key'] = emb_key\n sharded_moco_batch = common_utils.shard(moco_batch)\n\n # Repeat the MoCo dictionary across shards\n sharded_dict = jnp.repeat(moco_dictionary[None, ...], n_local_devices,\n axis=0)\n\n # The main train step function is applied slightly differently in\n # multi-host environments\n optimizer_query, state_query, metrics_moco, model_key, code_batch = \\\n p_moco_train_step(optimizer_query, state_query, model_key,\n sharded_moco_batch, sharded_dict)\n code_batch = code_batch[0].reshape((-1, emb_size))\n\n moco_dictionary = jnp.append(\n code_batch, moco_dictionary, axis=0)[:dictionary_size]\n\n epoch_metrics_moco.append(metrics_moco)\n if (moco_step + 1) % steps_per_epoch == 0:\n epoch_metrics_moco = 
common_utils.get_metrics(epoch_metrics_moco)\n train_epoch_metrics = jax.tree_map(lambda x: x.mean(),\n epoch_metrics_moco)\n if summary_writer is not None:\n for key, vals in epoch_metrics_moco.items():\n tag = 'train_%s' % key\n for i, val in enumerate(vals):\n summary_writer.scalar(tag, val, moco_step - len(vals) + i + 1)\n\n epoch_metrics_moco = []\n\n t2 = time.time()\n\n logging.info(\n 'MoCo EPOCH %d: (took %.3fs): MoCo loss=%.6f',\n moco_epoch, t2 - t1, train_epoch_metrics['moco_loss'])\n\n t1 = t2\n\n if summary_writer is not None:\n summary_writer.flush()\n\n moco_epoch += 1\n\n moco_step += 1\n\n del train_moco_it\n\n #\n #\n # Unsupervised MoCo training complete\n # Train classifier\n #\n #\n\n logging.info('Training Linear Classifier...')\n\n train_clf_it = iter(train_clf_ds)\n eval_iter = iter(eval_ds)\n\n epoch_feat_metrics = []\n t1 = time.time()\n while clf_step < num_clf_steps:\n batch = next(train_clf_it)\n # TF to NumPy\n batch = jax.tree_map(lambda x: x._numpy(), batch) # pylint: disable=protected-access\n batch = common_utils.shard(batch)\n\n feat_clf_optimizer, feat_metrics = p_classifier_train_step(\n feat_clf_optimizer, model_key, state_key, batch)\n\n epoch_feat_metrics.append(feat_metrics)\n if (clf_step + 1) % steps_per_epoch == 0:\n epoch_feat_metrics = common_utils.get_metrics(epoch_feat_metrics)\n train_epoch_feat_metrics = jax.tree_map(lambda x: x.mean(),\n epoch_feat_metrics)\n if summary_writer is not None:\n for key, vals in epoch_feat_metrics.items():\n tag = 'train_feat_%s' % key\n for i, val in enumerate(vals):\n summary_writer.scalar(tag, val, clf_step - len(vals) + i + 1)\n\n epoch_feat_metrics = []\n eval_feat_metrics = []\n for _ in range(steps_per_eval):\n eval_batch = next(eval_iter)\n # TF to NumPy\n eval_batch = jax.tree_map(lambda x: x._numpy(), eval_batch) # pylint: disable=protected-access\n # Shard across local devices\n eval_batch = common_utils.shard(eval_batch)\n feat_metrics = p_eval_step(\n model_key, state_key, feat_clf_optimizer.target, eval_batch)\n eval_feat_metrics.append(feat_metrics)\n eval_feat_metrics = common_utils.get_metrics(eval_feat_metrics)\n eval_epoch_feat_metrics = jax.tree_map(lambda x: x.mean(),\n eval_feat_metrics)\n\n t2 = time.time()\n\n logging.info(\n 'Linear classifier EPOCH %d: (took %.3fs): TRAIN FEAT loss=%.6f, '\n 'err=%.3f; EVAL FEAT loss=%.6f, err=%.3f',\n clf_epoch, t2 - t1, train_epoch_feat_metrics['loss'],\n train_epoch_feat_metrics['error_rate'] * 100.0,\n eval_epoch_feat_metrics['loss'],\n eval_epoch_feat_metrics['error_rate'] * 100.0,\n )\n\n t1 = t2\n\n if summary_writer is not None:\n summary_writer.scalar('eval_feat_loss',\n eval_epoch_feat_metrics['loss'], clf_epoch)\n summary_writer.scalar('eval_feat_error_rate',\n eval_epoch_feat_metrics['error_rate'], clf_epoch)\n summary_writer.flush()\n\n clf_epoch += 1\n\n clf_step += 1\n\n\ndef main(argv):\n if len(argv) > 1:\n raise app.UsageError('Too many command-line arguments.')\n\n # Make sure tf does not allocate gpu memory.\n tf.config.experimental.set_visible_devices([], 'GPU')\n\n emb_size = FLAGS.emb_size\n\n if FLAGS.arch == 'resnet50':\n module = model_resnet.ResNet50.partial(num_outputs=emb_size)\n feature_size = 64 * 8 * 4\n elif FLAGS.arch == 'resnet101':\n module = model_resnet.ResNet101.partial(num_outputs=emb_size)\n feature_size = 64 * 8 * 4\n elif FLAGS.arch == 'resnet152':\n module = model_resnet.ResNet152.partial(num_outputs=emb_size)\n feature_size = 64 * 8 * 4\n else:\n raise ValueError\n\n if FLAGS.lr_moco_sched_steps:\n 
lr_moco_sched_steps = ast.literal_eval(FLAGS.lr_moco_sched_steps)\n else:\n lr_moco_sched_steps = [[120, 0.1], [160, 0.01]]\n\n if FLAGS.lr_clf_sched_steps:\n lr_clf_sched_steps = ast.literal_eval(FLAGS.lr_clf_sched_steps)\n else:\n lr_clf_sched_steps = [[60, 0.2], [75, 0.04], [90, 0.008]]\n\n def make_moco_lr_fun(base_lr, steps_per_epoch):\n return lr_schedule.create_stepped_learning_rate_schedule(\n base_lr, steps_per_epoch, lr_moco_sched_steps,\n warmup_length=FLAGS.lr_moco_sched_warmup)\n\n def make_clf_lr_fun(base_lr, steps_per_epoch):\n return lr_schedule.create_stepped_learning_rate_schedule(\n base_lr, steps_per_epoch, lr_clf_sched_steps,\n warmup_length=FLAGS.lr_clf_sched_warmup)\n\n train(module,\n model_dir=FLAGS.model_dir,\n batch_size=FLAGS.batch_size,\n eval_batch_size=FLAGS.eval_batch_size,\n num_moco_epochs=FLAGS.num_moco_epochs,\n num_clf_epochs=FLAGS.num_clf_epochs,\n moco_learning_rate=FLAGS.moco_learning_rate,\n clf_learning_rate=FLAGS.clf_learning_rate,\n sgd_momentum=FLAGS.sgd_momentum,\n sgd_nesterov=FLAGS.sgd_nesterov,\n make_moco_lr_fun=make_moco_lr_fun,\n make_clf_lr_fun=make_clf_lr_fun,\n moco_l2_reg=FLAGS.moco_l2_reg,\n clf_l2_reg=FLAGS.clf_l2_reg,\n feature_size=feature_size,\n moco_momentum=FLAGS.moco_momentum,\n emb_size=emb_size,\n moco_temperature=FLAGS.moco_temperature,\n dictionary_size=FLAGS.dictionary_size,\n run_seed=FLAGS.rng)\n\n\nif __name__ == '__main__':\n app.run(main)\n"
] |
[
[
"tensorflow.config.experimental.set_visible_devices"
]
] |
georg-wenzel/ml-data-smell-detection
|
[
"7dddd401ca1f1a830dfd8b00760659911e5b1086",
"7dddd401ca1f1a830dfd8b00760659911e5b1086"
] |
[
"WebApp/main/views_datasets.py",
"WebApp/main/utility/TensorflowUtility.py"
] |
[
"from django.shortcuts import render, redirect\nfrom main.forms import AddDatasetForm, EditDatasetForm, DeleteDatasetForm, EditColumnFormSet\nfrom django.http import HttpResponseBadRequest\nfrom main.utility.DatabaseUtility import safe_get\nfrom main.utility import StringUtility\nfrom main.models import Dataset, Column, Agent\nfrom django import forms\nimport pandas\nfrom django.contrib import messages\n\n# views for dataset-specific calls\n\n# view which lists all datasets\ndef datasets(request):\n if not request.user.is_authenticated:\n return redirect('/user/login')\n \n # Create form for adding and deleting a dataset\n form = AddDatasetForm()\n delete_form = DeleteDatasetForm()\n # add a hidden id field to the dataset deletion form \n # (this will be filled when a user clicks the button on any specific dataset)\n delete_form.fields['id'].widget = forms.HiddenInput()\n datasets = Dataset.objects.filter(user=request.user)\n return render(request, 'main/datasets.html', {\"username\": request.user.username, \n \"form\": form, \"datasets\": datasets, \"deleteForm\": delete_form})\n\n# view which creates a new dataset \ndef add_dataset(request):\n # authentication\n if not request.user.is_authenticated:\n return redirect('/user/login')\n\n # must be submitted as post\n if request.method != \"POST\":\n return redirect('/datasets')\n\n #get the data from the post request into an AddDatasetForm\n form = AddDatasetForm(request.POST, request.FILES)\n\n #validate data, add user and store if file is valid\n if(form.is_valid):\n #ignore files that are not content type text/csv\n if not request.FILES['upload'].name.endswith(\".csv\"):\n messages.add_message(request, messages.ERROR, \"Only .csv files are supported for uploading.\")\n return redirect('/datasets')\n\n #store but dont commit\n dataset = form.save(commit=False)\n \n #set user to authenticated user\n dataset.user = request.user\n #open csv file\n try:\n csv = pandas.read_csv(dataset.upload, index_col=False)\n dataset.row_length = csv.shape[0]\n dataset.col_length = csv.shape[1]\n dataset.save()\n\n #if user checked the \"has headers\", attempt to extract column types and headers from the dataset\n if form.cleaned_data['has_headers']:\n for name, dtype in csv.dtypes.iteritems():\n c = Column(name=name,dtype=dtype,dataset=dataset)\n c.save()\n #otherwise, store them as undefined\n else:\n for _ in range(csv.shape[1]):\n c = Column(name=\"undefined\",dtype=\"undefined\",dataset=dataset)\n c.save()\n\n #redirect to dataset list\n return redirect('/datasets')\n except SystemExit:\n raise\n except:\n messages.add_message(request, messages.ERROR, \"There was an error parsing your .csv file.\")\n return redirect('/datasets')\n \n# view which edits an existing dataset / displays details\ndef edit_dataset(request, id=1):\n if not request.user.is_authenticated:\n return redirect('/user/login')\n\n #get dataset with the given id\n dataset = safe_get(Dataset, id=id)\n\n if not dataset:\n messages.add_message(request, messages.ERROR, \"Could not find dataset.\")\n return redirect('/datasets')\n\n #check if user is authorized to see this dataset\n if not dataset.user == request.user:\n messages.add_message(request, messages.ERROR, StringUtility.ERR_UNAUTHORIZED(\"dataset\"))\n return redirect('/datasets')\n\n #if not post request\n if request.method != \"POST\":\n #provide an edit form (and edit column form set) and return the view\n form = EditDatasetForm(instance=dataset, prefix='dataset')\n formset = 
EditColumnFormSet(queryset=Column.objects.filter(dataset=dataset), prefix='columns')\n\n return render(request, 'main/editdataset.html', {\"username\": request.user.username, \n \"dataset\": dataset, \"form\": form, \"colforms\": formset})\n\n\n #if post request, get the data from the form and formset\n form = EditDatasetForm(request.POST, instance=dataset, prefix='dataset')\n formset = EditColumnFormSet(request.POST, prefix='columns')\n\n #update form and formset if valid\n if form.is_valid:\n form.save()\n if formset.is_valid:\n instances = formset.save(commit=False)\n for f in instances:\n f.dataset = dataset\n f.save()\n for f in formset.deleted_objects:\n f.delete()\n\n #return to datasets view\n return redirect('/datasets')\n\n# view which deletes a dataset\ndef delete_dataset(request):\n #must be called via post\n if request.method != \"POST\":\n return redirect('/datasets')\n\n #must be called by logged in user\n if not request.user.is_authenticated:\n return redirect('/user/login')\n\n #get dataset with this id\n form = DeleteDatasetForm(request.POST)\n #delete if dataset exists and owner is equal to user calling the POST\n if form.is_valid():\n dataset = safe_get(Dataset, pk=form.cleaned_data['id'])\n if dataset:\n if(dataset.user == request.user):\n #we can only delete the dataset if it has not been used in the training of agents\n agent_datasets = Agent.objects.filter(dataset = dataset)\n if len(agent_datasets) == 0:\n dataset.upload.delete()\n dataset.delete()\n else:\n #build error string\n err = \"This dataset is used to train the following agents: \" + (\", \".join([a.name for a in agent_datasets]))\n err += \". Please first remove the corresponding agents.\"\n #show error\n messages.add_message(request, messages.ERROR, err)\n \n #either way, redirect to the datasets page afterwards\n return redirect('/datasets')",
"import pandas as pd\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.keras.utils import to_categorical\nfrom sklearn.utils.class_weight import compute_class_weight\n\n# Utility functions for TensorFlow\n\n#return an in-memory version of the given .csv file (through pandas)\ndef get_csv(file):\n return pd.read_csv(file)\n\n#return an encoder fitted on the given pandas x column\ndef get_encoder(x):\n #fit encoder to the x column\n encoder = tf.keras.preprocessing.text.Tokenizer(char_level=True)\n encoder.fit_on_texts(x)\n return encoder\n\n#encode a column of x data (by mapping to sequences and padding)\ndef encode_x(encoder, x):\n enc_x = encoder.texts_to_sequences(x)\n max_length = max(map(len, enc_x))\n enc_x = tf.keras.preprocessing.sequence.pad_sequences(enc_x, maxlen=max_length)\n enc_x = np.array(enc_x)\n return enc_x\n\n#encode x to a fixed predetermined length. if none is given, encode normally and return the length\ndef encode_x_fixed_length(encoder, x, length=None):\n enc_x = encoder.texts_to_sequences(x)\n if not length: max_length = max(map(len, enc_x))\n else: max_length = length\n enc_x = tf.keras.preprocessing.sequence.pad_sequences(enc_x, maxlen=max_length)\n enc_x = tf.keras.utils.normalize(enc_x)\n enc_x = np.array(enc_x)\n return enc_x, max_length\n\n#encode a column of y data (by converting to a one-hot representation in a numpy array)\ndef encode_y(y):\n return np.array(to_categorical(y))\n\ndef get_class_weights(encoded_y):\n y_int = np.argmax(encoded_y, axis=1)\n class_weights = compute_class_weight('balanced', np.unique(y_int), y_int)\n d_weights = dict(enumerate(class_weights))\n return d_weights\n\n#train a model on given encoded x and y, encoder, and class weights\ndef train_model(encoder, enc_x, enc_y, weights):\n model = tf.keras.Sequential([\n tf.keras.layers.Embedding\n (\n len(encoder.index_word) + 1,\n 32\n ),\n tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(8, dropout=0.15)),\n tf.keras.layers.Dense(enc_y.shape[1], activation='softmax')\n ])\n\n #compile the model\n model.compile(loss=tf.keras.losses.CategoricalCrossentropy(),\n optimizer=tf.keras.optimizers.Adam(learning_rate=0.005),\n metrics=['accuracy'])\n\n #train the model with earlystopping\n callback = tf.keras.callbacks.EarlyStopping(monitor='loss', patience=10)\n model.fit(enc_x, enc_y, epochs=100, batch_size=4, class_weight=weights, shuffle=True, callbacks=[callback])\n\n return model\n\n#train an autoencoder on given x\ndef train_anomaly_model(enc_x, input_dim):\n model = tf.keras.Sequential([\n tf.keras.layers.Input(shape=(input_dim,)),\n tf.keras.layers.Dense(16, activation='relu'),\n tf.keras.layers.Dense(8, activation='relu'),\n tf.keras.layers.Dense(16, activation='relu'),\n tf.keras.layers.Dense(input_dim, activation='sigmoid')\n ])\n\n #compile the model\n model.compile(optimizer=tf.keras.optimizers.Adam(), loss='mse', metrics=['acc'])\n\n #train the model with early stopping\n callback = tf.keras.callbacks.EarlyStopping(monitor='loss', patience=10)\n model.fit(enc_x, enc_x, epochs=700, batch_size=4, callbacks=[callback])\n\n return model"
] |
[
[
"pandas.read_csv"
],
[
"tensorflow.keras.preprocessing.sequence.pad_sequences",
"tensorflow.keras.utils.normalize",
"numpy.array",
"tensorflow.keras.utils.to_categorical",
"tensorflow.keras.layers.Input",
"tensorflow.keras.layers.Dense",
"numpy.argmax",
"tensorflow.keras.losses.CategoricalCrossentropy",
"tensorflow.keras.preprocessing.text.Tokenizer",
"tensorflow.keras.layers.LSTM",
"pandas.read_csv",
"tensorflow.keras.optimizers.Adam",
"numpy.unique",
"tensorflow.keras.callbacks.EarlyStopping"
]
] |
thequilo/padertorch
|
[
"5e7ff6c2570739a0556d7c88bb93cd77017662a2"
] |
[
"padertorch/contrib/je/models/vae.py"
] |
[
"import numpy as np\nimport torch\nfrom einops import rearrange\nfrom padertorch.base import Model\nfrom padertorch.contrib.je.modules.conv import CNN1d, CNNTranspose1d\nfrom padertorch.contrib.je.modules.gmm import GMM\nfrom padertorch.contrib.je.modules.hmm import HMM\nfrom padertorch.contrib.je.modules.features import NormalizedLogMelExtractor\nfrom padertorch.contrib.je.modules.global_pooling import Mean\nfrom padertorch.contrib.je.modules.hybrid import HybridCNN, HybridCNNTranspose\nfrom sklearn import metrics\nfrom torch.distributions import Normal\nfrom torchvision.utils import make_grid\n\n\nclass VAE(Model):\n \"\"\"\n >>> config = VAE.get_config(dict(\\\n encoder=dict(\\\n factory=HybridCNN,\\\n input_size=80,\\\n cnn_2d=dict(\\\n in_channels=1, out_channels=3*[32], kernel_size=3, \\\n ), \\\n cnn_1d=dict(\\\n out_channels=3*[32], kernel_size=3\\\n ),\\\n ),\\\n feature_extractor=dict(\\\n sample_rate=16000,\\\n fft_length=512,\\\n n_mels=80,\\\n ),\\\n ))\n >>> config['encoder']['cnn_1d']['in_channels']\n 2560\n >>> config['encoder']['cnn_1d']['out_channels']\n [32, 32, 32]\n >>> config['decoder']['cnn_transpose_1d']['in_channels']\n 16\n >>> vae = VAE.from_config(config)\n >>> inputs = {'stft': torch.zeros((4, 1, 100, 257, 2)), 'seq_len': None}\n >>> outputs = vae(inputs)\n >>> outputs[0][0].shape\n torch.Size([4, 1, 80, 100])\n >>> outputs[0][1].shape\n torch.Size([4, 1, 80, 100])\n >>> outputs[1][0][0].shape\n torch.Size([4, 16, 100])\n >>> outputs[1][0][1].shape\n torch.Size([4, 16, 100])\n >>> review = vae.review(inputs, outputs)\n \"\"\"\n def __init__(\n self, encoder: HybridCNN, decoder: HybridCNNTranspose, *,\n feature_key='stft', feature_extractor=None, feature_size=None,\n ):\n super().__init__()\n # allow joint optimization of encoder and decoder\n self.encoder = encoder\n self.decoder = decoder\n self.feature_key = feature_key\n self.feature_extractor = feature_extractor\n self.feature_size = feature_size\n self.n_params = 2\n\n def encode(self, x, seq_len=None):\n if isinstance(self.encoder, CNN1d):\n x = rearrange(x, 'b c f t -> b (c f) t')\n if self.encoder.return_pool_indices:\n h, seq_len, pool_indices = self.encoder(x, seq_len=seq_len)\n else:\n h, seq_len = self.encoder(x, seq_len=seq_len)\n pool_indices = None\n assert not h.shape[1] % self.n_params\n params = tuple(torch.split(h, h.shape[1] // self.n_params, dim=1))\n return params, seq_len, pool_indices\n\n def reparameterize(self, params):\n mu, logvar = params[:2]\n if self.training:\n std = torch.exp(0.5 * logvar)\n eps = torch.randn_like(std)\n return eps * std + mu\n else:\n return mu\n\n def decode(self, z, seq_len=None, shapes=None, lengths=None, pool_indices=None):\n x_hat, seq_len = self.decoder(\n z, seq_len=seq_len, shapes=shapes, seq_lens=lengths, pool_indices=pool_indices\n )\n if x_hat.dim() == 3:\n b, f, t = x_hat.shape\n x_hat = x_hat.view((b, -1, self.feature_size, t))\n return x_hat, seq_len # (B, C, F, T)\n\n def forward(self, inputs):\n x_target = inputs[self.feature_key]\n seq_len = inputs['seq_len']\n if self.feature_extractor is not None:\n x_target = self.feature_extractor(x_target, seq_len)\n params, seq_len, pool_indices = self.encode(x_target, seq_len)\n x_shape = x_target.shape\n if isinstance(self.encoder, CNN1d):\n x_shape = (x_shape[0], x_shape[1]*x_shape[2], x_shape[3])\n shapes = self.encoder.get_shapes(x_shape)\n if seq_len is None:\n lengths = None\n else:\n lengths = self.encoder.get_seq_lens(seq_len)\n z = self.reparameterize(params)\n x_hat, _ = self.decode(\n z, 
seq_len, shapes=shapes, lengths=lengths, pool_indices=pool_indices\n )\n return (x_target, x_hat), (params, seq_len)\n\n def review(self, inputs, outputs):\n # visualization\n (x_target, *x_hats), (params, seq_len), *_ = outputs\n (mu, log_var) = params[:2]\n kld = -0.5 * (1 + log_var - mu.pow(2) - log_var.exp()).sum(dim=1)\n kld = Mean(axis=-1)(kld, seq_len)\n\n mu = mu.contiguous()\n review = dict(\n losses=dict(\n kld=kld.mean(),\n ),\n histograms=dict(\n kld_=kld.flatten(),\n mu_=mu.flatten(),\n log_var_=log_var.flatten(),\n ),\n images=dict(\n targets=x_target[:3, :1],\n latents=mu[:3],\n )\n )\n seq_len = inputs['seq_len']\n for i, x_hat in enumerate(x_hats):\n mse = (x_hat - x_target).pow(2).sum(dim=(1, 2))\n mse = Mean(axis=-1)(mse, seq_len)\n review['losses'][f'rec{i}'] = mse.mean()\n review['histograms'][f'rec{i}_'] = mse\n review['images'][f'x_hat_{i}_'] = x_hat.contiguous()[:3, :1]\n return review\n\n def modify_summary(self, summary):\n summary = super().modify_summary(summary)\n for key, image in summary['images'].items():\n if image.dim() == 3:\n image = image.unsqueeze(1)\n summary['images'][key] = make_grid(\n image.flip(2), normalize=True, scale_each=False, nrow=1\n )\n return summary\n\n @classmethod\n def finalize_dogmatic_config(cls, config):\n config['encoder']['factory'] = CNN1d\n config['feature_extractor'] = {\n 'factory': NormalizedLogMelExtractor,\n }\n in_channels = None\n if config['feature_extractor'] is not None:\n if config['feature_extractor']['factory'] == NormalizedLogMelExtractor:\n config['feature_size'] = config['feature_extractor']['n_mels']\n in_channels = 1 + config['feature_extractor']['add_deltas']+config['feature_extractor']['add_delta_deltas']\n feature_size = config['feature_size']\n if config['encoder']['factory'] == HybridCNN:\n if feature_size is not None:\n config['encoder'].update({\n 'input_size': feature_size,\n })\n if in_channels is not None:\n config['encoder']['cnn_2d']['in_channels'] = in_channels\n content_emb_dim = config['encoder']['cnn_1d']['out_channels'][-1] // 2\n elif config['encoder']['factory'] == CNN1d:\n if feature_size is not None and in_channels is not None:\n config['encoder']['in_channels'] = feature_size * in_channels\n content_emb_dim = config['encoder']['out_channels'][-1] // 2\n else:\n raise ValueError(f'Factory {config[\"encoder\"][\"factory\"]} not allowed.')\n\n config['decoder'] = config['encoder']['factory'].get_transpose_config(config['encoder'])\n if config['decoder']['factory'] == HybridCNNTranspose:\n config['decoder']['cnn_transpose_1d']['in_channels'] = content_emb_dim\n elif config['decoder']['factory'] == CNNTranspose1d:\n config['decoder']['in_channels'] = content_emb_dim\n else:\n raise ValueError(f'Factory {config[\"decoder\"][\"factory\"]} not allowed.')\n\n\nclass GMMVAE(VAE):\n \"\"\"\n >>> config = GMMVAE.get_config(dict(\\\n encoder=dict(\\\n factory=HybridCNN,\\\n input_size=80,\\\n cnn_2d=dict(\\\n in_channels=1, out_channels=3*[32], kernel_size=3, \\\n ), \\\n cnn_1d=dict(\\\n out_channels=3*[32], kernel_size=3\\\n ),\\\n ),\\\n decoder=dict(cnn_transpose_1d=dict(in_channels=16)),\\\n gmm=dict(num_classes=10),\\\n feature_extractor=dict(\\\n factory=NormalizedLogMelExtractor,\\\n sample_rate=16000,\\\n fft_length=512,\\\n n_mels=80,\\\n ),\\\n ))\n >>> config['encoder']['cnn_1d']['in_channels']\n 2560\n >>> config['encoder']['cnn_1d']['out_channels']\n [32, 32, 32]\n >>> config['decoder']['cnn_transpose_1d']['in_channels']\n 16\n >>> gmmvae = GMMVAE.from_config(config)\n >>> inputs = 
{'stft': torch.zeros((4, 1, 100, 257, 2)), 'seq_len': None, 'labels': torch.Tensor([1,2,3,4]).long()}\n >>> outputs = gmmvae(inputs)\n >>> outputs[0][0].shape\n torch.Size([4, 1, 80, 100])\n >>> outputs[0][1].shape\n torch.Size([4, 1, 80, 100])\n >>> outputs[1][0][0].shape\n torch.Size([4, 16, 100])\n >>> outputs[1][0][1].shape\n torch.Size([4, 16, 100])\n >>> outputs[2][0].shape\n torch.Size([4, 100, 10])\n >>> outputs[2][1].shape\n torch.Size([4, 100, 10])\n >>> review = gmmvae.review(inputs, outputs)\n >>> gmmvae.supervised = True\n >>> gmmvae.label_key = 'labels'\n >>> outputs = gmmvae(inputs)\n >>> review = gmmvae.review(inputs, outputs)\n \"\"\"\n def __init__(\n self, encoder: HybridCNN, decoder: HybridCNNTranspose, gmm: GMM, *,\n feature_key='stft', feature_extractor=None, feature_size=None,\n label_key=None, supervised=False,\n ):\n super().__init__(\n encoder=encoder, decoder=decoder,\n feature_key=feature_key, feature_extractor=feature_extractor,\n feature_size=feature_size,\n )\n self.gmm = gmm\n self.label_key = label_key\n self.supervised = supervised\n\n def forward(self, inputs):\n (x, x_hat), ((mu, log_var), seq_len) = super().forward(inputs)\n qz = Normal(\n loc=rearrange(mu, 'b d t -> b t d'),\n scale=torch.exp(0.5 * rearrange(log_var, 'b d t -> b t d'))\n )\n log_class_posterior, log_rho = self.gmm(qz)\n return (x, x_hat), ((mu, log_var), seq_len), (log_class_posterior, log_rho)\n\n def review(self, inputs, outputs):\n review = super().review(inputs, outputs)\n _, (params, seq_len), (log_class_posterior, log_rho) = outputs\n class_labels = inputs[self.label_key] if self.supervised else None\n\n class_posterior = log_class_posterior.exp().detach()\n if class_labels is None:\n kld = -(class_posterior * log_rho).sum(-1)\n class_ce = -(class_posterior * self.gmm.log_class_probs).sum(-1)\n else:\n if class_labels.dim() < 2:\n class_labels = class_labels[:, None]\n class_labels = class_labels.expand(log_rho.shape[:2])\n kld = -log_rho.gather(-1, class_labels[..., None]).squeeze(-1)\n class_ce = -self.gmm.log_class_probs[class_labels]\n kld = Mean(axis=-1)(kld, seq_len)\n class_ce = Mean(axis=-1)(class_ce, seq_len)\n\n max_class_posterior, classes = torch.max(\n torch.exp(log_class_posterior), -1\n )\n review['losses'].update(dict(\n kld=kld.mean(),\n class_ce=class_ce.mean(),\n log_class_prob=self.gmm.log_class_probs.sum()\n ))\n review['scalars'] = dict(\n classes=classes.flatten(),\n )\n if self.label_key is not None:\n labels = inputs[self.label_key]\n review['scalars'].update(dict(\n labels=labels.flatten()\n ))\n review['histograms'].update(dict(\n kld_=kld.flatten(),\n log_class_probs_=self.gmm.log_class_probs.flatten(),\n max_class_posterior_=max_class_posterior.flatten(),\n classes_=classes.flatten()\n ))\n return review\n\n def modify_summary(self, summary):\n predictions = summary['scalars'].pop('classes', None)\n if predictions is not None:\n summary['scalars']['n_classes'] = len(np.unique(predictions))\n labels = summary['scalars'].pop('labels', None)\n if predictions is not None and labels is not None:\n _, labels = np.unique(labels, return_inverse=True)\n _, predictions = np.unique(predictions, return_inverse=True)\n contingency_matrix = metrics.cluster.contingency_matrix(\n labels, predictions\n )\n\n p_true_pred = contingency_matrix / contingency_matrix.sum()\n p_true = p_true_pred.sum(axis=1, keepdims=True)\n p_true_given_pred = contingency_matrix / np.maximum(contingency_matrix.sum(axis=0, keepdims=True), 1)\n h_true = -np.sum(p_true * 
np.log(np.maximum(p_true, 1e-12)))\n h_true_given_pred = -np.sum(p_true_pred * np.log(np.maximum(p_true_given_pred, 1e-12)))\n nmi = (h_true - h_true_given_pred) / h_true\n summary['scalars']['nmi'] = nmi\n\n prediction_mapping = np.argmax(contingency_matrix, axis=0)\n predictions = prediction_mapping[predictions]\n summary['scalars']['accuracy'] = np.mean(predictions == labels)\n summary = super().modify_summary(summary)\n return summary\n\n @classmethod\n def finalize_dogmatic_config(cls, config):\n super().finalize_dogmatic_config(config)\n if config['decoder']['factory'] == HybridCNNTranspose:\n config['gmm'] = {\n 'factory': GMM,\n 'feature_size': config['decoder']['cnn_transpose_1d']['in_channels']\n }\n elif config['decoder']['factory'] == CNNTranspose1d:\n config['gmm'] = {\n 'factory': GMM,\n 'feature_size': config['decoder']['in_channels']\n }\n\n\nclass HMMVAE(GMMVAE):\n \"\"\"\n >>> config = HMMVAE.get_config(dict(\\\n encoder=dict(\\\n factory=HybridCNN,\\\n input_size=80,\\\n cnn_2d=dict(\\\n in_channels=1, out_channels=3*[32], kernel_size=3, \\\n ), \\\n cnn_1d=dict(\\\n out_channels=3*[32], kernel_size=3\\\n ),\\\n ),\\\n decoder=dict(cnn_transpose_1d=dict(in_channels=16)),\\\n hmm=dict(num_units=10, viterbi_training=True, final_state=True, initial_state=True),\\\n feature_extractor=dict(\\\n factory=NormalizedLogMelExtractor,\\\n sample_rate=16000,\\\n fft_length=512,\\\n n_mels=80,\\\n ),\\\n ))\n >>> config['encoder']['cnn_1d']['in_channels']\n 2560\n >>> config['encoder']['cnn_1d']['out_channels']\n [32, 32, 32]\n >>> config['decoder']['cnn_transpose_1d']['in_channels']\n 16\n >>> hmmvae = HMMVAE.from_config(config)\n >>> inputs = {'stft': torch.rand((4, 1, 100, 257, 2)), 'seq_len': None}\n >>> outputs = hmmvae(inputs)\n >>> outputs[0][0].shape\n torch.Size([4, 1, 80, 100])\n >>> outputs[0][1].shape\n torch.Size([4, 1, 80, 100])\n >>> outputs[1][0][0].shape\n torch.Size([4, 16, 100])\n >>> outputs[1][0][1].shape\n torch.Size([4, 16, 100])\n >>> outputs[2][0].shape\n torch.Size([4, 100, 30])\n >>> outputs[2][1].shape\n torch.Size([4, 30, 30])\n >>> outputs[2][2].shape\n torch.Size([4, 100, 30])\n >>> review = hmmvae.review(inputs, outputs)\n \"\"\"\n def __init__(\n self, encoder: HybridCNN, decoder: HybridCNNTranspose, hmm: HMM, *,\n feature_key='stft', feature_extractor=None, feature_size=None,\n label_key=None, supervised=False,\n ):\n super(GMMVAE, self).__init__(\n encoder=encoder, decoder=decoder,\n feature_key=feature_key, feature_extractor=feature_extractor,\n feature_size=feature_size,\n )\n self.hmm = hmm\n self.label_key = label_key\n self.supervised = supervised\n\n def forward(self, inputs):\n (x, x_hat), ((mu, log_var), seq_len) = super(GMMVAE, self).forward(inputs)\n qz = Normal(\n loc=mu.permute((0, 2, 1)),\n scale=torch.exp(0.5 * log_var.permute((0, 2, 1)))\n )\n unit_sequence = inputs[self.label_key] if (self.supervised and self.training) else None\n no_onset = inputs['no_onset'] if 'no_onset' in inputs else False\n no_offset = inputs['no_offset'] if 'no_offset' in inputs else False\n class_posterior, transitions, log_rho = self.hmm(\n qz, seq_len=seq_len, unit_sequence=unit_sequence,\n no_onset=no_onset, no_offset=no_offset\n )\n return (x, x_hat), ((mu, log_var), seq_len), (class_posterior, transitions, log_rho)\n\n def review(self, inputs, outputs):\n review = super(GMMVAE, self).review(inputs, outputs)\n _, (params, seq_len), (class_posterior, transitions, log_rho) = outputs\n kld = -(class_posterior * log_rho).sum(-1)\n kld = Mean(axis=-1)(kld, 
seq_len)\n log_class_probs = torch.max(self.hmm.log_class_probs, -100 * torch.ones_like(self.hmm.log_class_probs))\n log_transition_mat = torch.max(self.hmm.log_transition_mat, -100 * torch.ones_like(self.hmm.log_transition_mat))\n class_ce = -(class_posterior[:, 0] * log_class_probs).sum(-1) - (transitions*log_transition_mat).sum((1,2))\n class_ce = class_ce / (transitions.sum((1, 2)) + 1)\n\n max_class_posterior, classes = torch.max(class_posterior, -1)\n review['losses'].update(dict(\n kld=kld.mean(),\n class_ce=class_ce.mean(),\n log_class_prob=log_class_probs.sum(),\n ))\n review['scalars'] = dict(\n classes=classes.flatten()//self.hmm.states_per_unit,\n )\n review['histograms'].update(dict(\n kld_=kld.flatten(),\n log_class_probs_=log_class_probs.flatten(),\n max_class_posterior_=max_class_posterior.flatten(),\n classes_=classes.flatten()\n ))\n if self.label_key is not None:\n labels = inputs[self.label_key]\n review['scalars'].update(dict(\n labels=labels.flatten()\n ))\n return review\n\n @classmethod\n def finalize_dogmatic_config(cls, config):\n super(GMMVAE, cls).finalize_dogmatic_config(config)\n\n if config['decoder']['factory'] == HybridCNNTranspose:\n config['hmm'] = {\n 'factory': HMM,\n 'feature_size': config['decoder']['cnn_transpose_1d']['in_channels']\n }\n elif config['decoder']['factory'] == CNNTranspose1d:\n config['hmm'] = {\n 'factory': HMM,\n 'feature_size': config['decoder']['in_channels']\n }\n"
] |
[
[
"torch.max",
"sklearn.metrics.cluster.contingency_matrix",
"torch.split",
"numpy.mean",
"torch.randn_like",
"numpy.argmax",
"torch.ones_like",
"torch.exp",
"numpy.unique",
"numpy.maximum"
]
] |
s1113950/models
|
[
"87261e70a902513f934413f009364c4f2eed6642"
] |
[
"models/recommendation/tensorflow/wide_deep_large_ds/dataset/preprocess_csv_tfrecords.py"
] |
[
"#\n# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2018 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# SPDX-License-Identifier: EPL-2.0\n#\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport pandas\nimport argparse\nimport numpy as np\nimport tensorflow as tf\n\ntf.enable_eager_execution()\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--csv-datafile', type=str,\n help='full path of data file e.g. eval.csv',\n dest='datafile_path',\n required=True)\n\nparser.add_argument('--outputfile-name', type=str,\n help='output tfrecord file name e.g. processed_eval.[tfrecords]',\n dest='outputfile_path',\n default=\"processed_data.tfrecords\",\n required=False)\n\nargs = parser.parse_args()\n\ncsv_file = args.datafile_path\noutput_file = args.outputfile_path\n\nif not os.path.isfile(csv_file):\n print(\"Please input a valid csv file\")\n sys.exit(1)\n\nfilename, file_ext = os.path.splitext(output_file)\nin_filename, _ = os.path.splitext(os.path.basename(csv_file))\n\nif file_ext != \".tfrecords\":\n output_file = output_file + \".tfrecords\"\n\noutput_file = \"{}_{}\".format(in_filename,output_file)\n\nif os.path.isfile(output_file):\n confirmation = input('The output file {} already exists, Do you want to overwrite it ? 
[y/N]'.format(output_file)).lower()\n if not confirmation.startswith('y'):\n sys.exit(0)\n\ncsv = pandas.read_csv(csv_file, header=None).values\n\nnumeric_feature_names = [\"numeric_1\"]\nstring_feature_names = [\"string_1\"]\n\nprint(numeric_feature_names, len(numeric_feature_names))\nLABEL_COLUMN =[\"clicked\"]\nCATEGORICAL_COLUMNS1 = [\"C\"+str(i)+\"_embedding\" for i in range(1, 27)]\nNUMERIC_COLUMNS1 = [\"I\"+str(i) for i in range(1, 14)]\nTRAIN_DATA_COLUMNS = LABEL_COLUMN+ NUMERIC_COLUMNS1 + CATEGORICAL_COLUMNS1\nCATEGORICAL_COLUMNS2 = [\"C\"+str(i)+\"_embedding\" for i in range(1, 27)]\nNUMERIC_COLUMNS2 = [\"I\"+str(i) for i in range(1, 14)]\n\nCATEGORICAL_COLUMNS1.sort()\nNUMERIC_COLUMNS1.sort()\nprint(\"categorical columns\", CATEGORICAL_COLUMNS1)\nprint(\"numeric column\", NUMERIC_COLUMNS1)\nno_of_rows = 0\n\nwith open(csv_file, 'r') as f:\n nums=[line.strip('\\n').split(',') for line in f.readlines()]\n numpy_arr = np.array(nums)\n min_list,max_list,range_list = [],[],[]\n for i in range(len(TRAIN_DATA_COLUMNS)):\n if TRAIN_DATA_COLUMNS[i] in NUMERIC_COLUMNS1:\n col_min = numpy_arr[:,i].astype(np.float32).min()\n col_max = numpy_arr[:,i].astype(np.float32).max()\n min_list.append(col_min)\n max_list.append(col_max)\n range_list.append(col_max-col_min)\n print('min list',min_list)\n print('max list',max_list)\n print('range list',range_list)\n\n\n\nwith tf.python_io.TFRecordWriter(output_file) as writer:\n print('*****Processing data******')\n for row in csv:\n no_of_rows = no_of_rows+1\n unnormalized_vals = np.array(row[1:14])\n normalized_vals = (unnormalized_vals-min_list)/range_list\n new_categorical_dict = dict(zip(CATEGORICAL_COLUMNS2, row[14:40]))\n new_categorical_list = []\n for i in CATEGORICAL_COLUMNS1:\n new_categorical_list.append(new_categorical_dict[i])\n hash_values = tf.string_to_hash_bucket_fast(\n new_categorical_list, 1000).numpy()\n new_numerical_dict = dict(zip(NUMERIC_COLUMNS2, normalized_vals))\n example = tf.train.Example()\n for i in NUMERIC_COLUMNS1:\n example.features.feature[numeric_feature_names[0]].float_list.value.extend([new_numerical_dict[i]])\n for i in range(0, 26):\n example.features.feature[string_feature_names[0]].int64_list.value.extend([i])\n example.features.feature[string_feature_names[0]].int64_list.value.extend([hash_values[i]])\n\n example.features.feature[\"label\"].int64_list.value.append(row[0])\n writer.write(example.SerializeToString())\n\nprint('Total number of rows ', no_of_rows)\nprint('Generated output file name :'+output_file)"
] |
[
[
"numpy.array",
"tensorflow.python_io.TFRecordWriter",
"tensorflow.enable_eager_execution",
"tensorflow.string_to_hash_bucket_fast",
"tensorflow.train.Example",
"pandas.read_csv"
]
] |
AmeerFaisalAdanan/Malaya
|
[
"706efa5e78e87f74981adebe9974b6df0324fe0a"
] |
[
"malaya/_utils/_utils.py"
] |
[
"from tqdm import tqdm\nimport tensorflow as tf\nimport sentencepiece as spm\nimport numpy as np\nimport requests\nimport os\nfrom pathlib import Path\nfrom .. import _delete_folder\nfrom tensorflow.contrib.seq2seq.python.ops import beam_search_ops\n\n\ndef sentencepiece_tokenizer_xlnet(path_tokenizer):\n sp_model = spm.SentencePieceProcessor()\n sp_model.Load(path_tokenizer)\n return sp_model\n\n\ndef sentencepiece_tokenizer_bert(path_tokenizer, path_vocab):\n from ..texts._text_functions import SentencePieceTokenizer\n\n sp_model = spm.SentencePieceProcessor()\n sp_model.Load(path_tokenizer)\n\n with open(path_vocab) as fopen:\n v = fopen.read().split('\\n')[:-1]\n v = [i.split('\\t') for i in v]\n v = {i[0]: i[1] for i in v}\n tokenizer = SentencePieceTokenizer(v, sp_model)\n cls = '<cls>'\n sep = '<sep>'\n return tokenizer, cls, sep\n\n\ndef add_neutral(x, alpha = 1e-2):\n x = x.copy()\n divide = 1 / x.shape[1]\n x_minus = np.maximum(x - divide, alpha * x)\n x_divide = x_minus / divide\n sum_axis = x_divide.sum(axis = 1, keepdims = True)\n return np.concatenate([x_divide, 1 - sum_axis], axis = 1)\n\n\ndef download_file(url, filename):\n if 'http' in url:\n r = requests.get(url, stream = True)\n else:\n r = requests.get(\n 'http://s3-ap-southeast-1.amazonaws.com/huseinhouse-storage/' + url,\n stream = True,\n )\n total_size = int(r.headers['content-length'])\n os.makedirs(os.path.dirname(filename), exist_ok = True)\n with open(filename, 'wb') as f:\n for data in tqdm(\n iterable = r.iter_content(chunk_size = 1048576),\n total = total_size / 1048576,\n unit = 'MB',\n unit_scale = True,\n ):\n f.write(data)\n\n\ndef generate_session(graph):\n return tf.InteractiveSession(graph = graph)\n\n\ndef load_graph(frozen_graph_filename):\n with tf.gfile.GFile(frozen_graph_filename, 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n with tf.Graph().as_default() as graph:\n tf.import_graph_def(graph_def)\n return graph\n\n\ndef check_available(file):\n for key, item in file.items():\n if 'version' in key:\n continue\n if not os.path.isfile(item):\n return False\n return True\n\n\ndef check_file(file, s3_file):\n base_location = os.path.dirname(file['model'])\n version = base_location + '/version'\n download = False\n if os.path.isfile(version):\n with open(version) as fopen:\n if not file['version'] in fopen.read():\n print('Found old version of %s, deleting..' 
% (base_location))\n _delete_folder(base_location)\n print('Done.')\n download = True\n else:\n for key, item in file.items():\n if not os.path.exists(item):\n download = True\n break\n else:\n download = True\n\n if download:\n for key, item in file.items():\n if 'version' in key:\n continue\n if not os.path.isfile(item):\n print('downloading frozen %s %s' % (base_location, key))\n download_file(s3_file[key], item)\n with open(version, 'w') as fopen:\n fopen.write(file['version'])\n\n\nclass DisplayablePath(object):\n display_filename_prefix_middle = '├──'\n display_filename_prefix_last = '└──'\n display_parent_prefix_middle = ' '\n display_parent_prefix_last = '│ '\n\n def __init__(self, path, parent_path, is_last):\n self.path = Path(str(path))\n self.parent = parent_path\n self.is_last = is_last\n if self.parent:\n self.depth = self.parent.depth + 1\n else:\n self.depth = 0\n\n @property\n def displayname(self):\n if self.path.is_dir():\n return self.path.name + '/'\n return self.path.name\n\n @classmethod\n def make_tree(cls, root, parent = None, is_last = False, criteria = None):\n root = Path(str(root))\n criteria = criteria or cls._default_criteria\n displayable_root = cls(root, parent, is_last)\n yield displayable_root\n\n children = sorted(\n list(path for path in root.iterdir() if criteria(path)),\n key = lambda s: str(s).lower(),\n )\n count = 1\n for path in children:\n is_last = count == len(children)\n if path.is_dir():\n yield from cls.make_tree(\n path,\n parent = displayable_root,\n is_last = is_last,\n criteria = criteria,\n )\n else:\n yield cls(path, displayable_root, is_last)\n count += 1\n\n @classmethod\n def _default_criteria(cls, path):\n return True\n\n @property\n def displayname(self):\n if self.path.is_dir():\n return self.path.name + '/'\n return self.path.name\n\n def displayable(self):\n if self.parent is None:\n return self.displayname\n\n _filename_prefix = (\n self.display_filename_prefix_last\n if self.is_last\n else self.display_filename_prefix_middle\n )\n\n parts = ['{!s} {!s}'.format(_filename_prefix, self.displayname)]\n\n parent = self.parent\n while parent and parent.parent is not None:\n parts.append(\n self.display_parent_prefix_middle\n if parent.is_last\n else self.display_parent_prefix_last\n )\n parent = parent.parent\n\n return ''.join(reversed(parts))\n\n\nclass _Calculator:\n def __init__(self, tokens):\n self._tokens = tokens\n self._current = tokens[0]\n\n def exp(self):\n result = self.term()\n while self._current in ('+', '-'):\n if self._current == '+':\n self.next()\n result += self.term()\n if self._current == '-':\n self.next()\n result -= self.term()\n return result\n\n def factor(self):\n result = None\n if self._current[0].isdigit() or self._current[-1].isdigit():\n result = np.array([float(i) for i in self._current.split(',')])\n self.next()\n elif self._current is '(':\n self.next()\n result = self.exp()\n self.next()\n return result\n\n def next(self):\n self._tokens = self._tokens[1:]\n self._current = self._tokens[0] if len(self._tokens) > 0 else None\n\n def term(self):\n result = self.factor()\n while self._current in ('*', '/'):\n if self._current == '*':\n self.next()\n result *= self.term()\n if self._current == '/':\n self.next()\n result /= self.term()\n return result\n"
] |
[
[
"numpy.concatenate",
"tensorflow.GraphDef",
"tensorflow.Graph",
"tensorflow.import_graph_def",
"tensorflow.gfile.GFile",
"numpy.maximum",
"tensorflow.InteractiveSession"
]
] |
vitalstarorg/docker-ml.py
|
[
"9d49739f0bf5806c2c44dc3b6f56fd0bfc4598d8"
] |
[
"bin/sample1.py"
] |
[
"#!/usr/bin/env python3\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# Data for plotting\nt = np.arange(0.0, 2.0, 0.01)\ns = 1 + np.sin(2 * np.pi * t)\n\nfig, ax = plt.subplots()\nax.plot(t, s)\n\nax.set(xlabel='time (s)', ylabel='voltage (mV)',\n title='About as simple as it gets, folks')\nax.grid()\n\n#fig.savefig(\"test.png\")\nplt.show()\n"
] |
[
[
"matplotlib.pyplot.show",
"numpy.sin",
"numpy.arange",
"matplotlib.pyplot.subplots"
]
] |
thu-pacman/AIPerf-MoE
|
[
"fda4f381b4b974721b187cece968dd7bc96a81f4",
"fda4f381b4b974721b187cece968dd7bc96a81f4"
] |
[
"megatron/model/multiple_choice.py",
"megatron/global_vars.py"
] |
[
"# coding=utf-8\n# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Multiple choice model.\"\"\"\n\nimport torch\n\nfrom megatron import get_args, print_rank_last\nfrom megatron import mpu\nfrom megatron.model.bert_model import bert_attention_mask_func, bert_extended_attention_mask, bert_position_ids\nfrom megatron.model.language_model import get_language_model\nfrom megatron.model.utils import get_linear_layer\nfrom megatron.model.utils import init_method_normal\nfrom megatron.model.utils import scaled_init_method_normal\nfrom .module import MegatronModule\n\n\nclass MultipleChoiceBase(MegatronModule):\n\n def __init__(self, num_tokentypes=2):\n super(MultipleChoiceBase, self).__init__(share_word_embeddings=False)\n args = get_args()\n\n init_method = init_method_normal(args.init_method_std)\n\n self.language_model, self._language_model_key = get_language_model(\n attention_mask_func=bert_attention_mask_func,\n num_tokentypes=num_tokentypes,\n add_pooler=True,\n init_method=init_method,\n scaled_init_method=scaled_init_method_normal(args.init_method_std,\n args.num_layers))\n\n # Multi-choice head.\n if mpu.is_pipeline_last_stage():\n self.multichoice_dropout = torch.nn.Dropout(args.hidden_dropout)\n self.multichoice_head = get_linear_layer(args.hidden_size, 1,\n init_method)\n self._multichoice_head_key = 'multichoice_head'\n\n def forward(self, model_input, attention_mask, tokentype_ids=None):\n\n # [batch, choices, sequence] --> [batch * choices, sequence] -->\n # transformer --> [batch, choices] --> softmax\n\n # Ensure the shape is [batch-size, choices, sequence]\n assert len(attention_mask.shape) == 3\n num_choices = attention_mask.shape[1]\n\n # Reshape and treat choice dimension the same as batch.\n attention_mask = attention_mask.view(-1, attention_mask.size(-1))\n extended_attention_mask = bert_extended_attention_mask(attention_mask)\n\n kwargs = {}\n if mpu.is_pipeline_first_stage():\n input_ids = model_input\n # Do the same as attention_mask for input_ids, tokentype_ids\n assert len(input_ids.shape) == 3\n assert len(tokentype_ids.shape) == 3\n input_ids = input_ids.view(-1, input_ids.size(-1))\n tokentype_ids = tokentype_ids.view(-1, tokentype_ids.size(-1))\n\n position_ids = bert_position_ids(input_ids)\n args = [input_ids, position_ids, extended_attention_mask]\n kwargs['tokentype_ids'] = tokentype_ids\n else:\n args = [model_input, extended_attention_mask]\n lm_output = self.language_model(*args, **kwargs)\n if mpu.is_pipeline_last_stage():\n _, pooled_output = lm_output\n multichoice_output = self.multichoice_dropout(pooled_output)\n multichoice_logits = self.multichoice_head(multichoice_output)\n\n # Reshape back to separate choices.\n multichoice_logits = multichoice_logits.view(-1, num_choices)\n\n return multichoice_logits\n return lm_output\n\n def state_dict_for_save_checkpoint(self, destination=None, prefix='',\n keep_vars=False):\n \"\"\"For easy load when model is combined with other heads,\n 
add an extra key.\"\"\"\n\n state_dict_ = {}\n state_dict_[self._language_model_key] \\\n = self.language_model.state_dict_for_save_checkpoint(\n destination, prefix, keep_vars)\n if mpu.is_pipeline_last_stage():\n state_dict_[self._multichoice_head_key] \\\n = self.multichoice_head.state_dict(\n destination, prefix, keep_vars)\n return state_dict_\n\n def load_state_dict(self, state_dict, strict=True):\n \"\"\"Customized load.\"\"\"\n\n self.language_model.load_state_dict(\n state_dict[self._language_model_key], strict=strict)\n if mpu.is_pipeline_last_stage():\n if self._multichoice_head_key in state_dict:\n self.multichoice_head.load_state_dict(\n state_dict[self._multichoice_head_key], strict=strict)\n else:\n print_rank_last('***WARNING*** could not find {} in the checkpoint, '\n 'initializing to random'.format(\n self._multichoice_head_key))\n\nclass MultipleChoice(MultipleChoiceBase):\n\n def __init__(self, num_tokentypes=2):\n super(MultipleChoice, self).__init__(\n num_tokentypes=num_tokentypes)\n\n def forward(self, input_ids, attention_mask,\n tokentype_ids=None):\n return super(MultipleChoice, self).forward(\n input_ids,\n attention_mask,\n tokentype_ids=tokentype_ids)\n\n\nclass MultipleChoiceFirstStage(MultipleChoiceBase):\n\n def __init__(self, num_tokentypes=2):\n super(MultipleChoiceFirstStage, self).__init__(\n num_tokentypes=num_tokentypes)\n\n def forward(self, input_ids, attention_mask,\n tokentype_ids=None):\n return super(MultipleChoiceFirstStage, self).forward(\n input_ids,\n attention_mask,\n tokentype_ids=tokentype_ids)\n\n\nclass MultipleChoiceIntermediateStage(MultipleChoiceBase):\n\n def __init__(self, num_tokentypes=2):\n super(MultipleChoiceIntermediateStage, self).__init__(\n num_tokentypes=num_tokentypes)\n\n def forward(self, hidden_state, attention_mask):\n return super(MultipleChoiceIntermediateStage, self).forward(\n hidden_state,\n attention_mask)\n\n\nclass MultipleChoiceLastStage(MultipleChoiceBase):\n\n def __init__(self, num_tokentypes=2):\n super(MultipleChoiceLastStage, self).__init__(\n num_tokentypes=num_tokentypes)\n\n def forward(self, hidden_state, attention_mask):\n return super(MultipleChoiceLastStage, self).forward(\n hidden_state,\n attention_mask)\n",
"# coding=utf-8\n# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Megatron global variables.\"\"\"\n\nimport os\nimport sys\nimport time\n\nimport torch\n\nfrom megatron.tokenizer import build_tokenizer\nfrom .arguments import parse_args\nfrom .microbatches import build_num_microbatches_calculator\n\n_GLOBAL_ARGS = None\n_GLOBAL_NUM_MICROBATCHES_CALCULATOR = None\n_GLOBAL_TOKENIZER = None\n_GLOBAL_TENSORBOARD_WRITER = None\n_GLOBAL_ADLR_AUTORESUME = None\n_GLOBAL_TIMERS = None\n\n\ndef get_args():\n \"\"\"Return arguments.\"\"\"\n _ensure_var_is_initialized(_GLOBAL_ARGS, 'args')\n return _GLOBAL_ARGS\n\n\ndef get_num_microbatches():\n return _GLOBAL_NUM_MICROBATCHES_CALCULATOR.get()\n\n\ndef get_current_global_batch_size():\n return _GLOBAL_NUM_MICROBATCHES_CALCULATOR.get_current_global_batch_size()\n\n\ndef update_num_microbatches(consumed_samples, consistency_check=True):\n _GLOBAL_NUM_MICROBATCHES_CALCULATOR.update(consumed_samples,\n consistency_check)\n\n\ndef get_tokenizer():\n \"\"\"Return tokenizer.\"\"\"\n _ensure_var_is_initialized(_GLOBAL_TOKENIZER, 'tokenizer')\n return _GLOBAL_TOKENIZER\n\n\ndef get_tensorboard_writer():\n \"\"\"Return tensorboard writer. It can be None so no need\n to check if it is initialized.\"\"\"\n return _GLOBAL_TENSORBOARD_WRITER\n\n\ndef get_adlr_autoresume():\n \"\"\"ADLR autoresume object. 
It can be None so no need\n to check if it is initialized.\"\"\"\n return _GLOBAL_ADLR_AUTORESUME\n\n\ndef get_timers():\n \"\"\"Return timers.\"\"\"\n _ensure_var_is_initialized(_GLOBAL_TIMERS, 'timers')\n return _GLOBAL_TIMERS\n\n\ndef set_global_variables(extra_args_provider=None, args_defaults={},\n ignore_unknown_args=False):\n \"\"\"Set args, tokenizer, tensorboard-writer, adlr-autoresume, and timers.\"\"\"\n args = _parse_args(extra_args_provider=extra_args_provider,\n defaults=args_defaults,\n ignore_unknown_args=ignore_unknown_args)\n _build_num_microbatches_calculator(args)\n _ = _build_tokenizer(args)\n _set_tensorboard_writer(args)\n _set_adlr_autoresume(args)\n _set_timers()\n\n\ndef _parse_args(extra_args_provider=None, defaults={},\n ignore_unknown_args=False):\n \"\"\"Parse entire arguments.\"\"\"\n global _GLOBAL_ARGS\n _ensure_var_is_not_initialized(_GLOBAL_ARGS, 'args')\n _GLOBAL_ARGS = parse_args(extra_args_provider=extra_args_provider,\n defaults=defaults,\n ignore_unknown_args=ignore_unknown_args)\n return _GLOBAL_ARGS\n\n\ndef _build_num_microbatches_calculator(args):\n\n global _GLOBAL_NUM_MICROBATCHES_CALCULATOR\n _ensure_var_is_not_initialized(_GLOBAL_NUM_MICROBATCHES_CALCULATOR,\n 'num microbatches calculator')\n\n _GLOBAL_NUM_MICROBATCHES_CALCULATOR = build_num_microbatches_calculator(\n args)\n\n\ndef _build_tokenizer(args):\n \"\"\"Initialize tokenizer.\"\"\"\n global _GLOBAL_TOKENIZER\n _ensure_var_is_not_initialized(_GLOBAL_TOKENIZER, 'tokenizer')\n _GLOBAL_TOKENIZER = build_tokenizer(args)\n return _GLOBAL_TOKENIZER\n\n\ndef rebuild_tokenizer(args):\n global _GLOBAL_TOKENIZER\n _GLOBAL_TOKENIZER = None\n return _build_tokenizer(args)\n\n\ndef _set_tensorboard_writer(args):\n \"\"\"Set tensorboard writer.\"\"\"\n global _GLOBAL_TENSORBOARD_WRITER\n _ensure_var_is_not_initialized(_GLOBAL_TENSORBOARD_WRITER,\n 'tensorboard writer')\n\n if hasattr(args, 'tensorboard_dir') and \\\n args.tensorboard_dir and args.rank == (args.world_size -1):\n try:\n from torch.utils.tensorboard import SummaryWriter\n print('> setting tensorboard ...')\n _GLOBAL_TENSORBOARD_WRITER = SummaryWriter(\n log_dir=args.tensorboard_dir)\n except ModuleNotFoundError:\n print('WARNING: TensorBoard writing requested but is not '\n 'available (are you using PyTorch 1.1.0 or later?), '\n 'no TensorBoard logs will be written.', flush=True)\n\n\ndef _set_adlr_autoresume(args):\n \"\"\"Initialize ADLR autoresume.\"\"\"\n global _GLOBAL_ADLR_AUTORESUME\n _ensure_var_is_not_initialized(_GLOBAL_ADLR_AUTORESUME, 'adlr autoresume')\n\n if args.adlr_autoresume:\n if args.rank == 0:\n print('enabling autoresume ...', flush=True)\n sys.path.append(os.environ.get('SUBMIT_SCRIPTS', '.'))\n try:\n from userlib.auto_resume import AutoResume\n except BaseException:\n print('ADLR autoresume is not available, exiting ...')\n sys.exit()\n\n _GLOBAL_ADLR_AUTORESUME = AutoResume\n\n\ndef _set_timers():\n \"\"\"Initialize timers.\"\"\"\n global _GLOBAL_TIMERS\n _ensure_var_is_not_initialized(_GLOBAL_TIMERS, 'timers')\n _GLOBAL_TIMERS = Timers()\n\n\ndef _ensure_var_is_initialized(var, name):\n \"\"\"Make sure the input variable is not None.\"\"\"\n assert var is not None, '{} is not initialized.'.format(name)\n\n\ndef _ensure_var_is_not_initialized(var, name):\n \"\"\"Make sure the input variable is not None.\"\"\"\n assert var is None, '{} is already initialized.'.format(name)\n\n\nclass _Timer:\n \"\"\"Timer.\"\"\"\n\n def __init__(self, name):\n self.name_ = name\n self.elapsed_ = 0.0\n self.started_ = False\n 
self.start_time = time.time()\n\n def start(self):\n \"\"\"Start the timer.\"\"\"\n assert not self.started_, 'timer has already been started'\n torch.cuda.synchronize()\n self.start_time = time.time()\n self.started_ = True\n\n def stop(self):\n \"\"\"Stop the timer.\"\"\"\n assert self.started_, 'timer is not started'\n torch.cuda.synchronize()\n self.elapsed_ += (time.time() - self.start_time)\n self.started_ = False\n\n def reset(self):\n \"\"\"Reset timer.\"\"\"\n self.elapsed_ = 0.0\n self.started_ = False\n\n def elapsed(self, reset=True):\n \"\"\"Calculate the elapsed time.\"\"\"\n started_ = self.started_\n # If the timing in progress, end it first.\n if self.started_:\n self.stop()\n # Get the elapsed time.\n elapsed_ = self.elapsed_\n # Reset the elapsed time\n if reset:\n self.reset()\n # If timing was in progress, set it back.\n if started_:\n self.start()\n return elapsed_\n\n\nclass Timers:\n \"\"\"Group of timers.\"\"\"\n\n def __init__(self):\n self.timers = {}\n\n def __call__(self, name):\n if name not in self.timers:\n self.timers[name] = _Timer(name)\n return self.timers[name]\n\n def write(self, names, writer, iteration, normalizer=1.0, reset=False):\n \"\"\"Write timers to a tensorboard writer\"\"\"\n # currently when using add_scalars,\n # torch.utils.add_scalars makes each timer its own run, which\n # polutes the runs list, so we just add each as a scalar\n assert normalizer > 0.0\n for name in names:\n value = self.timers[name].elapsed(reset=reset) / normalizer\n writer.add_scalar(name + '-time', value, iteration)\n\n def log(self, names, normalizer=1.0, reset=True):\n \"\"\"Log a group of timers.\"\"\"\n assert normalizer > 0.0\n string = 'time (ms)'\n for name in names:\n elapsed_time = self.timers[name].elapsed(\n reset=reset) * 1000.0 / normalizer\n string += ' | {}: {:.2f}'.format(name, elapsed_time)\n if torch.distributed.is_initialized():\n if torch.distributed.get_rank() == (\n torch.distributed.get_world_size() - 1):\n print(string, flush=True)\n else:\n print(string, flush=True)\n"
] |
[
[
"torch.nn.Dropout"
],
[
"torch.distributed.get_world_size",
"torch.cuda.synchronize",
"torch.distributed.is_initialized",
"torch.distributed.get_rank",
"torch.utils.tensorboard.SummaryWriter"
]
] |
wogong/pytorch-adda
|
[
"56ed179ced25fda3082b592e7f1751f4a62ac489"
] |
[
"core/pretrain.py"
] |
[
"\"\"\"Pre-train encoder and classifier for source dataset.\"\"\"\n\nimport torch.nn as nn\nimport torch.optim as optim\n\nfrom utils import make_variable, save_model\nfrom .test import eval\n\ndef train_src(encoder, classifier, src_data_loader, tgt_data_loader, params):\n \"\"\"Train classifier for source domain.\"\"\"\n ####################\n # 1. setup network #\n ####################\n\n # setup criterion and optimizer\n optimizer = optim.Adam(\n list(encoder.parameters()) + list(classifier.parameters()),\n lr=params.pre_learning_rate,\n betas=(params.beta1, params.beta2))\n criterion = nn.CrossEntropyLoss()\n\n ####################\n # 2. train network #\n ####################\n\n for epoch in range(params.num_epochs_pre):\n # set train state for Dropout and BN layers\n encoder.train()\n classifier.train()\n\n for step, (images, labels) in enumerate(src_data_loader):\n # make images and labels variable\n images = make_variable(images)\n labels = make_variable(labels.squeeze_())\n\n # zero gradients for optimizer\n optimizer.zero_grad()\n\n # compute loss for critic\n preds = classifier(encoder(images))\n loss = criterion(preds, labels)\n\n # optimize source classifier\n loss.backward()\n optimizer.step()\n\n # print step info\n if ((step + 1) % params.log_step_pre == 0):\n print(\"Epoch [{}/{}] Step [{}/{}]: loss={}\"\n .format(epoch + 1,\n params.num_epochs_pre,\n step + 1,\n len(src_data_loader),\n loss.data[0]))\n\n # eval model on test set\n if ((epoch + 1) % params.eval_step_pre == 0):\n print (\"eval model on source dataset\")\n eval(encoder, classifier, src_data_loader)\n print (\"eval model on target dataset\")\n eval(encoder, classifier, tgt_data_loader)\n\n # save model parameters\n if ((epoch + 1) % params.save_step_pre == 0):\n save_model(encoder, params.model_root, \"{}-source-encoder-{}.pt\".format(params.src_dataset, epoch + 1))\n save_model(classifier, params.model_root, \"{}-source-classifier-{}.pt\".format(params.src_dataset, epoch + 1))\n\n # # save final model\n save_model(encoder, params.model_root, params.src_dataset+\"-source-encoder-final.pt\")\n save_model(classifier, params.model_root, params.src_dataset+\"-source-classifier-final.pt\")\n\n return encoder, classifier\n"
] |
[
[
"torch.nn.CrossEntropyLoss"
]
] |
fanmeilin/MOST_segmentation
|
[
"31d6bd6d77ae5c3dff6aefb9adfc23ea08072ae9"
] |
[
"utils.py"
] |
[
"import os\nimport cv2\nimport pickle\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport tensorflow as tf\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\n\n# 注意权重设置能够保证至少过半样本被认为是正确样本\ndef sample_weights_boosted(y_pred, weights, percent=0.25, thres=0.99, y_true=[]):\n \"\"\"\n 参考AdaBoost算法的权重更新算法,对目标分割样本进行权重更新\n\n y_pred: 模型预测的样本分割性能 (1-D)\n weights: 对应于每个样本的权重值\n percent: 提升样本的百分比;即性能排名低于percent的样本将被提升\n thres: 分割性能的目标阈值\n y_true: 默认为空\n\n 返回值:更新后权重,模型集成的权重\n\n \"\"\"\n\n assert len(y_pred) == len(weights), '样本数量和权重数量应一致'\n\n percent_thres = np.percentile(y_pred, percent*100)\n if percent_thres < thres:\n thres = percent_thres\n y_pred = np.array([1 if x >= thres else 0 for x in y_pred])\n if len(y_true)==0:\n y_true = np.ones(y_pred.shape)\n #print(y_pred, y_true)\n\n miss = [int(x) for x in (y_pred != y_true)]\n # Equivalent with 1/-1 to update weights\n miss2 = [x if x==1 else -1 for x in miss]\n #print(miss, miss2)\n # Error\n err_m = np.dot(weights, miss)/sum(weights)\n if (err_m==0):\n # 模型完美预测结果\n return weights, 10 \n if (err_m==1):\n return weights, 0\n\n # 如果是个弱分类器;即分类准确率>0.5\n if err_m < 0.5:\n # Alpha\n alpha_m = 0.5*np.log((1-err_m)/err_m)\n # New weights\n new_weights = np.multiply(weights, np.exp([float(x) * alpha_m for x in miss2]))\n new_weights = new_weights/sum(new_weights)\n\n #print('boosted param', alpha_m, new_weights)\n else:\n alpha_m = 0\n new_weights = weights\n print('WARNING: no sample weights are boosted!')\n\n # 归一化\n return new_weights, alpha_m\n\ndef find_neighbors(maps, target):\n \"\"\"给定图像和目标像素值,找到图像里面所有目标区域(岛屿)\"\"\"\n\n # 用于记录发现的岛屿ID和对应的坐标; island_id:[(坐标),(坐标)]\n islands = {}\n # 发现岛屿的数量计数\n counter = 0\n\n # 坐标的属性; 坐标:island_id\n grid = {}\n \n # 第一步:获得全体目标区域坐标集合\n x, y = np.where(maps==target)\n pts = list(zip(x, y))\n \n # 第二步:获得目标坐标形成的岛屿\n for pt in pts:\n # 未访问的坐标\n if pt not in grid:\n cid = counter # 当前岛屿ID\n islands[cid] = [pt]\n grid[pt] = cid\n \n # 岛屿ID增长1\n counter += 1 \n else:\n cid = grid[pt]\n \n # 寻找邻居并标识\n for m,n in [(0,-1), (0,1), (-1,0), (1,0)]:\n nx, ny = pt[0]+m, pt[1]+n\n # 坐标位于岛屿候选\n if (nx, ny) in pts:\n # 未访问的邻居坐标\n if (nx, ny) not in grid:\n islands[cid].append((nx,ny))\n grid[(nx,ny)] = cid\n else:\n # 已访问的邻居坐标\n oid = grid[(nx,ny)]\n if cid != oid:\n # 合并两个岛屿,更新坐标属性,然后删除被合并岛屿\n #islands[cid].extend(islands[oid])\n #for (i, j) in islands[oid]:\n # grid[(i,j)] = [cid, 1]\n #del islands[oid] \n islands[oid].extend(islands[cid])\n for (i, j) in islands[cid]:\n grid[(i,j)] = oid\n del islands[cid] \n cid = oid \n \n return islands\n\n# 给定岛屿坐标,判断该岛屿是否被指定像素包围\ndef is_island(maps, island, background):\n \"\"\"\n 输入:\n maps - 待分析的图像,numpy矩阵\n island - 岛屿的坐标点集合\n background - 背景像素值\n \"\"\"\n \n row, col = maps.shape\n for (x, y) in island:\n if y-1>=0: \n if (maps[x, y-1]!=background) and ((x,y-1) not in island):\n return False\n \n if y+1<=col-1:\n if (maps[x, y+1]!=background) and ((x,y+1) not in island):\n return False\n\n if x-1>=0:\n if (maps[x-1, y]!=background) and ((x-1, y) not in island):\n return False\n\n if x+1<=row-1: \n if (maps[x+1, y]!=background) and ((x+1, y) not in island):\n return False\n \n return True\n\ndef annotation_fusion(annotation, prediction, mode=3):\n \"\"\"\n 基于语义分割的模型结果,融合标注数据,产生新的标注数据\n\n 输入\n prediction: 语义分割的模型预测mask (0/1)\n annotation: 人工标注的分割mask (0/1)\n mode: 不同的融合算法模式\n 1 - 模型预测+漏掉的人工标注\n 2 - 人工标注+漏掉的模型预测\n 3 - 人工标注+模型预测的并集 (默认)\n\n 返回\n fusion_ 融合后的mask\n \"\"\"\n \n pred_ = 
prediction.astype(np.int) # predicted mask\n label_ = annotation.astype(np.int) # labeled mask\n\n if (mode==1):\n pred_[pred_==0] = 999\n label_[label_==0] = 999\n label_[label_==1] = -1\n fusion = pred_+label_\n \n target = 998\n bkg = 1998\n islands = find_neighbors(fusion, target)\n masks = np.zeros(fusion.shape)\n for i,j in enumerate(islands): \n if is_island(fusion, islands[j], background=bkg):\n c = np.array([i for i,j in islands[j]])\n d = np.array([j for i,j in islands[j]])\n masks[tuple((c, d))] = 1\n fusion_ = prediction.astype(np.int)+masks\n \n elif (mode==2):\n pred_[pred_==0] = 999\n label_[label_==0] = 999\n label_[label_==1] = -1\n fusion = pred_+label_\n \n target = 1000\n bkg = 1998\n islands = find_neighbors(fusion, target)\n masks = np.zeros(fusion.shape)\n for i,j in enumerate(islands): \n if is_island(fusion, islands[j], background=bkg):\n c = np.array([i for i,j in islands[j]])\n d = np.array([j for i,j in islands[j]])\n masks[tuple((c, d))] = 1\n fusion_ = annotation.astype(np.int)+masks\n \n else:\n # label ∪ pred\n xor_ = (pred_ ^ label_)\n pred_ = xor_ + pred_\n pred_[pred_>1] = 1\n xor_ = (pred_ ^ label_)\n fusion_ = xor_ + label_\n fusion_[fusion_>1] = 1\n\n return fusion_.astype(np.uint8)\n\ndef dice_coef(y_true, y_pred, threshold=0.5):\n \"\"\"\n 计算模型预测的分割mask与标注mask之间的Dice系数值\n \n 输入:\n y_true - 二值化的图像人工标注,格式为[batch_size, width, height]\n y_pred - 模型预测的图像像素分类结果 ([0~1]), ,格式为[batch_size, width, height]\n \n \"\"\"\n \n smooth = 1.0e-5\n y_pred = ops.convert_to_tensor_v2(y_pred)\n threshold = math_ops.cast(threshold, y_pred.dtype)\n y_pred = math_ops.cast(y_pred>threshold, y_pred.dtype)\n y_true = tf.cast(y_true, y_pred.dtype)\n\n intersection = tf.reduce_sum(tf.multiply(y_true, y_pred),axis=[1,2])\n y_true_sum = tf.reduce_sum(tf.multiply(y_true, y_true),axis=[1,2])\n y_pred_sum = tf.reduce_sum(tf.multiply(y_pred, y_pred),axis=[1,2])\n dice = (2.0*intersection+smooth)/(y_true_sum+y_pred_sum+smooth)\n \n return dice\n\n\"\"\"\ndef dice_coef(y_true, y_pred, thres=0.5, morph=False, kernel_size=5):\n 计算模型预测的分割mask与标注mask之间的Dice系数值\n\n 输入:\n y_true - 二值化的图像人工标注(0/1)\n y_pred - 模型预测的图像像素分类结果 ([0~1])\n thres - 对模型预测结果进行二值化;即大于thres被认为是目标\n\n 返回:\n Dice系数值 [0~1]\n\n assert len(y_true)==len(y_pred), \"计算Dice系数值时y_true和y_pred应长度一致!\"\n\n # 对y_pred的二值化预处理\n y_pred = np.array(y_pred)\n y_pred[y_pred>=thres] = 1\n y_pred[y_pred<thres] = 0\n y_pred = np.array(y_pred, dtype=np.uint8).flatten()\n\n y_true = np.array(y_true)\n smooth = y_true.shape[-1]**2*0.01\n\n if morph:\n kernel = (kernel_size,kernel_size)\n true_E = cv2.erode(y_true, kernel, iterations=2).flatten()\n true_D = cv2.dilate(y_true, kernel, iterations=2).flatten()\n y_true_coef = (true_E-true_D+1)\n intersection = np.sum(true_E*y_pred)\n\n dice = (2*intersection+smooth)/(np.sum(true_E)+np.sum(y_pred*y_true_coef)+smooth)\n else:\n y_true = y_true.flatten()\n intersection = np.sum(y_pred*y_true)\n dice = (2.*intersection+smooth)/(np.sum(y_true)+np.sum(y_pred)+smooth)\n\n return dice\n\"\"\"\n\n# 可视化辅助函数 - soma/vessel合成\ndef merge2obj(soma, vessel, thres=1):\n \"\"\"\n Given the prediction of soma and vessel in binary format (width * height),\n this function merges soma and vessel into one single color image\n with vessels marked as red.\n \n Inputs - \n thres: the binary threshold value in given images\n \n Return - \n merged_img: the color image containing both soma and vessel\n \"\"\"\n \n assert len(soma.shape) == len(vessel.shape), \"soma and vessel image must have the same shape\"\n assert len(soma.shape) 
== 2, 'soma image must be binary or gray format'\n assert len(vessel.shape) == 2, 'vesssel image must be binary or gray format'\n \n sc=cv2.cvtColor(soma,cv2.COLOR_GRAY2BGR)\n vc=cv2.cvtColor(vessel,cv2.COLOR_GRAY2BGR)\n\n sc[sc==thres] = 255\n ind = np.where(vc[:,:,0]!=0)\n for i in range(3):\n for j in range(len(ind[0])):\n x, y = ind[0][j], ind[1][j]\n if i == 0:\n vc[x,y,i] = 255\n else:\n vc[x,y,i] = 0\n \n return vc + sc\n\n# 可视化辅助函数 - 原始图像/人工标注\ndef image_vis(image, soma_label, vessel_label):\n plt.figure(figsize=(14,6))\n\n plt.subplot(131)\n plt.imshow(image,cmap='gray')\n plt.title('Original image')\n\n plt.subplot(132)\n plt.imshow(soma_label,cmap='gray')\n plt.title('Annotated label for soma')\n\n plt.subplot(133)\n plt.imshow(vessel_label,cmap='gray')\n plt.title('Annotated label for vessel')\n\n plt.show()\n\n# 可视化辅助函数 - 原始图像/人工标注/模型预测\ndef vis_image_pred(image, soma_label, vessel_label, soma_pred, vessel_pred, thres=0.5):\n\n soma_pred[soma_pred>=thres] = 1\n soma_pred[soma_pred<thres] = 0\n vessel_pred[vessel_pred>=thres] = 1\n vessel_pred[vessel_pred<thres] = 0\n\n\n plt.figure(figsize=(12,12))\n plt.subplot(231)\n plt.imshow(image,cmap='gray')\n plt.title('Original image')\n\n plt.subplot(232)\n plt.imshow(soma_label,cmap='gray')\n plt.title('Annotated label for soma')\n\n plt.subplot(233)\n plt.imshow(vessel_label,cmap='gray')\n plt.title('Annotated label for vessel')\n\n plt.subplot(235)\n plt.imshow(soma_pred,cmap='gray')\n plt.title('Predicted label for soma')\n\n plt.subplot(236)\n plt.imshow(vessel_pred,cmap='gray')\n plt.title('Predicted label for vessel')\n\n plt.show()\n\ndef vis_training(path, validation=False, acc=\"accuracy\", epochs_of_training=100, epochs_of_boosting=10):\n \"\"\" \n 提升过程中训练曲线的可视化;包括以下变量\n \n ['loss', 'soma_loss', 'vessel_loss', 'soma_'+acc, 'vessel_acc'+acc, \n 'val_loss', 'val_soma_loss', 'val_vessel_loss', 'val_soma_'+acc, 'val_vessel_'+acc]\n \n \"\"\"\n\n import os\n import pickle\n\n loss = 'loss'\n soma_loss = 'soma_loss'\n vessel_loss = 'vessel_loss'\n soma_acc = 'soma_'+acc\n vessel_acc = 'vessel_'+acc\n title = 'Learning curve for TRAINING data'\n \n if validation:\n loss = 'val_'+loss\n soma_loss = 'val_'+soma_loss\n vessel_loss = 'val_'+vessel_loss\n soma_acc = 'val_'+soma_acc\n vessel_acc = 'val_'+vessel_acc\n title = 'Learning curve for VALIDATION data'\n \n plt.figure(figsize=(12,20))\n for epoch in range(epochs_of_boosting):\n file = os.path.join(path, 'history-epoch-'+str(epoch+1).zfill(2)+'.dat')\n with open(file, 'rb') as f:\n history = pickle.load(f)\n num = len(history[loss])\n start = int(epochs_of_training/num)\n x = range(start, epochs_of_training+1, start)\n\n plt.subplot(np.ceil(epochs_of_boosting/3),np.ceil(epochs_of_boosting/3),epoch+1)\n plt.plot(x, np.squeeze(history[loss]),'r')\n plt.plot(x, np.squeeze(history[soma_loss]),'b')\n plt.plot(x, np.squeeze(history[vessel_loss]),'g')\n\n plt.plot(x, np.squeeze(history[soma_acc]),'b--')\n plt.plot(x, np.squeeze(history[vessel_acc]),'g--')\n if epoch<np.ceil(epochs_of_boosting/3)+1:\n plt.title(title)\n if epoch>6:\n plt.xlabel('Number of epochs')\n plt.ylim([0, 1])\n plt.xlim([0, epochs_of_training])\n plt.grid()\n plt.legend(['Total Loss','Loss (soma)','Loss (vessel)','ACC (soma)', 'ACC (vessel)'])\n \n plt.show()\n\ndef vis_boosting(path, epochs_of_boosting=10):\n \"\"\"\n 提升过程中模型性能的可视化;包括在测试集和金标数据上的表现\n\n \"\"\" \n\n import os\n import pickle\n\n test_soma = np.array([])\n test_vessel = np.array([])\n gold_soma = np.array([])\n gold_vessel = np.array([])\n \n for 
epoch in range(epochs_of_boosting):\n file = os.path.join(path, 'performance-epoch-'+str(epoch+1).zfill(2)+'.dat')\n with open(file, 'rb') as f:\n test_soma_dice,test_vessel_dice,gold_soma_dice,gold_vessel_dice, sw, vw = pickle.load(f)\n \n if len(test_soma):\n test_soma = np.vstack([test_soma, np.array(test_soma_dice)])\n test_vessel = np.vstack([test_vessel, np.array(test_vessel_dice)])\n gold_soma = np.vstack([gold_soma, np.array(gold_soma_dice)]) \n gold_vessel = np.vstack([gold_vessel, np.array(gold_vessel_dice)])\n else:\n test_soma = np.array(test_soma_dice)\n test_vessel = np.array(test_vessel_dice)\n gold_soma = np.array(gold_soma_dice) \n gold_vessel = np.array(gold_vessel_dice)\n \n # 可视化曲线\n x = range(1,11)\n plt.figure(figsize=(12,16))\n \n plt.subplot(3,1,1)\n plt.plot(x, np.mean(gold_soma,axis=1),'b')\n plt.plot(x, np.mean(gold_vessel,axis=1),'g') \n plt.plot(x, np.mean(test_soma,axis=1),'b--')\n plt.plot(x, np.mean(test_vessel,axis=1),'g--') \n plt.title(\"Performance of Boosting Framework (Dice Coef)\")\n plt.xlabel('Number of boosting epochs')\n plt.ylim([0, 1])\n plt.xlim([x[0], x[-1]])\n plt.grid()\n plt.legend(['Gold(soma)','Gold(vessel)','Test(soma)','Test(vessel)'])\n \n plt.subplot(3,2,3)\n plt.plot(x, gold_soma)\n plt.title(\"Gold set (SOMA)\")\n plt.ylim([0, 1])\n plt.xlim([x[0], x[-1]])\n plt.grid()\n \n plt.subplot(3,2,4)\n plt.plot(x, gold_vessel)\n plt.title(\"Gold set (VESSEL)\")\n plt.ylim([0, 1])\n plt.xlim([x[0], x[-1]])\n plt.grid()\n \n plt.subplot(3,2,5)\n plt.plot(x, test_soma)\n plt.title(\"Test set (SOMA)\")\n plt.ylim([0, 1])\n plt.xlim([x[0], x[-1]])\n plt.xlabel('Number of boosting epochs')\n plt.grid()\n \n plt.subplot(3,2,6)\n plt.plot(x, test_vessel)\n plt.title(\"Test set (VESSEL)\")\n plt.ylim([0, 1])\n plt.xlim([x[0], x[-1]])\n plt.xlabel('Number of boosting epochs')\n plt.grid() \n \n plt.show()"
] |
[
[
"numpy.dot",
"matplotlib.pyplot.xlim",
"numpy.mean",
"numpy.where",
"tensorflow.cast",
"tensorflow.python.ops.math_ops.cast",
"numpy.log",
"matplotlib.pyplot.subplot",
"numpy.array",
"numpy.zeros",
"numpy.percentile",
"matplotlib.pyplot.title",
"matplotlib.pyplot.figure",
"tensorflow.python.framework.ops.convert_to_tensor_v2",
"matplotlib.pyplot.show",
"numpy.squeeze",
"tensorflow.multiply",
"numpy.ceil",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.plot",
"numpy.ones",
"matplotlib.pyplot.imshow"
]
] |
CCS-Lab/pytorch_car_caring
|
[
"8a36d3f689e42afa81c753b4cfa0b890ab8e1000"
] |
[
"src/models/dqn_linear.py"
] |
[
"import torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass DQNLinear(nn.Module):\n \"\"\"\n A feedforward, non-convolutional network.\n There is nothing about this architecture which is specific to Deep-q-learning - in fact,\n the algorithm's performance should be fairly robust to the number and sizes of layers.\n \"\"\"\n\n def __init__(self, num_inputs, num_actions):\n \"\"\"\n :param num_inputs: Number of inputs in the openai gym env's state\n :param num_actions: Number of actions in the env\n \"\"\"\n super(DQNLinear, self).__init__()\n self.linear1 = nn.Linear(num_inputs, 128)\n self.linear2 = nn.Linear(128, 256)\n self.linear3 = nn.Linear(256, 64)\n self.linear4 = nn.Linear(64, num_actions)\n\n def forward(self, x):\n x = F.relu(self.linear1(x))\n x = F.relu(self.linear2(x))\n x = F.relu(self.linear3(x))\n return self.linear4(x)\n"
] |
[
[
"torch.nn.Linear"
]
] |
vermashresth/chainer-compiler
|
[
"5f5ad365d14398d6ae0214fa012eb10360db8e7e",
"5f5ad365d14398d6ae0214fa012eb10360db8e7e"
] |
[
"testcases/ch2o_tests/node/EmbedID.py",
"testcases/ch2o_tests/model/seq2seq.py"
] |
[
"# coding: utf-8\n\nimport chainer\nimport chainer.links as L\n\n# Network definition\n\n\nclass A(chainer.Chain):\n\n def __init__(self, n_vocab, n_out):\n super(A, self).__init__()\n with self.init_scope():\n self.l1 = L.EmbedID(n_vocab, n_out)\n\n def forward(self, x):\n return self.l1(x)\n\n# ======================================\n\n\nfrom chainer_compiler import ch2o\n\nif __name__ == '__main__':\n import numpy as np\n np.random.seed(314)\n\n n_vocab = 7\n n_out = 3\n n_batch = 5\n\n model = A(n_vocab, n_out)\n\n v = np.random.randint(n_vocab, size=n_batch)\n ch2o.generate_testcase(model, [v], backprop=True)\n",
"#!/usr/bin/env python\n\nimport argparse\nimport datetime\n\nimport numpy\n\nimport chainer\nfrom chainer.backends import cuda\nimport chainer.functions as F\nimport chainer.links as L\nfrom chainer import training\nfrom chainer.training import extensions\n\n\nUNK = 0\nEOS = 1\n\n\ndef sequence_embed(embed, xs):\n x_len = [len(x) for x in xs]\n x_section = numpy.cumsum(x_len[:-1])\n ex = embed(F.concat(xs, axis=0))\n exs = F.split_axis(ex, x_section, 0)\n return exs\n\n\nclass Seq2seq(chainer.Chain):\n\n def __init__(self, n_layers, n_source_vocab, n_target_vocab, n_units):\n super(Seq2seq, self).__init__()\n with self.init_scope():\n self.embed_x = L.EmbedID(n_source_vocab, n_units)\n self.embed_y = L.EmbedID(n_target_vocab, n_units)\n self.encoder = L.NStepLSTM(n_layers, n_units, n_units, 0.1)\n self.decoder = L.NStepLSTM(n_layers, n_units, n_units, 0.1)\n self.W = L.Linear(n_units, n_target_vocab)\n\n self.n_layers = n_layers\n self.n_units = n_units\n\n def forward(self, xs, ys):\n xs = [x[::-1] for x in xs]\n\n eos = self.xp.array([EOS], numpy.int32)\n ys_in = [F.concat([eos, y], axis=0) for y in ys]\n ys_out = [F.concat([y, eos], axis=0) for y in ys]\n\n # Both xs and ys_in are lists of arrays.\n exs = sequence_embed(self.embed_x, xs)\n eys = sequence_embed(self.embed_y, ys_in)\n\n batch = len(xs)\n # None represents a zero vector in an encoder.\n hx, cx, _ = self.encoder(None, None, exs)\n _, _, os = self.decoder(hx, cx, eys)\n\n # It is faster to concatenate data before calculating loss\n # because only one matrix multiplication is called.\n concat_os = F.concat(os, axis=0)\n concat_ys_out = F.concat(ys_out, axis=0)\n loss = F.sum(F.softmax_cross_entropy(\n self.W(concat_os), concat_ys_out, reduce='no')) / batch\n\n chainer.report({'loss': loss.data}, self)\n n_words = concat_ys_out.shape[0]\n perp = self.xp.exp(loss.data * batch / n_words)\n chainer.report({'perp': perp}, self)\n return loss\n\n\n# from https://github.com/chainer/chainer/blob/master/examples/seq2seq/seq2seq.py\n\nfrom chainer_compiler import ch2o\n\nif __name__ == '__main__':\n import numpy as np\n np.random.seed(314)\n\n svn = 8\n tvn = 6\n model = Seq2seq(5, svn, tvn, 4)\n\n v = np.random.rand(7, 4, 2).astype(np.float32)\n w = np.random.rand(7, 4, 2).astype(np.float32)\n\n # TODO(hamaji): Get this pass with elichika.\n # ch2o.generate_testcase(model, [v,w])\n"
] |
[
[
"numpy.random.seed",
"numpy.random.randint"
],
[
"numpy.random.seed",
"numpy.random.rand",
"numpy.cumsum"
]
] |
sylvanding/detect-financial-risk
|
[
"dd2a143bb3c3a0fe943261954f3d836bbee08983"
] |
[
"website/model_pred.py"
] |
[
"# -*- coding: utf-8 -*-\nimport warnings\nwarnings.simplefilter(action='ignore', category=Warning)\nimport numpy as np\nfrom tensorflow.keras.models import load_model\n\n\ndef predictFinRisk(fin_ind):\n \"\"\"\n Predict a company's financial risk according to its indexes.\n :param fin_ind: financial indexes\n :return: the probability to be at a great financial condition\n \"\"\"\n # load model\n model_path = 'BPNNmodel.h5'\n model = load_model(model_path)\n # calculate factor scores\n component_path = 'ComponentScoreCoefficient.csv'\n coefficient = np.loadtxt(component_path, dtype=np.float, delimiter=',')\n factors = np.dot(np.asarray(fin_ind), coefficient)\n # Z-score the factors\n zscore_path = 'ZscoreMeanandStd.csv'\n zscore = np.loadtxt(zscore_path, dtype=np.float, delimiter=',')\n factors = (factors - zscore[0, :]) / zscore[1, :]\n # predict probability\n probability = model.predict(factors.reshape(1,-1))\n return probability[0][1]*100\n\n\ndef order_param(multiForm: dict):\n condition = (('x11', (-9999999, 999999999)),\n ('x12', (-9999999, 999999999)),\n ('x13', (-9999999, 999999999)),\n ('x14', (-9999999, 999999999)),\n ('x15', (-9999999, 999999999)),\n ('x21', (-9999999, 999999999)),\n ('x22', (-9999999, 999999999)),\n ('x23', (-9999999, 999999999)),\n ('x24', (-9999999, 999999999)),\n ('x25', (-9999999, 999999999)),\n ('x31', (-9999999, 999999999)),\n ('x32', (-9999999, 999999999)),\n ('x41', (-9999999, 999999999)),\n ('x42', (-9999999, 999999999)),\n ('x43', (-9999999, 999999999)),\n ('x44', (-9999999, 999999999)),\n ('x45', (-9999999, 999999999)),\n ('x51', (-9999999, 999999999)),\n ('x52', (0, 2)),\n ('x53', (0, 2)),\n ('x54', (0, 1)))\n ordered = {}\n for i in condition:\n try:\n d = float(multiForm['form_'+i[0]])\n if i[1][0] <= d <= i[1][1]:\n ordered[i[0]] = d\n else:\n raise Exception\n except:\n return None\n for i in range(1, 4):\n ordered[condition[-i][0]] = int(ordered[condition[-i][0]])\n return ordered\n"
] |
[
[
"tensorflow.keras.models.load_model",
"numpy.loadtxt",
"numpy.asarray"
]
] |
Florian-Barthel/stylegan2
|
[
"4ef87038bf9370596cf2b729e1d1a1bc3ebcddd8",
"4ef87038bf9370596cf2b729e1d1a1bc3ebcddd8",
"4ef87038bf9370596cf2b729e1d1a1bc3ebcddd8"
] |
[
"dnnlib/tflib/ops/fused_bias_act.py",
"sandbox/unit_circle_animation/unit_circle_multiple.py",
"3_rotation_experiments/05_new_label/classifier/confusion_matrix.py"
] |
[
"# Copyright (c) 2019, NVIDIA Corporation. All rights reserved.\n#\n# This work is made available under the Nvidia Source Code License-NC.\n# To view a copy of this license, visit\n# https://nvlabs.github.io/stylegan2/license.html\n\n\"\"\"Custom TensorFlow ops for efficient bias and activation.\"\"\"\n\nimport os\nimport numpy as np\nimport tensorflow as tf\nfrom .. import custom_ops\nfrom ...util import EasyDict\n\ndef _get_plugin():\n return custom_ops.get_plugin(os.path.splitext(__file__)[0] + '.cu')\n\n#----------------------------------------------------------------------------\n\nactivation_funcs = {\n 'linear': EasyDict(func=lambda x, **_: x, def_alpha=None, def_gain=1.0, cuda_idx=1, ref='y', zero_2nd_grad=True),\n 'relu': EasyDict(func=lambda x, **_: tf.nn.relu(x), def_alpha=None, def_gain=np.sqrt(2), cuda_idx=2, ref='y', zero_2nd_grad=True),\n 'lrelu': EasyDict(func=lambda x, alpha, **_: tf.nn.leaky_relu(x, alpha), def_alpha=0.2, def_gain=np.sqrt(2), cuda_idx=3, ref='y', zero_2nd_grad=True),\n 'tanh': EasyDict(func=lambda x, **_: tf.nn.tanh(x), def_alpha=None, def_gain=1.0, cuda_idx=4, ref='y', zero_2nd_grad=False),\n 'sigmoid': EasyDict(func=lambda x, **_: tf.nn.sigmoid(x), def_alpha=None, def_gain=1.0, cuda_idx=5, ref='y', zero_2nd_grad=False),\n 'elu': EasyDict(func=lambda x, **_: tf.nn.elu(x), def_alpha=None, def_gain=1.0, cuda_idx=6, ref='y', zero_2nd_grad=False),\n 'selu': EasyDict(func=lambda x, **_: tf.nn.selu(x), def_alpha=None, def_gain=1.0, cuda_idx=7, ref='y', zero_2nd_grad=False),\n 'softplus': EasyDict(func=lambda x, **_: tf.nn.softplus(x), def_alpha=None, def_gain=1.0, cuda_idx=8, ref='y', zero_2nd_grad=False),\n 'swish': EasyDict(func=lambda x, **_: tf.nn.sigmoid(x) * x, def_alpha=None, def_gain=np.sqrt(2), cuda_idx=9, ref='x', zero_2nd_grad=False),\n 'softmax': EasyDict(func=lambda x, **_: tf.nn.softmax(x), def_alpha=None, def_gain=1.0, cuda_idx=10, ref='y', zero_2nd_grad=False)\n}\n\n#----------------------------------------------------------------------------\n\ndef fused_bias_act(x, b=None, axis=1, act='linear', alpha=None, gain=None, impl='ref'):\n r\"\"\"Fused bias and activation function.\n\n Adds bias `b` to activation tensor `x`, evaluates activation function `act`,\n and scales the result by `gain`. Each of the steps is optional. In most cases,\n the fused op is considerably more efficient than performing the same calculation\n using standard TensorFlow ops. It supports first and second order gradients,\n but not third order gradients.\n\n Args:\n x: Input activation tensor. Can have any shape, but if `b` is defined, the\n dimension corresponding to `axis`, as well as the rank, must be known.\n b: Bias vector, or `None` to disable. Must be a 1D tensor of the same type\n as `x`. The shape must be known, and it must match the dimension of `x`\n corresponding to `axis`.\n axis: The dimension in `x` corresponding to the elements of `b`.\n The value of `axis` is ignored if `b` is not specified.\n act: Name of the activation function to evaluate, or `\"linear\"` to disable.\n Can be e.g. `\"relu\"`, `\"lrelu\"`, `\"tanh\"`, `\"sigmoid\"`, `\"swish\"`, etc.\n See `activation_funcs` for a full list. `None` is not allowed.\n alpha: Shape parameter for the activation function, or `None` to use the default.\n gain: Scaling factor for the output tensor, or `None` to use default.\n See `activation_funcs` for the default scaling of each activation function.\n If unsure, consider specifying `1.0`.\n impl: Name of the implementation to use. 
Can be `\"ref\"` or `\"cuda\"` (default).\n\n Returns:\n Tensor of the same shape and datatype as `x`.\n \"\"\"\n\n impl_dict = {\n 'ref': _fused_bias_act_ref,\n 'cuda': _fused_bias_act_cuda,\n }\n return impl_dict[impl](x=x, b=b, axis=axis, act=act, alpha=alpha, gain=gain)\n\n#----------------------------------------------------------------------------\n\ndef _fused_bias_act_ref(x, b, axis, act, alpha, gain):\n \"\"\"Slow reference implementation of `fused_bias_act()` using standard TensorFlow ops.\"\"\"\n\n # Validate arguments.\n x = tf.convert_to_tensor(x)\n b = tf.convert_to_tensor(b) if b is not None else tf.constant([], dtype=x.dtype)\n act_spec = activation_funcs[act]\n assert b.shape.rank == 1 and (b.shape[0] == 0 or b.shape[0] == x.shape[axis])\n assert b.shape[0] == 0 or 0 <= axis < x.shape.rank\n if alpha is None:\n alpha = act_spec.def_alpha\n if gain is None:\n gain = act_spec.def_gain\n\n # Add bias.\n if b.shape[0] != 0:\n x += tf.reshape(b, [-1 if i == axis else 1 for i in range(x.shape.rank)])\n\n # Evaluate activation function.\n x = act_spec.func(x, alpha=alpha)\n\n # Scale by gain.\n if gain != 1:\n x *= gain\n return x\n\n#----------------------------------------------------------------------------\n\ndef _fused_bias_act_cuda(x, b, axis, act, alpha, gain):\n \"\"\"Fast CUDA implementation of `fused_bias_act()` using custom ops.\"\"\"\n\n # Validate arguments.\n x = tf.convert_to_tensor(x)\n empty_tensor = tf.constant([], dtype=x.dtype)\n b = tf.convert_to_tensor(b) if b is not None else empty_tensor\n act_spec = activation_funcs[act]\n assert b.shape.rank == 1 and (b.shape[0] == 0 or b.shape[0] == x.shape[axis])\n assert b.shape[0] == 0 or 0 <= axis < x.shape.rank\n if alpha is None:\n alpha = act_spec.def_alpha\n if gain is None:\n gain = act_spec.def_gain\n\n # Special cases.\n if act == 'linear' and b is None and gain == 1.0:\n return x\n if act_spec.cuda_idx is None:\n return _fused_bias_act_ref(x=x, b=b, axis=axis, act=act, alpha=alpha, gain=gain)\n\n # CUDA kernel.\n cuda_kernel = _get_plugin().fused_bias_act\n cuda_kwargs = dict(axis=axis, act=act_spec.cuda_idx, alpha=alpha, gain=gain)\n\n # Forward pass: y = func(x, b).\n def func_y(x, b):\n y = cuda_kernel(x=x, b=b, ref=empty_tensor, grad=0, **cuda_kwargs)\n y.set_shape(x.shape)\n return y\n\n # Backward pass: dx, db = grad(dy, x, y)\n def grad_dx(dy, x, y):\n ref = {'x': x, 'y': y}[act_spec.ref]\n dx = cuda_kernel(x=dy, b=empty_tensor, ref=ref, grad=1, **cuda_kwargs)\n dx.set_shape(x.shape)\n return dx\n def grad_db(dx):\n if b.shape[0] == 0:\n return empty_tensor\n db = dx\n if axis < x.shape.rank - 1:\n db = tf.reduce_sum(db, list(range(axis + 1, x.shape.rank)))\n if axis > 0:\n db = tf.reduce_sum(db, list(range(axis)))\n db.set_shape(b.shape)\n return db\n\n # Second order gradients: d_dy, d_x = grad2(d_dx, d_db, x, y)\n def grad2_d_dy(d_dx, d_db, x, y):\n ref = {'x': x, 'y': y}[act_spec.ref]\n d_dy = cuda_kernel(x=d_dx, b=d_db, ref=ref, grad=1, **cuda_kwargs)\n d_dy.set_shape(x.shape)\n return d_dy\n def grad2_d_x(d_dx, d_db, x, y):\n ref = {'x': x, 'y': y}[act_spec.ref]\n d_x = cuda_kernel(x=d_dx, b=d_db, ref=ref, grad=2, **cuda_kwargs)\n d_x.set_shape(x.shape)\n return d_x\n\n # Fast version for piecewise-linear activation funcs.\n @tf.custom_gradient\n def func_zero_2nd_grad(x, b):\n y = func_y(x, b)\n @tf.custom_gradient\n def grad(dy):\n dx = grad_dx(dy, x, y)\n db = grad_db(dx)\n def grad2(d_dx, d_db):\n d_dy = grad2_d_dy(d_dx, d_db, x, y)\n return d_dy\n return (dx, db), grad2\n return y, grad\n\n 
# Slow version for general activation funcs.\n @tf.custom_gradient\n def func_nonzero_2nd_grad(x, b):\n y = func_y(x, b)\n def grad_wrap(dy):\n @tf.custom_gradient\n def grad_impl(dy, x):\n dx = grad_dx(dy, x, y)\n db = grad_db(dx)\n def grad2(d_dx, d_db):\n d_dy = grad2_d_dy(d_dx, d_db, x, y)\n d_x = grad2_d_x(d_dx, d_db, x, y)\n return d_dy, d_x\n return (dx, db), grad2\n return grad_impl(dy, x)\n return y, grad_wrap\n\n # Which version to use?\n if act_spec.zero_2nd_grad:\n return func_zero_2nd_grad(x, b)\n return func_nonzero_2nd_grad(x, b)\n\n#----------------------------------------------------------------------------\n",
"import numpy as np\nimport matplotlib.pyplot as plt\nimport dnnlib.tflib as tflib\nimport training.misc as misc\nimport os\nfrom training import dataset\nfrom tqdm import tqdm\nimport tensorflow as tf\n\ntflib.init_tf()\n\nfig, axes = plt.subplots(2, 4, subplot_kw=dict(projection='polar'), figsize=(16, 9))\nfig.tight_layout(pad=3.0)\n\nrotation_offset = 108\ntfrecord_dir = '../../datasets/car_labels_new_rotation_oversample'\ndset = dataset.TFRecordDataset(tfrecord_dir)\nbatch_size = 8\ndset.configure(batch_size)\n\n# network_dir = '../../results/00190-stylegan2-car_labels_new_rotation_oversample-2gpu-config-f-euclidean-interpolate-50-percent_continue_189-v7-256'\n# network_dir = '../../results/00189-stylegan2-car_labels_new_rotation_oversample-2gpu-config-f-euclidean-v7-256-resume-00274'\n\nnetwork_dir = '../../results/00207-stylegan2-car_labels_v7_oversample_filter-2gpu-config-f-squared_euclidean_10_interpolate_50_percent_int_reg_remove_half_fl_fr_square-256'\n\nfile = sorted(os.listdir(network_dir))[-1]\nG, D, Gs = misc.load_pkl(network_dir + '/' + file)\nmean_dist_disc = []\nmean_dist_gen = []\n\ntitles = [\n 'Front Center',\n 'Front Left',\n 'Profile Left',\n 'Rear Left',\n 'Rear Center',\n 'Rear Right',\n 'Profile Right',\n 'Front Right'\n]\n\nall_rotations = np.array([\n [1.0, 0.0],\n [0.7071, 0.7071],\n [0.0, 1.0],\n [-0.7071, 0.7071],\n [-1.0, 0.0],\n [-0.7071, -0.7071],\n [0.0, -1.0],\n [0.7071, -0.7071],\n [0., 0.]\n],\n dtype=np.float32\n)\nall_rotations = np.tile(np.expand_dims(all_rotations, axis=0), [batch_size, 1, 1])\nall_label_rotations = [[], [], [], [], [], [], [], [], []]\nall_fake_rotations = [[], [], [], [], [], [], [], [], []]\nall_pred_rotations = [[], [], [], [], [], [], [], [], []]\n\nlabel_placeholder = tf.placeholder(tf.float32, shape=G.input_shapes[1])\nimage_placeholder = tf.placeholder(tf.float32, shape=D.input_shapes[0])\n\nlatents = tf.random_normal(shape=[batch_size] + G.input_shapes[0][1:])\nfake_images = G.get_output_for(latents, label_placeholder, is_training=True)\nreal_scores = D.get_output_for(image_placeholder, label_placeholder, is_training=True)\nfake_scores = D.get_output_for(fake_images, label_placeholder, is_training=True)\n\nfor i in tqdm(range(30)):\n images, labels = dset.get_minibatch_np(batch_size)\n images = misc.adjust_dynamic_range(images, [0, 255], [-1, 1])\n fake_images_out, real_scores_out, fake_scores_out = tflib.run([fake_images, real_scores, fake_scores],\n feed_dict={\n label_placeholder: labels,\n image_placeholder: images\n })\n\n labels_rotation = labels[:, rotation_offset:rotation_offset + 2]\n fake_rotations = fake_scores_out[:, rotation_offset:rotation_offset + 2]\n pred_rotations = real_scores_out[:, rotation_offset:rotation_offset + 2]\n\n labels_rotation_tile = np.tile(np.expand_dims(labels_rotation, axis=1), [1, 9, 1])\n distances = np.linalg.norm(all_rotations - labels_rotation_tile, axis=-1)\n rotation_index = np.argmin(distances, axis=-1)\n print(rotation_index)\n for j in range(batch_size):\n all_label_rotations[rotation_index[j]].append(labels_rotation[j])\n all_fake_rotations[rotation_index[j]].append(fake_rotations[j])\n all_pred_rotations[rotation_index[j]].append(pred_rotations[j])\n\nindex = 0\n\nfor i in range(2):\n for j in range(4):\n mean_disc = 0\n mean_gen = 0\n for k in range(len(all_label_rotations[index])):\n ax = axes[i, j]\n ax.set_rscale('linear')\n current_label = np.array(all_label_rotations[index][k])\n current_pred = np.array(all_pred_rotations[index][k])\n current_fake = 
np.array(all_fake_rotations[index][k])\n mean_disc += np.linalg.norm(current_label - current_pred, ord=2)\n mean_gen += np.linalg.norm(current_label - current_fake, ord=2)\n\n abs_label_real = [np.linalg.norm(current_label, ord=2), np.linalg.norm(current_pred, ord=2)]\n angle_label_real = [np.arctan2(current_label[1], current_label[0]),\n np.arctan2(current_pred[1], current_pred[0])]\n ax.plot(angle_label_real, abs_label_real, c='green')\n ax.scatter(angle_label_real, abs_label_real, c='green')\n\n abs_label_fake = [np.linalg.norm(current_label, ord=2), np.linalg.norm(current_fake, ord=2)]\n angle_label_fake = [np.arctan2(current_label[1], current_label[0]),\n np.arctan2(current_fake[1], current_fake[0])]\n ax.plot(angle_label_fake, abs_label_fake, c='red')\n ax.scatter(angle_label_fake, abs_label_fake, c='red')\n\n # abs_zero_fake = [0, np.linalg.norm(current_fake, ord=2)]\n # angle_zero_fake = [0, np.arctan2(current_fake[1], current_fake[0])]\n # ax.plot(angle_zero_fake, abs_zero_fake, c='orange')\n # ax.scatter(angle_zero_fake, abs_zero_fake, c='orange')\n mean_disc /= len(all_label_rotations[index])\n mean_gen /= len(all_label_rotations[index])\n mean_dist_disc.append(mean_disc)\n mean_dist_gen.append(mean_gen)\n index += 1\n\nindex = 0\nfor i in range(2):\n for j in range(4):\n ax = axes[i, j]\n ax.title.set_text(titles[index] + '\\nmean_dist_disc: ' + str(np.round(mean_dist_disc[index], 4)) + '\\nmean_dist_gen: ' + str(np.round(mean_dist_gen[index], 4)))\n index += 1\n\nplt.savefig(\"plot_circle_multiple_207.png\")\nplt.show()\n",
"import seaborn as sn\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport result_models\nfrom dnnlib import tflib\nfrom training import dataset\nimport tensorflow as tf\nfrom training import misc\nfrom tqdm import tqdm\nimport numpy as np\n\ntflib.init_tf()\ndset = dataset.TFRecordDataset('./../../../datasets/car_images_256', label_file='./../../../datasets/car_labels/cars_v5-rxx.labels', max_label_size='full', repeat=False, shuffle_mb=0)\ndset.configure(minibatch_size=10)\nrotation_offset = 108\n\nclassifier = result_models.load_rotation_classifier_one_hot('./../../../results')\n\nx, labels = dset.get_minibatch_tf()\nx = tf.cast(x, tf.float32)\nx = misc.adjust_dynamic_range(x, [0, 255], [-1, 1])\nlabels = labels[:, rotation_offset:rotation_offset + 8]\nprediction = classifier.get_output_for(x)\n\nprediction_out_list = []\nlabel_out_list = []\niterations = 1000\nfor i in tqdm(range(iterations)):\n prediction_out, label_out = tflib.run([prediction, labels])\n prediction_out_list.append(prediction_out)\n label_out_list.append(label_out)\n\nlabel_out_list = np.reshape(np.stack(label_out_list, axis=0), [10 * iterations, 8])\nprediction_out_list = np.reshape(np.stack(prediction_out_list, axis=0), [10 * iterations, 8])\nindices_labels = np.argmax(label_out_list, axis=1)\nindices_prediction = np.argmax(prediction_out_list, axis=1)\narray = np.zeros([8, 8])\n\nfor i in range(label_out_list.shape[0]):\n array[indices_prediction[i], indices_labels[i]] += 1\n\nsum_x_axis = np.tile(np.expand_dims(np.sum(array, axis=1), axis=-1), [1, 8])\narray = array / sum_x_axis\n\nrotations = [\n 'Front Center',\n 'Front Left',\n 'Profile Left',\n 'Rear Left',\n 'Rear Center',\n 'Rear Right',\n 'Profile Right',\n 'Front Right'\n]\ndf_cm = pd.DataFrame(array, index=[i for i in rotations],\n columns=[i for i in rotations])\nplt.figure(figsize=(9, 5))\n\nsn.heatmap(df_cm, annot=True, cmap=\"YlGnBu\", cbar=False, fmt='.3f')\n\nplt.title('Confusion Matrix for the One-Hot Classifier')\nplt.xlabel('prediction')\nplt.ylabel('label')\nplt.subplots_adjust(bottom=0.15, left=0.15)\nplt.savefig('confusion_matrix.png')\nplt.show()\n"
] |
[
[
"tensorflow.convert_to_tensor",
"tensorflow.nn.relu",
"tensorflow.nn.leaky_relu",
"tensorflow.constant",
"tensorflow.nn.selu",
"tensorflow.nn.softplus",
"tensorflow.nn.softmax",
"numpy.sqrt",
"tensorflow.nn.elu",
"tensorflow.nn.tanh",
"tensorflow.nn.sigmoid"
],
[
"numpy.array",
"numpy.linalg.norm",
"numpy.argmin",
"numpy.round",
"matplotlib.pyplot.savefig",
"tensorflow.placeholder",
"numpy.arctan2",
"matplotlib.pyplot.show",
"tensorflow.random_normal",
"numpy.expand_dims"
],
[
"numpy.zeros",
"pandas.DataFrame",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.title",
"matplotlib.pyplot.savefig",
"numpy.sum",
"matplotlib.pyplot.figure",
"numpy.stack",
"numpy.argmax",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots_adjust",
"tensorflow.cast"
]
] |
spatialaudio/aes148-shelving-filter
|
[
"a11de97d6be79c23ffc55084ca95d9da15f3e3eb"
] |
[
"python/mkfig-ripple-and-zp-frequency.py"
] |
[
"\"\"\"Shelving Filter Cascade with Adjustable Transition Slope and Bandwidth\nFrank Schultz, Nara Hahn, Sascha Spors\nIn: Proc. of 148th AES Convention, Virtual Vienna, May 2020, Paper 10339\nhttp://www.aes.org/e-lib/browse.cfm?elib=20756\n\nFig. 4c, slide 10/12\n\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nfrom matplotlib.ticker import MultipleLocator\nfrom scipy.signal import sos2zpk\nfrom util import low_shelving_2nd_cascade, shelving_filter_parameters, \\\n shelving_slope_parameters, sosfreqs, db, set_rcparams, \\\n set_outdir\n\nset_rcparams()\noutdir = set_outdir()\n\nBiquad_per_octave = 2/3, 1, 2\nlabels = ['2/3', '1', '3']\n\nw0 = 1\nBWd = 9\nQ = 1 / np.sqrt(2)\nGd = 10 * np.log10(0.5) * BWd\nslope = shelving_slope_parameters(BWd=BWd, Gd=Gd)[0]\n\n# Frequency-domain evaluation\nwmin, wmax, num_w = 2**-9.5, 2**4.5, 1000\nw = np.logspace(np.log10(wmin), np.log10(wmax), num=num_w)\n\n# Filter design\nshelving_filters = []\nzpks = []\nH = np.zeros((len(Biquad_per_octave), num_w), dtype='complex')\nGain = np.zeros(len(Biquad_per_octave))\nfor n, biquad_per_octave in enumerate(Biquad_per_octave):\n num_biquad, Gb, G = \\\n shelving_filter_parameters(biquad_per_octave=biquad_per_octave,\n Gd=Gd, BWd=BWd)\n Gain[n] = G\n sos = low_shelving_2nd_cascade(w0, Gb, num_biquad, biquad_per_octave)\n H[n] = sosfreqs(sos, worN=w)[1]\n shelving_filters.append(sos)\n zpks.append(sos2zpk(sos))\n\n# desired response\nwl, wh = w0 * 2**(-BWd), w0\nHmag = np.clip(np.log2(w/w0) * slope, G, 0)\nHmag = np.log2(w/w0) * slope\n\n# Plots\nGlim = -0.21, 0.21\nphilim = -3, 47\nwlim = wmin, wmax\nwticks = 2**(np.arange(np.ceil(np.log2(w)[0]/2)*2,\n np.floor(np.log2(w[-1])/2)*2 + 2, 2))\nkw = {'lw': 2, 'alpha': 1, 'basex': 2}\ncolors = cm.get_cmap('Blues')\n\n# frequency response\nfig, ax = plt.subplots(figsize=(10, 4), ncols=2, gridspec_kw={'wspace': 0.25})\nfor n, (biquad_per_octave, H_n) in enumerate(zip(Biquad_per_octave, H)):\n col = colors((n + 2) / (len(H) + 2))\n ax[0].semilogx(w, db(H_n) - Hmag, c=col, **kw,\n label='{:0.0f}'.format(biquad_per_octave))\n\n# Zeros and poles\nkw_p = dict(c='k', marker='x', ls='none')\nkw_z = dict(marker='o', mew=0.75, ls='none', mfc='none')\nfor n, (zpk) in enumerate(zpks):\n z, p, _ = zpk\n num_pole, num_zero = len(z), len(p)\n voffset = -n\n col = colors((n + 2) / (len(H) + 2))\n ax[1].plot(np.abs(p), voffset * np.ones(num_pole), **kw_p)\n ax[1].plot(np.abs(z), voffset * np.ones(num_zero), c=col, **kw_z)\nax[1].set_xscale('log', basex=2)\n\n# decorations\nax[0].set_xlim(wmin, wmax)\nax[0].set_ylim(Glim)\nax[0].set_xticks(wticks)\nax[0].grid(True)\nax[0].yaxis.set_major_locator(MultipleLocator(0.1))\nax[0].yaxis.set_minor_locator(MultipleLocator(0.05))\nax[0].legend(labels, title='Biquad per octave', loc='upper right',\n facecolor='w', fontsize=10)\nax[0].set_xlabel(r'$\\omega$ / $\\omega_\\textrm{\\footnotesize u}$')\nax[0].set_ylabel('Level Error in dB')\nax[1].set_xlim(wmin, wmax)\nax[1].set_ylim(-2.5, 0.5)\nax[1].set_xticks(wticks)\nax[1].grid(True)\nax[1].yaxis.set_major_locator(MultipleLocator(1))\nax[1].set_yticks([0, -1, -2])\nax[1].set_yticklabels(labels)\nax[1].set_xlabel(r'$|s|$ / $\\omega_\\textrm{\\footnotesize u}$')\nax[1].set_ylabel(r'Biquad per octave $N_O$')\n\n# desired bandwidth\nkw_bw = dict(color='lightgray', alpha=0.5)\nGmin, Gmax = ax[0].get_ylim()\nax[0].fill_between(x=(wl, wh), y1=Gmin, y2=Gmax, **kw_bw)\nax[1].fill_between(x=(wl, wh), y1=-4, y2=1, **kw_bw)\nax[0].text(np.sqrt(wl*wh) * 2**0.1, 0.19,\n 
'Bandwidth ${}$ oct'.format(BWd),\n va='top', ha='center', fontsize=10)\n\nplt.savefig(outdir + 'ripple-and-zp-frequency.pdf', bbox_inches='tight')\n"
] |
[
[
"matplotlib.ticker.MultipleLocator",
"matplotlib.cm.get_cmap",
"matplotlib.pyplot.savefig",
"numpy.ones",
"scipy.signal.sos2zpk",
"matplotlib.pyplot.subplots",
"numpy.sqrt",
"numpy.abs",
"numpy.log10",
"numpy.log2"
]
] |
threexc/SiGPyC
|
[
"81bdf2b691c601e266b2ea0249ad0bad074a567a"
] |
[
"ants/analyzer.py"
] |
[
"import math\nimport os\nimport re\nimport sys\nfrom textwrap import dedent\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nclass IQSamplesFile():\n def __init__(self, iq_samples_file_name, sample_rate = 20e6, noise_threshold = 0.02):\n match = re.search(\"iqsamples_(video|voice|best_effort|background)_run(\\d+)\\.bin\", iq_samples_file_name)\n if not match:\n raise Exception(\"ERROR: Could not parse file name {} to ac and run#\".format(iq_samples_file_name))\n self.access_category = match.group(1)\n self.run = match.group(2)\n\n print(\"Loading {}...\".format(iq_samples_file_name))\n with open(iq_samples_file_name, mode='rb') as file:\n raw_data = np.fromfile(file, dtype=np.float32)\n file_length = len(raw_data)\n \n # In-phase data is at the even indices, quadrature is at the odd ones\n in_phase_data = raw_data[0:file_length:2]\n quadrature_data = raw_data[1:file_length:2]\n # Find the common length of the two data subsets to ensure proper plots\n data_length = min(len(in_phase_data), len(quadrature_data))\n duration = data_length/sample_rate\n\n in_phase_data = in_phase_data[0:data_length]\n quadrature_data = quadrature_data[0:data_length]\n\n print(\"Found {} samples, corresponding to a duration of {}s\".format(data_length, duration))\n\n # set the time axis from the data length and the total duration\n self.time = np.linspace(0, duration, num=data_length)\n # combine inphase and quadrature components as a complex number, cos(wt) + 1j*sin(wt)\n complex_data = in_phase_data + (1j*quadrature_data)\n self.power_data = abs(complex_data)**2\n #self.power_data = power_data[int(len(power_data)/100):-1]\n threshold = noise_threshold * np.mean(self.power_data)\n\n # disregard the fluctuations of the signal in the packet\n packet_indices = np.concatenate(np.where(self.power_data > threshold))\n # possible wait time between packets in seconds\n wait_time = 4e-6\n # number of samples from the start of the packet to safely assume that the packet is complete but before the arrival of the next packet\n window_size = int(sample_rate*wait_time)\n temp_vector = np.diff(packet_indices)\n temp_indices = np.concatenate(np.where(temp_vector > window_size))\n\n # an array to store the indices of the end of the packet\n self.packet_end_indices = np.array([0]*(len(temp_indices)+1))\n self.packet_end_indices[0:(len(temp_indices))] = packet_indices[temp_indices]\n self.packet_end_indices[len(temp_indices)] = packet_indices[-1]\n\n # an array to store the indices of the start of the packet\n self.packet_start_indices = np.array([0]*(len(temp_indices)+1))\n self.packet_start_indices[1:(len(temp_indices)+1)] = packet_indices[temp_indices+1]\n self.packet_start_indices[0] = packet_indices[0]\n\n self.packet_indicator = np.zeros([data_length, 1])\n self.packet_indicator[self.packet_start_indices] = 1\n self.packet_indicator[self.packet_end_indices] = 1\n \n def plot(self, start = 0, num_samples = -1, filename = None):\n matplotlib.rcParams['agg.path.chunksize'] = 10000\n plt.figure()\n plt.plot(self.time[start:num_samples], np.sqrt(self.power_data[start:num_samples]), 'b-', self.time[start:num_samples], self.packet_indicator[start:num_samples], 'r-')\n plt.title(\"Plot of the magnitude of the signal vs Time\")\n plt.xlabel(\"Time (sec)\")\n plt.ylabel(\"Signal magnitude\") #find out if the power is in Watts or dB?\n\n if filename is None:\n plt.show()\n else:\n plt.draw()\n plt.savefig(filename)\n plt.close() \n\nclass ANTS_Analyzer():\n class Stats():\n def __init__(self, count, min, mean, 
max, violations):\n self.count = count\n self.min = min\n self.mean = mean\n self.max = max\n self.violations = violations\n \n class Results():\n def __init__(self):\n self.interframe_spacing = None\n self.number_of_packets = None\n self.txop_durations = None\n self.txop_stats = None\n self.backoff_stats = None\n self.backoff_bin_probabilities = None\n\n self.access_category = None\n self.txop_limit = None\n self.kp1 = None\n self.p_max = None\n \n self.txop_factor = None\n self.backoff_kullback_leibler_divergence = None\n self.dist_factor = None\n self.aggressiveness_factor = None\n self.norm_factor = None\n self.geometric_factor = None\n\n self.last_iq_file = None\n \n def to_string(self):\n results = dedent(\"\"\"\\\n Found {} packets\n\n {} TXOPs\n min/mean/max Txop: {:.3f}µs {:.3f}µs {:.3f}µs\n Txop exceeding Txop limit: {}\n Txop factor: {:.3f}\n\n {} Backoffs\n min/mean/max Backoff: {:.3f}µs {:.3f}µs {:.3f}µs\n Backoff exceeding CW: {}\n Backoff Kullback-Leibler Divergence: {:.3f}\n Backoff KL Factor: {:.3f}\n\n Aggressiveness Factor: {:.3f}\n Norm Factor: {:.3f}\n Geometric Factor: {:.3f}\n \"\"\".format(self.number_of_packets,\n self.txop_stats.count,\n self.txop_stats.min, self.txop_stats.mean, self.txop_stats.max,\n self.txop_stats.violations,\n self.txop_factor,\n self.txop_stats.count,\n self.backoff_stats.min, self.backoff_stats.mean, self.backoff_stats.max,\n self.backoff_stats.violations,\n self.backoff_kullback_leibler_divergence,\n self.dist_factor,\n self.aggressiveness_factor,\n self.norm_factor,\n self.geometric_factor))\n if any(self.backoff_bin_probabilities > self.p_max):\n results = results + \"Backoff bin probability violation.\\n\"\n else:\n results = results + \"Backoff bin probability compliant.\\n\"\n if any(self.txop_durations > self.txop_limit*1e3):\n results = results + \"Txop duration violation.\\n\"\n else:\n results = results + \"Txop duration compliant.\\n\" \n \n norm_factor_percent = (1 - self.norm_factor)*100\n if self.aggressiveness_factor > 0:\n aggression = abs(self.aggressiveness_factor)*100\n results = results + \"{:.3f}% aggressive / {:.3f}% compliant\".format(aggression, norm_factor_percent)\n else:\n submission = abs(self.aggressiveness_factor)*100\n results = results + \"{:.3f}% submissive / {:.3f}% compliant\".format(submission, norm_factor_percent)\n \n return results\n \n def plot(self, path):\n plt.figure(1)\n plt.xlim((0,250))\n plt.hist(self.interframe_spacing, bins=750)\n plt.title(\"Histogram of the inter-frame spacing\")\n plt.xlabel(\"Inter-frame spacing (microsecond)\")\n plt.ylabel(\"Frequency\")\n plt.draw()\n plt.savefig(os.path.join(path, 'interframe_spacing_histogram_{}.svg'.format(self.access_category)))\n plt.close()\n \n plt.figure(2)\n compliant_txop_durations = self.txop_durations[self.txop_durations < self.txop_limit*1e3]\n violating_txop_durations = self.txop_durations[self.txop_durations >= self.txop_limit*1e3]\n plt.hist(compliant_txop_durations, bins=50)\n plt.hist(violating_txop_durations, bins=50, color='red')\n plt.title(\"Histogram of the Txop durations\")\n plt.xlabel(\"Txop duration (milli second)\")\n plt.ylabel(\"Frequency\")\n Gender = ['Compliant Txop', 'Violating Txop']\n plt.legend(Gender, loc=2)\n plt.draw()\n plt.savefig(os.path.join(path, 'txop_durations_histogram_{}.svg'.format(self.access_category)))\n plt.close()\n\n plt.figure(3)\n t = np.linspace(0, self.kp1-1, num=self.kp1)\n plt.bar(t, self.backoff_bin_probabilities, color='b', width=0.25)\n plt.bar(t+0.25, self.p_max, color='r', width=0.25)\n 
plt.title(\"Bin Probability and Threshold\")\n plt.xlabel(\"Bin\")\n plt.ylabel(\"Probability\")\n Gender = ['Bin Probability', 'Compliance Upper threshold']\n plt.legend(Gender, loc=2)\n plt.draw()\n plt.savefig(os.path.join(path, 'bin_probability_{}.svg'.format(self.access_category)))\n plt.close()\n\n self.last_iq_file.plot(start = 0, num_samples = 100000, filename = os.path.join(path, \"signal_magnitude_plot_{}.svg\".format(self.access_category)))\n \n def __init__(self, uut_type, sample_rate = 20e6):\n self.uut_type = uut_type\n self.sample_rate = sample_rate\n self.access_category = None\n\n self.packet_duration = None\n self.interframe_spacing = None\n\n self.sifs = 25\n self.slot = 9\n\n def loadIqSamples(self, iq_samples_file_name, noise_threshold=0.02):\n iqFile = IQSamplesFile(iq_samples_file_name, sample_rate=self.sample_rate, noise_threshold=noise_threshold)\n if self.access_category is not None:\n if self.access_category != iqFile.access_category:\n raise Exception(\"ERROR: Cannot mix samples from different access categories\")\n else:\n self.set_access_category(iqFile.access_category)\n \n # calculate the interframe spacing by finding the time difference between the end and the start of each consecutive packets (in microseconds)\n interframe_spacing = (1e6/self.sample_rate)*(iqFile.packet_start_indices[1:]-iqFile.packet_end_indices[0:-1])\n packet_duration = (1e6/self.sample_rate)*(iqFile.packet_end_indices-iqFile.packet_start_indices)\n\n print(\"Found {} packets with {} IFSs in between\".format(len(packet_duration), len(interframe_spacing)))\n if self.packet_duration is not None:\n self.packet_duration = np.concatenate([self.packet_duration, packet_duration])\n self.interframe_spacing = np.concatenate([self.interframe_spacing, [2 * self.sifs], interframe_spacing])\n else:\n self.packet_duration = packet_duration\n self.interframe_spacing = interframe_spacing\n\n self.last_iq_file = iqFile\n\n def get_results(self): \n if self.packet_duration is None:\n print (\"ERROR: No iq samples to analyze.\")\n return\n\n results = ANTS_Analyzer.Results()\n results.interframe_spacing = self.interframe_spacing\n results.last_iq_file = self.last_iq_file\n \n number_of_packets = len(self.packet_duration)\n interframe_spacing_length = len(self.interframe_spacing)\n if number_of_packets != interframe_spacing_length + 1:\n print(\"ERROR: Expected one more packets than interframe spaces, but got {} / {}\".format(number_of_packets, interframe_spacing_length))\n return\n results.number_of_packets = number_of_packets\n\n COT = np.where(self.interframe_spacing > self.sifs)\n COT = np.asarray(COT).T\n\n print(\"Analyzing {} packets, {} IFSs, {} COTs\".format(number_of_packets, interframe_spacing_length, len(COT)))\n\n max_packet_duration = max(self.packet_duration)\n print(\"Max packet duration: {:.3f}µs\".format(max_packet_duration))\n txop_durations = []\n # leave out last COT, it might be incomplete\n for i in range(0, len(COT)-1):\n start = COT[i][0]\n end = COT[i+1][0]\n # sum the packets and the IFSs _within_ the txop\n txop_duration = np.sum(self.packet_duration[start+1 : end+1]) + np.sum(self.interframe_spacing[start+1 : end])\n txop_durations.append(txop_duration)\n txop_durations = np.array(txop_durations)\n results.txop_durations = txop_durations\n\n mean_txop = np.mean(txop_durations)\n max_txop = np.max(txop_durations)\n min_txop = np.min(txop_durations)\n print(\"TXOP Min/Mean/Max: {:.3f}µs / {:.3f}µs / {:.3f}µs\".format(min_txop, mean_txop, max_txop))\n \n if self.txop_limit == 0:\n 
# no txop limit, no violations\n \tviolating_durations = 0\n else:\n \tviolating_durations = np.count_nonzero(txop_durations > self.txop_limit * 1e3)\n\n results.txop_stats = ANTS_Analyzer.Stats(len(txop_durations), min_txop, mean_txop, max_txop, violating_durations)\n txop_factor = violating_durations/len(txop_durations)\n print(\"Found {:d} violating durations (> {:d}ms)\".format(violating_durations, self.txop_limit))\n\n if self.uut_type == \"Supervising\" and (self.access_category == \"voice\" or self.access_category == \"video\") :\n correct_back_off = np.concatenate(np.where(self.interframe_spacing > self.sifs))\n else:\n correct_back_off = np.concatenate(np.where(self.interframe_spacing > (self.sifs + 2)))\n \n BFmin = self.aifs - self.slot/2\n BFmax = self.aifs + self.slot*(self.n-1) + self.slot/2\n BFmid = (BFmax + BFmin)/2\n\n back_offs = self.interframe_spacing[correct_back_off]\n results.backoff_stats = ANTS_Analyzer.Stats(len(back_offs), np.min(back_offs), np.mean(back_offs), np.max(back_offs), np.count_nonzero(back_offs > BFmax))\n blen = len(back_offs)\n b = np.asarray(np.zeros(self.n))\n b2 = np.asarray(np.zeros(self.kp1))\n\n for i in range(0, blen):\n \tx = back_offs[i]\n \tif x <= BFmin:\n \t\tb[0] = b[0] + 1\n \telif x >= BFmax:\n \t\tb[self.n-1] = b[self.n-1] + 1\n \telse:\n \t\tindex = math.ceil((x-BFmin)/self.slot)-1\n \t\tb[index] = b[index] + 1\n\n \tif x < self.mind:\n \t\tb2[0] = b2[0] + 1\n \telif x >= self.maxd:\n \t\tb2[self.kp1-1] = b2[self.kp1-1] + 1\n \telse:\n \t\tindex = math.ceil((x-self.mind)/self.slot)\n \t\tb2[index] = b2[index] + 1\n\n prob = b/sum(b)\n nz_prob = prob[np.where(prob > 0)]\n backoff_kullback_leibler_divergence = sum(np.multiply(nz_prob, np.log10(nz_prob/(1/self.n))))\n dist_factor = 1 - np.exp(-1 * backoff_kullback_leibler_divergence)\n kk = np.arange((self.n - 1)/2, - (self.n - 1)/2-1, -1)\n accum = sum(np.multiply(b, kk))\n avrg = accum / blen\n mid = (self.n-1)/2\n aggressiveness_factor = avrg/mid\n\n # Norm Factor\n norm_factor = math.sqrt(aggressiveness_factor**2 + txop_factor**2 + dist_factor**2 )/2\n \n # Geometric mean Factor\n geometric_factor = ((1 - abs(aggressiveness_factor)) * (1 - txop_factor) * (1 - dist_factor) )**(1/3)\n\n # Calculate the observed cumulative probabilities (p)\n e = blen # total observed periods\n p = np.asarray(np.zeros(self.kp1))\n for i in range(0, self.kp1):\n p[i] = ANTS_Analyzer.sum_range(b2, 0, i)/e\n\n results.backoff_bin_probabilities = p \n results.access_category = self.access_category\n results.txop_limit = self.txop_limit\n results.kp1 = self.kp1\n results.p_max = self.p_max\n results.txop_factor = txop_factor\n results.backoff_kullback_leibler_divergence = backoff_kullback_leibler_divergence\n results.dist_factor = dist_factor\n results.aggressiveness_factor = aggressiveness_factor\n results.norm_factor = norm_factor\n results.geometric_factor = geometric_factor\n\n return results\n\n def sum_range(l,a,b):\n sum = 0\n for i in range(a, b+1,1):\n sum += l[i]\n return sum\n\n def set_access_category(self, access_category):\n \"\"\"Sets the parameters that depend on the access category\"\"\"\n if access_category == \"video\":\n self.txop_limit = 4\n self.aifs = 34\n self.n = 8\n self.kp1 = 9\n self.mind = 32\n self.maxd = 95\n elif access_category == \"best_effort\":\n \tself.txop_limit = 6\n \tself.aifs = 43\n \tself.n = 16\n \tself.kp1 = 17\n \tself.mind = 41\n \tself.maxd = 176\n elif access_category == \"background\":\n \tself.txop_limit = 6\n \tself.aifs = 79\n \tself.n = 16\n \tself.kp1 
= 17\n \tself.mind = 77\n \tself.maxd = 212\n elif access_category == \"voice\":\n \tself.txop_limit = 2\n \tself.aifs = 34\n \tself.n = 4\n \tself.kp1 = 5\n \tself.mind = 32\n \tself.maxd = 59\n else:\n raise Exception(\"Unknown access category {}\".format(access_category))\n self.access_category = access_category\n\n self.p_max = np.asarray(np.zeros(self.kp1))\n self.p_max[0] = 0.05\n self.p_max[self.kp1 - 1] = 1\n\n if self.access_category == \"voice\":\n \tfor i in range(1, 4):\n \t\tself.p_max[i] = self.p_max[0] + i * 0.25\n elif self.access_category == \"video\":\n \tself.p_max[1] = 0.18\n \tfor i in range(2, 7):\n \t\tself.p_max[i] = self.p_max[1] + (i - 1) * 0.125\n \tself.p_max[self.kp1 - 2] = 1\n elif self.access_category == \"best_effort\" or self.access_category == \"background\":\n \tself.p_max[1] = 0.12\n \tfor i in range(2, 16):\n \t\tself.p_max[i] = self.p_max[1] + (i - 1) * 0.0625\n else:\n raise Exception(\"Unknown access category {}\".format(access_category))\n print(\"Running analysis for access category {}\".format(self.access_category))\n\ndef main():\n if len(sys.argv) < 2:\n fileName = './tests/iqsamples_voice_run1.bin'\n print(\"Please supply the name of the .bin file\")\n else:\n fileName = sys.argv[1]\n\n print(\"Analyzing {}...\".format(fileName))\n analyzer = ANTS_Analyzer(\"Supervising\")\n analyzer.loadIqSamples(fileName)\n results = analyzer.get_results()\n print(\"--- RESULTS ---\")\n print(results.to_string())\n\n results.plot('.')\n\nif __name__ == \"__main__\":\n main() \n"
] |
[
[
"matplotlib.pyplot.xlim",
"numpy.min",
"numpy.mean",
"numpy.exp",
"numpy.multiply",
"numpy.where",
"matplotlib.pyplot.draw",
"matplotlib.pyplot.bar",
"numpy.max",
"numpy.concatenate",
"numpy.count_nonzero",
"matplotlib.pyplot.savefig",
"numpy.arange",
"numpy.sqrt",
"numpy.log10",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.title",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"numpy.diff",
"matplotlib.pyplot.hist",
"numpy.fromfile",
"matplotlib.pyplot.show",
"numpy.asarray",
"matplotlib.pyplot.xlabel",
"numpy.sum",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.ylabel",
"numpy.linspace"
]
] |
esnmrdi/phd
|
[
"2eede68a52e3e9c5931cad55dc6989c14f2e5d5b"
] |
[
"model-ensemble-lookback.py"
] |
[
"# %%\n# Ensemble Forecasting of RNN Models Trained for Lookbacks Ranging from 1 to 6\n# Ehsan Moradi, Ph.D. Candidate\n\n# %%\n# Import required libraries\nimport pandas as pd\nimport numpy as np\nimport csv\nfrom scipy.stats.mstats import trimmed_mean, winsorize\nfrom sklearn.ensemble import (\n GradientBoostingRegressor,\n AdaBoostRegressor,\n RandomForestRegressor,\n)\nfrom sklearn.linear_model import LinearRegression, Ridge\nfrom sklearn.svm import SVR\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.neural_network import MLPRegressor\nfrom sklearn.model_selection import train_test_split, ParameterGrid\nfrom sklearn.utils import check_random_state\n\n\n# %%\n# Load data from Excel to a pandas dataframe\ndef load_data_from_Excel(sensor, vehicle):\n directory = (\n \"../../Google Drive/Academia/PhD Thesis/Field Experiments/\"\n + sensor\n + \"/\"\n + vehicle\n + \"/Processed/RNN/\"\n )\n input_file = \"{0} - RNN - 05.xlsx\".format(vehicle)\n input_path = directory + input_file\n df = pd.read_excel(input_path, sheet_name=\"Sheet1\", header=0)\n return df\n\n\n# %%\n# Log model settings and corresponding scores to a file (one by one)\ndef log_model_settings_and_score(row):\n directory = \"../../Google Drive/Academia/PhD Thesis/Charts, Tables, Forms, Flowcharts, Spreadsheets, Figures/\"\n output_file = \"Paper III - Ensemble Lookback Results.csv\"\n output_path = directory + output_file\n with open(output_path, \"a\") as f:\n writer = csv.writer(f)\n writer.writerow(row)\n return None\n\n\n# %%\n# Definition of the custom loss function\ndef rmse(y_true, y_pred):\n return np.sqrt(np.mean(np.square(np.array(y_pred) - np.array(y_true))))\n\n\n# %%\n# Experiments to include in modeling\n# The boolean points to whether the experiment type is obd_only or pems_included.\nEXPERIMENTS = (\n (\"009 Renault Logan 2014 (1.6L Manual)\", True),\n (\"010 JAC J5 2015 (1.8L Auto)\", True),\n (\"011 JAC S5 2017 (2.0L TC Auto)\", True),\n (\"012 IKCO Dena 2016 (1.65L Manual)\", True),\n (\"013 Geely Emgrand7 2014 (1.8L Auto)\", True),\n (\"014 Kia Cerato 2016 (2.0L Auto)\", True),\n (\"015 VW Jetta 2016 (1.4L TC Auto)\", False),\n (\"016 Hyundai Sonata Sport 2019 (2.4L Auto)\", True),\n (\"017 Chevrolet Trax 2019 (1.4L TC Auto)\", True),\n (\"018 Hyundai Azera 2006 (3.8L Auto)\", True),\n (\"019 Hyundai Elantra GT 2019 (2.0L Auto)\", True),\n (\"020 Honda Civic 2014 (1.8L Auto)\", False),\n (\"021 Chevrolet N300 2014 (1.2L Manual)\", True),\n (\"022 Chevrolet Spark GT 2012 (1.2L Manual)\", True),\n (\"023 Mazda 2 2012 (1.4L Auto)\", True),\n (\"024 Renault Logan 2010 (1.4L Manual)\", True),\n (\"025 Chevrolet Captiva 2010 (2.4L Auto)\", True),\n (\"026 Nissan Versa 2013 (1.6L Auto)\", True),\n (\"027 Chevrolet Cruze 2011 (1.8L Manual)\", True),\n (\"028 Nissan Sentra 2019 (1.8L Auto)\", True),\n (\"029 Ford Escape 2006 (3.0L Auto)\", False),\n (\"030 Ford Focus 2012 (2.0L Auto)\", False),\n (\"031 Mazda 3 2016 (2.0L Auto)\", False),\n (\"032 Toyota RAV4 2016 (2.5L Auto)\", False),\n (\"033 Toyota Corolla 2019 (1.8L Auto)\", False),\n (\"034 Toyota Yaris 2015 (1.5L Auto)\", False),\n (\"035 Kia Rio 2013 (1.6L Auto)\", False),\n (\"036 Jeep Patriot 2010 (2.4L Auto)\", False),\n (\"037 Chevrolet Malibu 2019 (1.5L TC Auto)\", False),\n (\"038 Kia Optima 2012 (2.4L Auto)\", False),\n (\"039 Honda Fit 2009 (1.5L Auto)\", False),\n (\"040 Mazda 6 2009 (2.5L Auto)\", False),\n (\"041 Nissan Micra 2019 (1.6L Auto)\", False),\n (\"042 Nissan Rouge 2020 (2.5L Auto)\", False),\n (\"043 Mazda CX-3 2019 (2.0L 
Auto)\", False),\n)\n\n# %%\n# General settings\npd.options.mode.chained_assignment = None\nSETTINGS = {\n \"SENSOR_DEPENDENT\": {\n \"Veepeak\": (\"FCR_LH\",),\n \"3DATX parSYNC Plus\": (\"CO2_KGS\", \"NO_KGS\", \"NO2_KGS\", \"PM_KGS\"),\n },\n # \"TRIM_LIMITS\": [0.0, 0.2, 0.4],\n # \"WINSORIZE_LIMITS\": [0.2, 0.4],\n \"ESTIMATORS\": (\n LinearRegression(normalize=True),\n Ridge(alpha=0.1, normalize=True),\n Ridge(alpha=1.0, normalize=True),\n SVR(C=1.0),\n SVR(C=10.0),\n DecisionTreeRegressor(splitter=\"best\"),\n DecisionTreeRegressor(splitter=\"random\"),\n GradientBoostingRegressor(n_estimators=10),\n GradientBoostingRegressor(n_estimators=100),\n AdaBoostRegressor(n_estimators=10),\n AdaBoostRegressor(n_estimators=100),\n RandomForestRegressor(n_estimators=10),\n RandomForestRegressor(n_estimators=100),\n MLPRegressor(hidden_layer_sizes=(100,)),\n MLPRegressor(hidden_layer_sizes=(100, 100,)),\n ),\n}\n\n# %%\n# Application of ensemble learning methods\n# including Trimmed Mean, Winsorized Mean, Linear Regression, Ridge Regression, SVR,\n# Decision Tree, Gradient Boosting, Ada Boosting, Random Forest, and MLP\n# Batch execution on all the vehicles\nrng = check_random_state(0)\nfor sensor, dependents in SETTINGS[\"SENSOR_DEPENDENT\"].items():\n for dependent in dependents:\n features = [\"{0}_PRED_L{1}\".format(dependent, str(i + 1)) for i in range(6)]\n vehicles = (\n (item[0] for item in EXPERIMENTS)\n if sensor == \"Veepeak\"\n else (item[0] for item in EXPERIMENTS if not item[1])\n )\n for vehicle in vehicles:\n df_input = load_data_from_Excel(sensor, vehicle)\n df_input.dropna(inplace=True)\n X, y = df_input[features], df_input[dependent]\n X_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.3, random_state=rng\n )\n df_output = pd.DataFrame()\n df_output[dependent] = y_test\n df_output[features] = X_test\n print(vehicle)\n\n # for trim_limit in SETTINGS[\"TRIM_LIMITS\"]:\n # ensemble = trimmed_mean(\n # X_test, limits=trim_limit, inclusive=(True, True), axis=1\n # )\n # df_output[\n # \"{0}_PRED_TRIMMED_{1}\".format(dependent, str(trim_limit))\n # ] = ensemble\n # score_components = [rmse(y_test, X_test[col]) for col in X_test]\n # score_ensemble = rmse(y_test, ensemble)\n # row = [\n # dependent,\n # vehicle,\n # \"Trimmed Mean (limit={})\".format(trim_limit),\n # score_ensemble,\n # ] + score_components\n # log_model_settings_and_score(row)\n # print(\n # \"Trimmed mean {1}\\nscore {2}.\".format(\n # vehicle, trim_limit, score_ensemble\n # )\n # )\n # for winsorize_limit in SETTINGS[\"WINSORIZE_LIMITS\"]:\n # ensemble = np.mean(\n # winsorize(\n # X_test, limits=winsorize_limit, inclusive=(True, True), axis=1\n # ),\n # axis=1,\n # )\n # df_output[\n # \"{0}_PRED_WINSORIZED_{1}\".format(dependent, str(winsorize_limit))\n # ] = ensemble\n # score_components = [rmse(y_test, X_test[col]) for col in X_test]\n # score_ensemble = rmse(y_test, ensemble)\n # row = [\n # dependent,\n # vehicle,\n # \"Winsorized Mean (limit={})\".format(winsorize_limit),\n # score_ensemble,\n # ] + score_components\n # log_model_settings_and_score(row)\n # print(\n # \"Winsorized mean {1}\\nscore {2}.\".format(\n # vehicle, winsorize_limit, score_ensemble\n # )\n # )\n for estimator in SETTINGS[\"ESTIMATORS\"]:\n ensemble = estimator.fit(X_train, y_train).predict(X_test)\n df_output[\"{0}_PRED_{1}\".format(dependent, estimator)] = ensemble\n score_ensemble = rmse(y_test, ensemble)\n score_components = [rmse(y_test, X_test[col]) for col in X_test]\n row = [dependent, vehicle, 
estimator, score_ensemble] + score_components\n log_model_settings_and_score(row)\n print(\"{0}\\n{1}\\nscore {2}.\".format(vehicle, estimator, score_ensemble))\n\n# %%\n"
] |
[
[
"sklearn.ensemble.AdaBoostRegressor",
"numpy.array",
"sklearn.ensemble.GradientBoostingRegressor",
"sklearn.linear_model.LinearRegression",
"pandas.DataFrame",
"pandas.read_excel",
"sklearn.linear_model.Ridge",
"sklearn.utils.check_random_state",
"sklearn.svm.SVR",
"sklearn.ensemble.RandomForestRegressor",
"sklearn.neural_network.MLPRegressor",
"sklearn.tree.DecisionTreeRegressor",
"sklearn.model_selection.train_test_split"
]
] |
GateBuilder/Qcodes
|
[
"b3729f188847f9a7db77f23f8c1b3a2a635a2ac2"
] |
[
"qcodes/instrument_drivers/stanford_research/SR830.py"
] |
[
"from functools import partial\nimport numpy as np\n\nfrom qcodes import VisaInstrument\nfrom qcodes.instrument.parameter import ArrayParameter\nfrom qcodes.utils.validators import Numbers, Ints, Enum, Strings\n\nfrom typing import Tuple\n\n\nclass ChannelBuffer(ArrayParameter):\n \"\"\"\n Parameter class for the two channel buffers\n\n Currently always returns the entire buffer\n TODO (WilliamHPNielsen): Make it possible to query parts of the buffer.\n The instrument natively supports this in its TRCL call.\n \"\"\"\n\n def __init__(self, name: str, instrument: 'SR830', channel: int) -> None:\n \"\"\"\n Args:\n name (str): The name of the parameter\n instrument (SR830): The parent instrument\n channel (int): The relevant channel (1 or 2). The name should\n should match this.\n \"\"\"\n self._valid_channels = (1, 2)\n\n if channel not in self._valid_channels:\n raise ValueError('Invalid channel specifier. SR830 only has '\n 'channels 1 and 2.')\n\n if not isinstance(instrument, SR830):\n raise ValueError('Invalid parent instrument. ChannelBuffer '\n 'can only live on an SR830.')\n\n super().__init__(name,\n shape=(1,), # dummy initial shape\n unit='V', # dummy initial unit\n setpoint_names=('Time',),\n setpoint_labels=('Time',),\n setpoint_units=('s',),\n docstring='Holds an acquired (part of the) '\n 'data buffer of one channel.')\n\n self.channel = channel\n self._instrument = instrument\n\n def prepare_buffer_readout(self):\n \"\"\"\n Function to generate the setpoints for the channel buffer and\n get the right units\n \"\"\"\n\n N = self._instrument.buffer_npts() # problem if this is zero?\n # TODO (WilliamHPNielsen): what if SR was changed during acquisition?\n SR = self._instrument.buffer_SR()\n if SR == 'Trigger':\n self.setpoint_units = ('',)\n self.setpoint_names = ('trig_events',)\n self.setpoint_labels = ('Trigger event number',)\n self.setpoints = (tuple(np.arange(0, N)),)\n else:\n dt = 1/SR\n self.setpoint_units = ('s',)\n self.setpoint_names = ('Time',)\n self.setpoint_labels = ('Time',)\n self.setpoints = (tuple(np.linspace(0, N*dt, N)),)\n\n self.shape = (N,)\n\n params = self._instrument.parameters\n # YES, it should be: \"is not 'none'\" NOT \"is not None\"\n if params['ch{}_ratio'.format(self.channel)].get() is not 'none':\n self.unit = '%'\n else:\n disp = params['ch{}_display'.format(self.channel)].get()\n if disp == 'Phase':\n self.unit = 'deg'\n else:\n self.unit = 'V'\n\n if self.channel == 1:\n self._instrument._buffer1_ready = True\n else:\n self._instrument._buffer2_ready = True\n\n def get_raw(self):\n \"\"\"\n Get command. Returns numpy array\n \"\"\"\n if self.channel == 1:\n ready = self._instrument._buffer1_ready\n else:\n ready = self._instrument._buffer2_ready\n\n if not ready:\n raise RuntimeError('Buffer not ready. Please run '\n 'prepare_buffer_readout')\n N = self._instrument.buffer_npts()\n if N == 0:\n raise ValueError('No points stored in SR830 data buffer.'\n ' Can not poll anything.')\n\n # poll raw binary data\n self._instrument.write('TRCL ? 
{}, 0, {}'.format(self.channel, N))\n rawdata = self._instrument.visa_handle.read_raw()\n\n # parse it\n realdata = np.fromstring(rawdata, dtype='<i2')\n numbers = realdata[::2]*2.0**(realdata[1::2]-124)\n if self.shape[0] != N:\n raise RuntimeError(\"SR830 got {} points in buffer expected {}\".format(N, self.shape[0]))\n return numbers\n\n\nclass SR830(VisaInstrument):\n \"\"\"\n This is the qcodes driver for the Stanford Research Systems SR830\n Lock-in Amplifier\n \"\"\"\n\n _VOLT_TO_N = {2e-9: 0, 5e-9: 1, 10e-9: 2,\n 20e-9: 3, 50e-9: 4, 100e-9: 5,\n 200e-9: 6, 500e-9: 7, 1e-6: 8,\n 2e-6: 9, 5e-6: 10, 10e-6: 11,\n 20e-6: 12, 50e-6: 13, 100e-6: 14,\n 200e-6: 15, 500e-6: 16, 1e-3: 17,\n 2e-3: 18, 5e-3: 19, 10e-3: 20,\n 20e-3: 21, 50e-3: 22, 100e-3: 23,\n 200e-3: 24, 500e-3: 25, 1: 26}\n _N_TO_VOLT = {v: k for k, v in _VOLT_TO_N.items()}\n\n _CURR_TO_N = {2e-15: 0, 5e-15: 1, 10e-15: 2,\n 20e-15: 3, 50e-15: 4, 100e-15: 5,\n 200e-15: 6, 500e-15: 7, 1e-12: 8,\n 2e-12: 9, 5e-12: 10, 10e-12: 11,\n 20e-12: 12, 50e-12: 13, 100e-12: 14,\n 200e-12: 15, 500e-12: 16, 1e-9: 17,\n 2e-9: 18, 5e-9: 19, 10e-9: 20,\n 20e-9: 21, 50e-9: 22, 100e-9: 23,\n 200e-9: 24, 500e-9: 25, 1e-6: 26}\n _N_TO_CURR = {v: k for k, v in _CURR_TO_N.items()}\n\n _VOLT_ENUM = Enum(*_VOLT_TO_N.keys())\n _CURR_ENUM = Enum(*_CURR_TO_N.keys())\n\n _INPUT_CONFIG_TO_N = {\n 'a': 0,\n 'a-b': 1,\n 'I 1M': 2,\n 'I 100M': 3,\n }\n\n _N_TO_INPUT_CONFIG = {v: k for k, v in _INPUT_CONFIG_TO_N.items()}\n\n def __init__(self, name, address, **kwargs):\n super().__init__(name, address, **kwargs)\n\n # Reference and phase\n self.add_parameter('phase',\n label='Phase',\n get_cmd='PHAS?',\n get_parser=float,\n set_cmd='PHAS {:.2f}',\n unit='deg',\n vals=Numbers(min_value=-360, max_value=729.99))\n\n self.add_parameter('reference_source',\n label='Reference source',\n get_cmd='FMOD?',\n set_cmd='FMOD {}',\n val_mapping={\n 'external': 0,\n 'internal': 1,\n },\n vals=Enum('external', 'internal'))\n\n self.add_parameter('frequency',\n label='Frequency',\n get_cmd='FREQ?',\n get_parser=float,\n set_cmd='FREQ {:.4f}',\n unit='Hz',\n vals=Numbers(min_value=1e-3, max_value=102e3))\n\n self.add_parameter('ext_trigger',\n label='External trigger',\n get_cmd='RSLP?',\n set_cmd='RSLP {}',\n val_mapping={\n 'sine': 0,\n 'TTL rising': 1,\n 'TTL falling': 2,\n })\n\n self.add_parameter('harmonic',\n label='Harmonic',\n get_cmd='HARM?',\n get_parser=int,\n set_cmd='HARM {:d}',\n vals=Ints(min_value=1, max_value=19999))\n\n self.add_parameter('amplitude',\n label='Amplitude',\n get_cmd='SLVL?',\n get_parser=float,\n set_cmd='SLVL {:.3f}',\n unit='V',\n vals=Numbers(min_value=0.004, max_value=5.000))\n\n # Input and filter\n self.add_parameter('input_config',\n label='Input configuration',\n get_cmd='ISRC?',\n get_parser=self._get_input_config,\n set_cmd='ISRC {}',\n set_parser=self._set_input_config,\n vals=Enum(*self._INPUT_CONFIG_TO_N.keys()))\n\n self.add_parameter('input_shield',\n label='Input shield',\n get_cmd='IGND?',\n set_cmd='IGND {}',\n val_mapping={\n 'float': 0,\n 'ground': 1,\n })\n\n self.add_parameter('input_coupling',\n label='Input coupling',\n get_cmd='ICPL?',\n set_cmd='ICPL {}',\n val_mapping={\n 'AC': 0,\n 'DC': 1,\n })\n\n self.add_parameter('notch_filter',\n label='Notch filter',\n get_cmd='ILIN?',\n set_cmd='ILIN {}',\n val_mapping={\n 'off': 0,\n 'line in': 1,\n '2x line in': 2,\n 'both': 3,\n })\n\n # Gain and time constant\n self.add_parameter(name='sensitivity',\n label='Sensitivity',\n get_cmd='SENS?',\n set_cmd='SENS {:d}',\n 
get_parser=self._get_sensitivity,\n set_parser=self._set_sensitivity\n )\n\n self.add_parameter('reserve',\n label='Reserve',\n get_cmd='RMOD?',\n set_cmd='RMOD {}',\n val_mapping={\n 'high': 0,\n 'normal': 1,\n 'low noise': 2,\n })\n\n self.add_parameter('time_constant',\n label='Time constant',\n get_cmd='OFLT?',\n set_cmd='OFLT {}',\n unit='s',\n val_mapping={\n 10e-6: 0, 30e-6: 1,\n 100e-6: 2, 300e-6: 3,\n 1e-3: 4, 3e-3: 5,\n 10e-3: 6, 30e-3: 7,\n 100e-3: 8, 300e-3: 9,\n 1: 10, 3: 11,\n 10: 12, 30: 13,\n 100: 14, 300: 15,\n 1e3: 16, 3e3: 17,\n 10e3: 18, 30e3: 19,\n })\n\n self.add_parameter('filter_slope',\n label='Filter slope',\n get_cmd='OFSL?',\n set_cmd='OFSL {}',\n unit='dB/oct',\n val_mapping={\n 6: 0,\n 12: 1,\n 18: 2,\n 24: 3,\n })\n\n self.add_parameter('sync_filter',\n label='Sync filter',\n get_cmd='SYNC?',\n set_cmd='SYNC {}',\n val_mapping={\n 'off': 0,\n 'on': 1,\n })\n\n def parse_offset_get(s):\n parts = s.split(',')\n\n return float(parts[0]), int(parts[1])\n\n # TODO: Parameters that can be set with multiple arguments\n # For the OEXP command for example two arguments are needed\n self.add_parameter('X_offset',\n get_cmd='OEXP? 1',\n get_parser=parse_offset_get)\n\n self.add_parameter('Y_offset',\n get_cmd='OEXP? 2',\n get_parser=parse_offset_get)\n\n self.add_parameter('R_offset',\n get_cmd='OEXP? 3',\n get_parser=parse_offset_get)\n\n # Aux input/output\n for i in [1, 2, 3, 4]:\n self.add_parameter('aux_in{}'.format(i),\n label='Aux input {}'.format(i),\n get_cmd='OAUX? {}'.format(i),\n get_parser=float,\n unit='V')\n\n self.add_parameter('aux_out{}'.format(i),\n label='Aux output {}'.format(i),\n get_cmd='AUXV? {}'.format(i),\n get_parser=float,\n set_cmd='AUXV {0}, {{}}'.format(i),\n unit='V')\n\n # Setup\n self.add_parameter('output_interface',\n label='Output interface',\n get_cmd='OUTX?',\n set_cmd='OUTX {}',\n val_mapping={\n 'RS232': '0\\n',\n 'GPIB': '1\\n',\n })\n\n # Channel setup\n for ch in range(1, 3):\n\n # detailed validation and mapping performed in set/get functions\n self.add_parameter('ch{}_ratio'.format(ch),\n label='Channel {} ratio'.format(ch),\n get_cmd=partial(self._get_ch_ratio, ch),\n set_cmd=partial(self._set_ch_ratio, ch),\n vals=Strings())\n self.add_parameter('ch{}_display'.format(ch),\n label='Channel {} display'.format(ch),\n get_cmd=partial(self._get_ch_display, ch),\n set_cmd=partial(self._set_ch_display, ch),\n vals=Strings())\n self.add_parameter('ch{}_databuffer'.format(ch),\n channel=ch,\n parameter_class=ChannelBuffer)\n\n # Data transfer\n self.add_parameter('X',\n get_cmd='OUTP? 1',\n get_parser=float,\n unit='V')\n\n self.add_parameter('Y',\n get_cmd='OUTP? 2',\n get_parser=float,\n unit='V')\n\n self.add_parameter('R',\n get_cmd='OUTP? 3',\n get_parser=float,\n unit='V')\n\n self.add_parameter('P',\n get_cmd='OUTP? 
4',\n get_parser=float,\n unit='deg')\n \n # Data buffer settings\n self.add_parameter('buffer_SR',\n label='Buffer sample rate',\n get_cmd='SRAT ?',\n set_cmd=self._set_buffer_SR,\n unit='Hz',\n val_mapping={62.5e-3: 0,\n 0.125: 1,\n 0.250: 2,\n 0.5: 3,\n 1: 4, 2: 5,\n 4: 6, 8: 7,\n 16: 8, 32: 9,\n 64: 10, 128: 11,\n 256: 12, 512: 13,\n 'Trigger': 14},\n get_parser=int\n )\n\n self.add_parameter('buffer_acq_mode',\n label='Buffer acquistion mode',\n get_cmd='SEND ?',\n set_cmd='SEND {}',\n val_mapping={'single shot': 0,\n 'loop': 1},\n get_parser=int)\n\n self.add_parameter('buffer_trig_mode',\n label='Buffer trigger start mode',\n get_cmd='TSTR ?',\n set_cmd='TSTR {}',\n val_mapping={'ON': 1, 'OFF': 0},\n get_parser=int)\n\n self.add_parameter('buffer_npts',\n label='Buffer number of stored points',\n get_cmd='SPTS ?',\n get_parser=int)\n\n # Auto functions\n self.add_function('auto_gain', call_cmd='AGAN')\n self.add_function('auto_reserve', call_cmd='ARSV')\n self.add_function('auto_phase', call_cmd='APHS')\n self.add_function('auto_offset', call_cmd='AOFF {0}',\n args=[Enum(1, 2, 3)])\n\n # Interface\n self.add_function('reset', call_cmd='*RST')\n\n self.add_function('disable_front_panel', call_cmd='OVRM 0')\n self.add_function('enable_front_panel', call_cmd='OVRM 1')\n\n self.add_function('send_trigger', call_cmd='TRIG',\n docstring=(\"Send a software trigger. \"\n \"This command has the same effect as a \"\n \"trigger at the rear panel trigger\"\n \" input.\"))\n\n self.add_function('buffer_start', call_cmd='STRT',\n docstring=(\"The buffer_start command starts or \"\n \"resumes data storage. buffer_start\"\n \" is ignored if storage is already in\"\n \" progress.\"))\n\n self.add_function('buffer_pause', call_cmd='PAUS',\n docstring=(\"The buffer_pause command pauses data \"\n \"storage. If storage is already paused \"\n \"or reset then this command is ignored.\"))\n\n self.add_function('buffer_reset', call_cmd='REST',\n docstring=(\"The buffer_reset command resets the data\"\n \" buffers. The buffer_reset command can \"\n \"be sent at any time - any storage in \"\n \"progress, paused or not, will be reset.\"\n \" This command will erase the data \"\n \"buffer.\"))\n\n # Initialize the proper units of the outputs and sensitivities\n self.input_config()\n\n # start keeping track of buffer setpoints\n self._buffer1_ready = False\n self._buffer2_ready = False\n\n self.connect_message()\n \n \n SNAP_PARAMETERS = {\n 'x': '1', \n 'y': '2', \n 'r': '3', \n 'p': '4', \n 'phase': '4', \n 'θ' : '4',\n 'aux1': '5', \n 'aux2': '6', \n 'aux3': '7', \n 'aux4': '8', \n 'freq': '9', \n 'ch1': '10',\n 'ch2': '11' \n }\n \n def snap(self, *parameters: str) -> Tuple[float, ...]:\n \"\"\"\n Get between 2 and 6 parameters at a single instant. This provides a \n coherent snapshot of measured signals. Pick up to 6 from: X, Y, R, θ, \n the aux inputs 1-4, frequency, or what is currently displayed on \n channels 1 and 2.\n\n Reading X and Y (or R and θ) gives a coherent snapshot of the signal.\n Snap is important when the time constant is very short, a time constant\n less than 100 ms.\n\n Args:\n *parameters\n From 2 to 6 strings of names of parameters for which the values\n are requested. 
including: 'x', 'y', 'r', 'p', 'phase' or 'θ',\n 'aux1', 'aux2', 'aux3', 'aux4', 'freq', 'ch1', and 'ch2'.\n \n Returns:\n A tuple of floating point values in the same order as requested.\n\n Examples:\n lockin.snap('x','y') -> tuple(x,y)\n \n lockin.snap('aux1','aux2','freq','phase') \n -> tuple(aux1,aux2,freq,phase)\n\n Note:\n Volts for x, y, r, and aux 1-4\n Degrees for θ\n Hertz for freq\n Unknown for ch1 and ch2. It will depend on what was set.\n\n - If X,Y,R and θ are all read, then the values of X,Y are recorded\n approximately 10 µs apart from R,θ. Thus, the values of X and Y \n may not yield the exact values of R and θ from a single snap.\n - The values of the Aux Inputs may have an uncertainty of \n up to 32 µs.\n - The frequency is computed only every other period or 40 ms, \n whichever is longer. \n \"\"\"\n if not 2 <= len(parameters) <= 6:\n raise KeyError(\n 'It is only possible to request values of 2 to 6 parameters'\n ' at a time.')\n\n for name in parameters:\n if name.lower() not in self.SNAP_PARAMETERS:\n raise KeyError(f'{name} is an unknown parameter. Refer'\n f' to `SNAP_PARAMETERS` for a list of valid'\n f' parameter names')\n\n p_ids = [self.SNAP_PARAMETERS[name.lower()] for name in parameters]\n output = self.ask(f'SNAP? {\",\".join(p_ids)}')\n\n return tuple(float(val) for val in output.split(','))\n\n def increment_sensitivity(self):\n \"\"\"\n Increment the sensitivity setting of the lock-in. This is equivalent\n to pushing the sensitivity up button on the front panel. This has no\n effect if the sensitivity is already at the maximum.\n\n Returns:\n Whether or not the sensitivity was actually changed.\n \"\"\"\n return self._change_sensitivity(1)\n\n def decrement_sensitivity(self):\n \"\"\"\n Decrement the sensitivity setting of the lock-in. This is equivalent\n to pushing the sensitivity down button on the front panel. This has no\n effect if the sensitivity is already at the minimum.\n\n Returns:\n Whether or not the sensitivity was actually changed.\n \"\"\"\n return self._change_sensitivity(-1)\n\n def _change_sensitivity(self, dn):\n _ = self.sensitivity.get()\n n = int(self.sensitivity.raw_value)\n if self.input_config() in ['a', 'a-b']:\n n_to = self._N_TO_VOLT\n else:\n n_to = self._N_TO_CURR\n\n if n + dn > max(n_to.keys()) or n + dn < min(n_to.keys()):\n return False\n\n self.sensitivity.set(n_to[n + dn])\n return True\n\n def _set_buffer_SR(self, SR):\n self.write('SRAT {}'.format(SR))\n self._buffer1_ready = False\n self._buffer2_ready = False\n\n def _get_ch_ratio(self, channel):\n val_mapping = {1: {0: 'none',\n 1: 'Aux In 1',\n 2: 'Aux In 2'},\n 2: {0: 'none',\n 1: 'Aux In 3',\n 2: 'Aux In 4'}}\n resp = int(self.ask('DDEF ? {}'.format(channel)).split(',')[1])\n\n return val_mapping[channel][resp]\n\n def _set_ch_ratio(self, channel, ratio):\n val_mapping = {1: {'none': 0,\n 'Aux In 1': 1,\n 'Aux In 2': 2},\n 2: {'none': 0,\n 'Aux In 3': 1,\n 'Aux In 4': 2}}\n vals = val_mapping[channel].keys()\n if ratio not in vals:\n raise ValueError('{} not in {}'.format(ratio, vals))\n ratio = val_mapping[channel][ratio]\n disp_val = int(self.ask('DDEF ? {}'.format(channel)).split(',')[0])\n self.write('DDEF {}, {}, {}'.format(channel, disp_val, ratio))\n self._buffer_ready = False\n\n def _get_ch_display(self, channel):\n val_mapping = {1: {0: 'X',\n 1: 'R',\n 2: 'X Noise',\n 3: 'Aux In 1',\n 4: 'Aux In 2'},\n 2: {0: 'Y',\n 1: 'Phase',\n 2: 'Y Noise',\n 3: 'Aux In 3',\n 4: 'Aux In 4'}}\n resp = int(self.ask('DDEF ? 
{}'.format(channel)).split(',')[0])\n\n return val_mapping[channel][resp]\n\n def _set_ch_display(self, channel, disp):\n val_mapping = {1: {'X': 0,\n 'R': 1,\n 'X Noise': 2,\n 'Aux In 1': 3,\n 'Aux In 2': 4},\n 2: {'Y': 0,\n 'Phase': 1,\n 'Y Noise': 2,\n 'Aux In 3': 3,\n 'Aux In 4': 4}}\n vals = val_mapping[channel].keys()\n if disp not in vals:\n raise ValueError('{} not in {}'.format(disp, vals))\n disp = val_mapping[channel][disp]\n # Since ratio AND display are set simultaneously,\n # we get and then re-set the current ratio value\n ratio_val = int(self.ask('DDEF ? {}'.format(channel)).split(',')[1])\n self.write('DDEF {}, {}, {}'.format(channel, disp, ratio_val))\n self._buffer_ready = False\n\n def _set_units(self, unit):\n # TODO:\n # make a public parameter function that allows to change the units\n for param in [self.X, self.Y, self.R, self.sensitivity]:\n param.unit = unit\n\n def _get_input_config(self, s):\n mode = self._N_TO_INPUT_CONFIG[int(s)]\n\n if mode in ['a', 'a-b']:\n self.sensitivity.vals = self._VOLT_ENUM\n self._set_units('V')\n else:\n self.sensitivity.vals = self._CURR_ENUM\n self._set_units('A')\n\n return mode\n\n def _set_input_config(self, s):\n if s in ['a', 'a-b']:\n self.sensitivity.vals = self._VOLT_ENUM\n self._set_units('V')\n else:\n self.sensitivity.vals = self._CURR_ENUM\n self._set_units('A')\n\n return self._INPUT_CONFIG_TO_N[s]\n\n def _get_sensitivity(self, s):\n if self.input_config() in ['a', 'a-b']:\n return self._N_TO_VOLT[int(s)]\n else:\n return self._N_TO_CURR[int(s)]\n\n def _set_sensitivity(self, s):\n if self.input_config() in ['a', 'a-b']:\n return self._VOLT_TO_N[s]\n else:\n return self._CURR_TO_N[s]\n"
] |
[
[
"numpy.linspace",
"numpy.fromstring",
"numpy.arange"
]
] |
IINemo/libact
|
[
"f9ddedcc009bfc70beeb04c9018b22eeaeafb155"
] |
[
"libact/query_strategies/uncertainty_sampling.py"
] |
[
"\"\"\" Uncertainty Sampling\n\nThis module contains a class that implements two of the most well-known\nuncertainty sampling query strategies: the least confidence method and the\nsmallest margin method (margin sampling).\n\n\"\"\"\nimport numpy as np\n\nfrom libact.base.interfaces import QueryStrategy, ContinuousModel, \\\n ProbabilisticModel\nfrom libact.base.dataset import ensure_sklearn_compat\nfrom libact.utils import inherit_docstring_from, zip\n\n\nclass UncertaintySampling(QueryStrategy):\n\n \"\"\"Uncertainty Sampling\n\n This class implements Uncertainty Sampling active learning algorithm [1]_.\n\n Parameters\n ----------\n model: :py:class:`libact.base.interfaces.ContinuousModel` or :py:class:`libact.base.interfaces.ProbabilisticModel` object instance\n The base model used for training.\n\n method: {'lc', 'sm', 'entropy'}, optional (default='lc')\n least confidence (lc), it queries the instance whose posterior\n probability of being positive is nearest 0.5 (for binary\n classification);\n smallest margin (sm), it queries the instance whose posterior\n probability gap between the most and the second probable labels is\n minimal;\n entropy, requires :py:class:`libact.base.interfaces.ProbabilisticModel`\n to be passed in as model parameter;\n\n\n Attributes\n ----------\n model: :py:class:`libact.base.interfaces.ContinuousModel` or :py:class:`libact.base.interfaces.ProbabilisticModel` object instance\n The model trained in last query.\n\n\n Examples\n --------\n Here is an example of declaring a UncertaintySampling query_strategy\n object:\n\n .. code-block:: python\n\n from libact.query_strategies import UncertaintySampling\n from libact.models import LogisticRegression\n\n qs = UncertaintySampling(\n dataset, # Dataset object\n model=LogisticRegression(C=0.1)\n )\n\n Note that the model given in the :code:`model` parameter must be a\n :py:class:`ContinuousModel` which supports predict_real method.\n\n\n References\n ----------\n\n .. [1] Settles, Burr. \"Active learning literature survey.\" University of\n Wisconsin, Madison 52.55-66 (2010): 11.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(UncertaintySampling, self).__init__(*args, **kwargs)\n\n self.model = kwargs.pop('model', None)\n if self.model is None:\n raise TypeError(\n \"__init__() missing required keyword-only argument: 'model'\"\n )\n if not isinstance(self.model, ContinuousModel) and \\\n not isinstance(self.model, ProbabilisticModel):\n raise TypeError(\n \"model has to be a ContinuousModel or ProbabilisticModel\"\n )\n\n self.model.train(self.dataset)\n\n self.method = kwargs.pop('method', 'lc')\n if self.method not in ['lc', 'sm', 'entropy']:\n raise TypeError(\n \"supported methods are ['lc', 'sm', 'entropy'], the given one \"\n \"is: \" + self.method\n )\n\n if self.method=='entropy' and \\\n not isinstance(self.model, ProbabilisticModel):\n raise TypeError(\n \"method 'entropy' requires model to be a ProbabilisticModel\"\n )\n\n def make_query(self, return_score=False):\n \"\"\"Return the index of the sample to be queried and labeled and\n selection score of each sample. 
Read-only.\n\n No modification to the internal states.\n\n Returns\n -------\n ask_id : int\n The index of the next unlabeled sample to be queried and labeled.\n\n score : list of (index, score) tuple\n Selection score of unlabled entries, the larger the better.\n\n \"\"\"\n dataset = self.dataset\n self.model.train(dataset)\n\n unlabeled_entry_ids, X_pool = zip(*dataset.get_unlabeled_entries())\n X_pool = ensure_sklearn_compat(X_pool)\n\n if isinstance(self.model, ProbabilisticModel):\n dvalue = self.model.predict_proba(X_pool)\n elif isinstance(self.model, ContinuousModel):\n dvalue = self.model.predict_real(X_pool)\n\n if self.method == 'lc': # least confident\n score = -np.max(dvalue, axis=1)\n\n elif self.method == 'sm': # smallest margin\n if np.shape(dvalue)[1] > 2:\n # Find 2 largest decision values\n dvalue = -(np.partition(-dvalue, 2, axis=1)[:, :2])\n score = -np.abs(dvalue[:, 0] - dvalue[:, 1])\n\n elif self.method == 'entropy':\n score = np.sum(-dvalue * np.log(dvalue), axis=1)\n\n ask_id = np.argmax(score)\n\n if return_score:\n return unlabeled_entry_ids[ask_id], \\\n list(zip(unlabeled_entry_ids, score))\n else:\n return unlabeled_entry_ids[ask_id]\n"
] |
[
[
"numpy.max",
"numpy.log",
"numpy.shape",
"numpy.argmax",
"numpy.abs",
"numpy.partition"
]
] |
tagny/iLID
|
[
"38f5dcae0dc84fd9b78e170748aa38cd8f524c70"
] |
[
"web-server/server.py"
] |
[
"# System imports\nimport sys, subprocess, time\nimport numpy as np\nfrom os import path\nfrom flask.ext.cors import CORS\nfrom flask import *\nfrom flask.json import jsonify\nfrom werkzeug import secure_filename\nfrom flask_extensions import *\n\nlib_path = os.path.abspath(os.path.join('../evaluation'))\nsys.path.append(lib_path)\nfrom predict import predict\nfrom convert_to_mono_wav import convert as convert_to_mono_wav\n\nstatic_assets_path = path.join(path.dirname(__file__), \"dist\")\napp = Flask(__name__, static_folder= static_assets_path)\nCORS(app)\n\n\n# ----- Routes ----------\n\n@app.route(\"/\", defaults={\"fall_through\": \"\"})\n@app.route(\"/<path:fall_through>\")\ndef index(fall_through):\n if fall_through:\n return redirect(url_for(\"index\"))\n else:\n return app.send_static_file(\"index.html\")\n\n\n@app.route(\"/dist/<path:asset_path>\")\ndef send_static(asset_path):\n return send_from_directory(static_assets_path, asset_path)\n\n\n@app.route(\"/audio/<path:audio_path>\")\ndef send_audio(audio_path):\n return send_file_partial(path.join(app.config[\"UPLOAD_FOLDER\"], audio_path))\n\n\n@app.route(\"/api/upload\", methods=[\"POST\"])\ndef uploadAudio():\n\n def is_allowed(filename):\n return len(filter(lambda ext: ext in filename, [\"wav\", \"mp3\", \"ogg\"])) > 0\n\n file = request.files.getlist(\"audio\")[0]\n\n if file and is_allowed(file.filename):\n filename = secure_filename(file.filename)\n file_path = path.join(app.config[\"UPLOAD_FOLDER\"], filename)\n file.save(file_path)\n\n # convert_to_mono_wav(file_path, True)\n\n response = jsonify(get_prediction(file_path))\n else:\n response = bad_request(\"Invalid file\")\n\n return response\n\n\n@app.route(\"/api/example/<int:example_id>\")\ndef use_example(example_id):\n if example_id <= 3:\n filename = \"audio%s.wav\" % example_id\n file_path = path.join(app.config[\"UPLOAD_FOLDER\"], \"examples\", filename)\n response = jsonify(get_prediction(file_path))\n else:\n response = bad_request(\"Invalid Example\")\n\n return response\n\n\ndef bad_request(reason):\n response = jsonify({\"error\" : reason})\n response.status_code = 400\n return response\n\n\n# -------- Prediction & Features --------\ndef get_prediction(file_path):\n\n LABEL_MAP = {\n 0 : \"English\",\n 1 : \"German\",\n 2 : \"French\",\n 3 : \"Spanish\"\n }\n\n # TODO remove this for production\n # predictions = [[0.3, 0.7]]\n predictions = predict(file_path, app.config[\"PROTOTXT\"], app.config[\"MODEL\"], app.config[\"UPLOAD_FOLDER\"])\n predictions = np.mean(predictions, axis=0).tolist()\n\n print(predictions)\n\n pred_with_label = {LABEL_MAP[index] : prob for index, prob in enumerate(predictions)}\n\n file_path = file_path + \"?cachebuster=%s\" % time.time()\n result = {\n \"audio\" : {\n \"url\" : \"%s\" % file_path,\n },\n \"predictions\" : pred_with_label\n }\n\n return result\n\n\nif __name__ == \"__main__\":\n # Start the server\n app.config.update(\n DEBUG = True,\n SECRET_KEY = \"asassdfs\",\n CORS_HEADERS = \"Content-Type\",\n UPLOAD_FOLDER = \"audio\",\n MODEL = os.path.join(\"model\", \"berlin_net_iter_10000.caffemodel\"),\n PROTOTXT = os.path.join(\"model\", \"net_mel_2lang_bn_deploy.prototxt\")\n )\n\n # Make sure all frontend assets are compiled\n # subprocess.Popen(\"webpack\")\n\n # Start the Flask app\n app.run(port=9000)\n"
] |
[
[
"numpy.mean"
]
] |
heavengate/models
|
[
"f05c910f8a8e3105de8c2f1d81e83ca00d2c7ec7",
"f05c910f8a8e3105de8c2f1d81e83ca00d2c7ec7",
"f05c910f8a8e3105de8c2f1d81e83ca00d2c7ec7"
] |
[
"PaddleCV/PaddleVideo/utils/train_utils.py",
"PaddleRec/gnn/train.py",
"PaddleCV/PaddleDetection/ppdet/data/tools/x2coco.py"
] |
[
"# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport sys\nimport time\nimport numpy as np\nimport paddle\nimport paddle.fluid as fluid\nfrom paddle.fluid import profiler\nimport logging\nimport shutil\n\nlogger = logging.getLogger(__name__)\n\n\ndef log_lr_and_step():\n try:\n # In optimizers, if learning_rate is set as constant, lr_var\n # name is 'learning_rate_0', and iteration counter is not \n # recorded. If learning_rate is set as decayed values from \n # learning_rate_scheduler, lr_var name is 'learning_rate', \n # and iteration counter is recorded with name '@LR_DECAY_COUNTER@', \n # better impliment is required here\n lr_var = fluid.global_scope().find_var(\"learning_rate\")\n if not lr_var:\n lr_var = fluid.global_scope().find_var(\"learning_rate_0\")\n lr = np.array(lr_var.get_tensor())\n\n lr_count = '[-]'\n lr_count_var = fluid.global_scope().find_var(\"@LR_DECAY_COUNTER@\")\n if lr_count_var:\n lr_count = np.array(lr_count_var.get_tensor())\n logger.info(\"------- learning rate {}, learning rate counter {} -----\"\n .format(np.array(lr), np.array(lr_count)))\n except:\n logger.warn(\"Unable to get learning_rate and LR_DECAY_COUNTER.\")\n\n\ndef test_with_dataloader(exe,\n compiled_test_prog,\n test_dataloader,\n test_fetch_list,\n test_metrics,\n log_interval=0,\n save_model_name=''):\n if not test_dataloader:\n logger.error(\"[TEST] get dataloader failed.\")\n test_metrics.reset()\n test_iter = 0\n\n for data in test_dataloader():\n test_outs = exe.run(compiled_test_prog,\n fetch_list=test_fetch_list,\n feed=data)\n test_metrics.accumulate(test_outs)\n if log_interval > 0 and test_iter % log_interval == 0:\n test_metrics.calculate_and_log_out(test_outs, \\\n info = '[TEST] test_iter {} '.format(test_iter))\n test_iter += 1\n test_metrics.finalize_and_log_out(\"[TEST] Finish\")\n\n\ndef train_with_dataloader(exe, train_prog, compiled_train_prog, train_dataloader, \\\n train_fetch_list, train_metrics, epochs = 10, \\\n log_interval = 0, valid_interval = 0, save_dir = './', \\\n save_model_name = 'model', fix_random_seed = False, \\\n compiled_test_prog = None, test_dataloader = None, \\\n test_fetch_list = None, test_metrics = None, \\\n is_profiler = None, profiler_path = None):\n if not train_dataloader:\n logger.error(\"[TRAIN] get dataloader failed.\")\n epoch_periods = []\n train_loss = 0\n for epoch in range(epochs):\n log_lr_and_step()\n\n train_iter = 0\n epoch_periods = []\n\n for data in train_dataloader():\n cur_time = time.time()\n train_outs = exe.run(compiled_train_prog,\n fetch_list=train_fetch_list,\n feed=data)\n period = time.time() - cur_time\n epoch_periods.append(period)\n if log_interval > 0 and (train_iter % log_interval == 0):\n train_metrics.calculate_and_log_out(train_outs, \\\n info = '[TRAIN] Epoch {}, iter {} '.format(epoch, train_iter))\n train_iter += 1\n \n # NOTE: profiler tools, used for benchmark\n if is_profiler and epoch == 0 and train_iter == log_interval:\n 
profiler.start_profiler(\"All\")\n elif is_profiler and epoch == 0 and train_iter == log_interval + 5:\n profiler.stop_profiler(\"total\", profiler_path)\n return\n\n if len(epoch_periods) < 1:\n logger.info(\n 'No iteration was executed, please check the data reader')\n sys.exit(1)\n\n logger.info('[TRAIN] Epoch {} training finished, average time: {}'.\n format(epoch, np.mean(epoch_periods[1:])))\n save_model(\n exe,\n train_prog,\n save_dir,\n save_model_name,\n \"_epoch{}\".format(epoch),\n save_type='.pdckpt')\n save_model(\n exe,\n train_prog,\n save_dir,\n save_model_name,\n \"_epoch{}\".format(epoch),\n save_type='.pdparams')\n if compiled_test_prog and valid_interval > 0 and (\n epoch + 1) % valid_interval == 0:\n test_with_dataloader(exe, compiled_test_prog, test_dataloader,\n test_fetch_list, test_metrics, log_interval,\n save_model_name)\n\n save_model(\n exe,\n train_prog,\n save_dir,\n save_model_name,\n '_final',\n save_type='.pdckpt')\n save_model(\n exe,\n train_prog,\n save_dir,\n save_model_name,\n '_final',\n save_type='.pdparams')\n #when fix_random seed for debug\n if fix_random_seed:\n cards = os.environ.get('CUDA_VISIBLE_DEVICES')\n gpu_num = len(cards.split(\",\"))\n print(\"kpis\\ttrain_cost_card{}\\t{}\".format(gpu_num, train_loss))\n print(\"kpis\\ttrain_speed_card{}\\t{}\".format(gpu_num,\n np.mean(epoch_periods)))\n\n\ndef save_model(exe,\n program,\n save_dir,\n model_name,\n postfix=None,\n save_type='.pdckpt'):\n \"\"\"\n save_type: '.pdckpt' or '.pdparams', '.pdckpt' for all persistable variables, \n '.pdparams' for parameters only\n \"\"\"\n if not os.path.isdir(save_dir):\n os.makedirs(save_dir)\n saved_model_name = model_name + postfix + save_type\n\n if save_type == '.pdckpt':\n fluid.io.save_persistables(\n exe, save_dir, main_program=program, filename=saved_model_name)\n elif save_type == '.pdparams':\n fluid.io.save_params(\n exe, save_dir, main_program=program, filename=saved_model_name)\n else:\n raise NotImplementedError(\n 'save_type {} not implemented, it should be either {} or {}'\n .format(save_type, '.pdckpt', '.pdparams'))\n return\n",
"# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve.\n#\n#Licensed under the Apache License, Version 2.0 (the \"License\");\n#you may not use this file except in compliance with the License.\n#You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n#Unless required by applicable law or agreed to in writing, software\n#distributed under the License is distributed on an \"AS IS\" BASIS,\n#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#See the License for the specific language governing permissions and\n#limitations under the License.\n\nimport numpy as np\nimport os\nfrom functools import partial\nimport logging\nimport time\nimport paddle\nimport paddle.fluid as fluid\nimport argparse\nimport network\nimport reader\n\nlogging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s')\nlogger = logging.getLogger(\"fluid\")\nlogger.setLevel(logging.INFO)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\"gnn\")\n parser.add_argument(\n '--train_path', type=str, default='./data/diginetica/train.txt', help='dir of training data')\n parser.add_argument(\n '--config_path', type=str, default='./data/diginetica/config.txt', help='dir of config')\n parser.add_argument(\n '--model_path', type=str, default='./saved_model', help=\"path of model parameters\")\n parser.add_argument(\n '--epoch_num', type=int, default=30, help='number of epochs to train for')\n parser.add_argument(\n '--batch_size', type=int, default=100, help='input batch size')\n parser.add_argument(\n '--hidden_size', type=int, default=100, help='hidden state size')\n parser.add_argument(\n '--l2', type=float, default=1e-5, help='l2 penalty')\n parser.add_argument(\n '--lr', type=float, default=0.001, help='learning rate')\n parser.add_argument(\n '--step', type=int, default=1, help='gnn propogation steps')\n parser.add_argument(\n '--lr_dc', type=float, default=0.1, help='learning rate decay rate')\n parser.add_argument(\n '--lr_dc_step', type=int, default=3, help='the number of steps after which the learning rate decay')\n parser.add_argument(\n '--use_cuda', type=int, default=0, help='whether to use gpu')\n parser.add_argument(\n '--use_parallel', type=int, default=1, help='whether to use parallel executor')\n parser.add_argument(\n '--enable_ce', action='store_true', help='If set, run the task with continuous evaluation logs.')\n return parser.parse_args()\n\n\ndef train():\n args = parse_args()\n\n if args.enable_ce:\n SEED = 102\n fluid.default_main_program().random_seed = SEED\n fluid.default_startup_program().random_seed = SEED\n\n batch_size = args.batch_size\n items_num = reader.read_config(args.config_path)\n loss, acc, py_reader, feed_datas = network.network(items_num, args.hidden_size,\n args.step)\n\n data_reader = reader.Data(args.train_path, True)\n logger.info(\"load data complete\")\n\n use_cuda = True if args.use_cuda else False\n use_parallel = True if args.use_parallel else False\n place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n\n exe = fluid.Executor(place)\n step_per_epoch = data_reader.length // batch_size\n optimizer = fluid.optimizer.Adam(\n learning_rate=fluid.layers.exponential_decay(\n learning_rate=args.lr,\n decay_steps=step_per_epoch * args.lr_dc_step,\n decay_rate=args.lr_dc),\n regularization=fluid.regularizer.L2DecayRegularizer(\n regularization_coeff=args.l2))\n optimizer.minimize(loss)\n\n exe.run(fluid.default_startup_program())\n\n all_vocab = 
fluid.global_scope().var(\"all_vocab\").get_tensor()\n all_vocab.set(\n np.arange(1, items_num).astype(\"int64\").reshape((-1, 1)), place)\n\n feed_list = [e.name for e in feed_datas]\n\n if use_parallel:\n train_exe = fluid.ParallelExecutor(\n use_cuda=use_cuda, loss_name=loss.name)\n else:\n train_exe = exe\n\n logger.info(\"begin train\")\n\n total_time = []\n ce_info = []\n start_time = time.time()\n loss_sum = 0.0\n acc_sum = 0.0\n global_step = 0\n PRINT_STEP = 500\n py_reader.decorate_paddle_reader(data_reader.reader(batch_size, batch_size * 20, True))\n for i in range(args.epoch_num):\n epoch_sum = []\n py_reader.start()\n try:\n while True:\n res = train_exe.run(fetch_list=[loss.name, acc.name])\n loss_sum += res[0].mean()\n acc_sum += res[1].mean()\n epoch_sum.append(res[0].mean())\n global_step += 1\n if global_step % PRINT_STEP == 0:\n ce_info.append([loss_sum / PRINT_STEP, acc_sum / PRINT_STEP])\n total_time.append(time.time() - start_time)\n logger.info(\"global_step: %d, loss: %.4lf, train_acc: %.4lf\" % (\n global_step, loss_sum / PRINT_STEP, acc_sum / PRINT_STEP))\n loss_sum = 0.0\n acc_sum = 0.0\n start_time = time.time()\n except fluid.core.EOFException:\n py_reader.reset()\n logger.info(\"epoch loss: %.4lf\" % (np.mean(epoch_sum)))\n save_dir = os.path.join(args.model_path, \"epoch_\" + str(i))\n fetch_vars = [loss, acc]\n fluid.io.save_inference_model(save_dir, feed_list, fetch_vars, exe)\n logger.info(\"model saved in \" + save_dir)\n\n # only for ce\n if args.enable_ce:\n gpu_num = get_cards(args)\n ce_loss = 0\n ce_acc = 0\n ce_time = 0\n try:\n ce_loss = ce_info[-1][0]\n ce_acc = ce_info[-1][1]\n ce_time = total_time[-1]\n except:\n print(\"ce info error\")\n print(\"kpis\\teach_pass_duration_card%s\\t%s\" %\n (gpu_num, ce_time))\n print(\"kpis\\ttrain_loss_card%s\\t%f\" %\n (gpu_num, ce_loss))\n print(\"kpis\\ttrain_acc_card%s\\t%f\" %\n (gpu_num, ce_acc))\n\n\ndef get_cards(args):\n num = 0\n cards = os.environ.get('CUDA_VISIBLE_DEVICES')\n num = len(cards.split(\",\"))\n return num\n\n\nif __name__ == \"__main__\":\n train()\n",
"#!/usr/bin/env python\n# coding: utf-8\n# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nimport glob\nimport json\nimport os\nimport os.path as osp\nimport sys\nimport shutil\n\nimport numpy as np\nimport PIL.ImageDraw\n\n\nclass MyEncoder(json.JSONEncoder):\n def default(self, obj):\n if isinstance(obj, np.integer):\n return int(obj)\n elif isinstance(obj, np.floating):\n return float(obj)\n elif isinstance(obj, np.ndarray):\n return obj.tolist()\n else:\n return super(MyEncoder, self).default(obj)\n\n\ndef getbbox(self, points):\n polygons = points\n mask = self.polygons_to_mask([self.height, self.width], polygons)\n return self.mask2box(mask)\n\n\ndef images_labelme(data, num):\n image = {}\n image['height'] = data['imageHeight']\n image['width'] = data['imageWidth']\n image['id'] = num + 1\n image['file_name'] = data['imagePath'].split('/')[-1]\n return image\n\ndef images_cityscape(data, num, img_file):\n image = {}\n image['height'] = data['imgHeight']\n image['width'] = data['imgWidth']\n image['id'] = num + 1\n image['file_name'] = img_file\n return image \n\n\ndef categories(label, labels_list):\n category = {}\n category['supercategory'] = 'component'\n category['id'] = len(labels_list) + 1\n category['name'] = label\n return category\n\n\ndef annotations_rectangle(points, label, image_num, object_num, label_to_num):\n annotation = {}\n seg_points = np.asarray(points).copy()\n seg_points[1, :] = np.asarray(points)[2, :]\n seg_points[2, :] = np.asarray(points)[1, :]\n annotation['segmentation'] = [list(seg_points.flatten())]\n annotation['iscrowd'] = 0\n annotation['image_id'] = image_num + 1\n annotation['bbox'] = list(\n map(float, [\n points[0][0], points[0][1], points[1][0] - points[0][0], points[1][\n 1] - points[0][1]\n ]))\n annotation['area'] = annotation['bbox'][2] * annotation['bbox'][3]\n annotation['category_id'] = label_to_num[label]\n annotation['id'] = object_num + 1\n return annotation\n\n\ndef annotations_polygon(height, width, points, label, image_num, object_num, label_to_num):\n annotation = {}\n annotation['segmentation'] = [list(np.asarray(points).flatten())]\n annotation['iscrowd'] = 0\n annotation['image_id'] = image_num + 1\n annotation['bbox'] = list(map(float, get_bbox(height, width, points)))\n annotation['area'] = annotation['bbox'][2] * annotation['bbox'][3]\n annotation['category_id'] = label_to_num[label]\n annotation['id'] = object_num + 1\n return annotation\n\n\ndef get_bbox(height, width, points):\n polygons = points\n mask = np.zeros([height, width], dtype=np.uint8)\n mask = PIL.Image.fromarray(mask)\n xy = list(map(tuple, polygons))\n PIL.ImageDraw.Draw(mask).polygon(xy=xy, outline=1, fill=1)\n mask = np.array(mask, dtype=bool)\n index = np.argwhere(mask == 1)\n rows = index[:, 0]\n clos = index[:, 1]\n left_top_r = np.min(rows)\n left_top_c = np.min(clos)\n right_bottom_r = np.max(rows)\n right_bottom_c = np.max(clos)\n return [\n left_top_c, left_top_r, 
right_bottom_c - left_top_c,\n right_bottom_r - left_top_r\n ]\n\n\ndef deal_json(ds_type, img_path, json_path):\n data_coco = {}\n label_to_num = {}\n images_list = []\n categories_list = []\n annotations_list = []\n labels_list = []\n image_num = -1\n object_num = -1\n for img_file in os.listdir(img_path):\n img_label = img_file.split('.')[0]\n if img_file.split('.')[-1] not in ['bmp', 'jpg', 'jpeg', 'png', 'JPEG', 'JPG', 'PNG']:\n continue\n label_file = osp.join(json_path, img_label + '.json')\n print('Generating dataset from:', label_file)\n image_num = image_num + 1\n with open(label_file) as f:\n data = json.load(f)\n if ds_type == 'labelme':\n images_list.append(images_labelme(data, image_num))\n elif ds_type == 'cityscape':\n images_list.append(images_cityscape(data, image_num, img_file)) \n if ds_type == 'labelme':\n for shapes in data['shapes']:\n object_num = object_num + 1\n label = shapes['label']\n if label not in labels_list:\n categories_list.append(categories(label, labels_list))\n labels_list.append(label)\n label_to_num[label] = len(labels_list)\n points = shapes['points']\n p_type = shapes['shape_type']\n if p_type == 'polygon':\n annotations_list.append(\n annotations_polygon(data['imageHeight'], data[\n 'imageWidth'], points, label, image_num, object_num, label_to_num))\n\n if p_type == 'rectangle':\n points.append([points[0][0], points[1][1]])\n points.append([points[1][0], points[0][1]])\n annotations_list.append(\n annotations_rectangle(points, label, image_num, object_num, label_to_num))\n elif ds_type == 'cityscape':\n for shapes in data['objects']:\n object_num = object_num + 1\n label = shapes['label']\n if label not in labels_list:\n categories_list.append(categories(label, labels_list))\n labels_list.append(label)\n label_to_num[label] = len(labels_list)\n points = shapes['polygon']\n annotations_list.append(\n annotations_polygon(data['imgHeight'], data[\n 'imgWidth'], points, label, image_num, object_num, label_to_num))\n data_coco['images'] = images_list\n data_coco['categories'] = categories_list\n data_coco['annotations'] = annotations_list\n return data_coco\n\n\ndef main():\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--dataset_type', help='the type of dataset')\n parser.add_argument('--json_input_dir', help='input annotated directory')\n parser.add_argument('--image_input_dir', help='image directory')\n parser.add_argument(\n '--output_dir', help='output dataset directory', default='../../../')\n parser.add_argument(\n '--train_proportion',\n help='the proportion of train dataset',\n type=float,\n default=1.0)\n parser.add_argument(\n '--val_proportion',\n help='the proportion of validation dataset',\n type=float,\n default=0.0)\n parser.add_argument(\n '--test_proportion',\n help='the proportion of test dataset',\n type=float,\n default=0.0)\n args = parser.parse_args()\n try:\n assert args.dataset_type in ['labelme', 'cityscape']\n except AssertionError as e:\n print('Now only support the cityscape dataset and labelme dataset!!')\n os._exit(0)\n try:\n assert os.path.exists(args.json_input_dir)\n except AssertionError as e:\n print('The json folder does not exist!')\n os._exit(0)\n try:\n assert os.path.exists(args.image_input_dir)\n except AssertionError as e:\n print('The image folder does not exist!')\n os._exit(0)\n try:\n assert args.train_proportion + args.val_proportion + args.test_proportion == 1.0\n except AssertionError as e:\n print(\n 'The sum of pqoportion of 
training, validation and test datase must be 1!'\n )\n os._exit(0)\n\n # Allocate the dataset.\n total_num = len(glob.glob(osp.join(args.json_input_dir, '*.json')))\n if args.train_proportion != 0:\n train_num = int(total_num * args.train_proportion)\n os.makedirs(args.output_dir + '/train')\n else:\n train_num = 0\n if args.val_proportion == 0.0:\n val_num = 0\n test_num = total_num - train_num\n if args.test_proportion != 0.0:\n os.makedirs(args.output_dir + '/test')\n else:\n val_num = int(total_num * args.val_proportion)\n test_num = total_num - train_num - val_num\n os.makedirs(args.output_dir + '/val')\n if args.test_proportion != 0.0:\n os.makedirs(args.output_dir + '/test')\n count = 1\n for img_name in os.listdir(args.image_input_dir):\n if count <= train_num:\n shutil.copyfile(\n osp.join(args.image_input_dir, img_name),\n osp.join(args.output_dir + '/train/', img_name))\n else:\n if count <= train_num + val_num:\n shutil.copyfile(\n osp.join(args.image_input_dir, img_name),\n osp.join(args.output_dir + '/val/', img_name))\n else:\n shutil.copyfile(\n osp.join(args.image_input_dir, img_name),\n osp.join(args.output_dir + '/test/', img_name))\n count = count + 1\n\n # Deal with the json files.\n if not os.path.exists(args.output_dir + '/annotations'):\n os.makedirs(args.output_dir + '/annotations')\n if args.train_proportion != 0:\n train_data_coco = deal_json(args.dataset_type,\n args.output_dir + '/train',\n args.json_input_dir)\n train_json_path = osp.join(args.output_dir + '/annotations',\n 'instance_train.json')\n json.dump(\n train_data_coco,\n open(train_json_path, 'w'),\n indent=4,\n cls=MyEncoder)\n if args.val_proportion != 0:\n val_data_coco = deal_json(args.dataset_type,\n args.output_dir + '/val', \n args.json_input_dir)\n val_json_path = osp.join(args.output_dir + '/annotations',\n 'instance_val.json')\n json.dump(\n val_data_coco, open(val_json_path, 'w'), indent=4, cls=MyEncoder)\n if args.test_proportion != 0:\n test_data_coco = deal_json(args.dataset_type,\n args.output_dir + '/test',\n args.json_input_dir)\n test_json_path = osp.join(args.output_dir + '/annotations',\n 'instance_test.json')\n json.dump(\n test_data_coco, open(test_json_path, 'w'), indent=4, cls=MyEncoder)\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"numpy.array",
"numpy.mean"
],
[
"numpy.arange",
"numpy.mean"
],
[
"numpy.max",
"numpy.array",
"numpy.asarray",
"numpy.zeros",
"numpy.min",
"numpy.argwhere"
]
] |
Aiden-Jeon/pytorch-lightning
|
[
"963c26764682fa4cf64c93c5a7572ae0040e9c32"
] |
[
"tests/callbacks/test_early_stopping.py"
] |
[
"# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport logging\nimport os\nimport pickle\nfrom typing import List, Optional\nfrom unittest import mock\n\nimport cloudpickle\nimport numpy as np\nimport pytest\nimport torch\n\nfrom pytorch_lightning import seed_everything, Trainer\nfrom pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\nfrom tests.helpers import BoringModel\nfrom tests.helpers.datamodules import ClassifDataModule\nfrom tests.helpers.runif import RunIf\nfrom tests.helpers.simple_models import ClassificationModel\n\n_logger = logging.getLogger(__name__)\n\n\nclass EarlyStoppingTestRestore(EarlyStopping):\n # this class has to be defined outside the test function, otherwise we get pickle error\n def __init__(self, expected_state, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.expected_state = expected_state\n # cache the state for each epoch\n self.saved_states = []\n\n def on_train_start(self, trainer, pl_module):\n if self.expected_state:\n assert self.on_save_checkpoint(trainer, pl_module, {}) == self.expected_state\n\n def on_train_epoch_end(self, trainer, pl_module):\n super().on_train_epoch_end(trainer, pl_module)\n self.saved_states.append(self.on_save_checkpoint(trainer, pl_module, {}).copy())\n\n\ndef test_resume_early_stopping_from_checkpoint(tmpdir):\n \"\"\"\n Prevent regressions to bugs:\n https://github.com/PyTorchLightning/pytorch-lightning/issues/1464\n https://github.com/PyTorchLightning/pytorch-lightning/issues/1463\n \"\"\"\n seed_everything(42)\n model = ClassificationModel()\n dm = ClassifDataModule()\n checkpoint_callback = ModelCheckpoint(dirpath=tmpdir, monitor=\"train_loss\", save_top_k=1)\n early_stop_callback = EarlyStoppingTestRestore(None, monitor=\"train_loss\")\n trainer = Trainer(\n default_root_dir=tmpdir,\n callbacks=[early_stop_callback, checkpoint_callback],\n num_sanity_val_steps=0,\n max_epochs=4,\n )\n trainer.fit(model, datamodule=dm)\n\n assert len(early_stop_callback.saved_states) == 4\n\n checkpoint_filepath = checkpoint_callback.kth_best_model_path\n # ensure state is persisted properly\n checkpoint = torch.load(checkpoint_filepath)\n # the checkpoint saves \"epoch + 1\"\n early_stop_callback_state = early_stop_callback.saved_states[checkpoint[\"epoch\"] - 1]\n assert 4 == len(early_stop_callback.saved_states)\n assert checkpoint[\"callbacks\"][\"EarlyStoppingTestRestore\"] == early_stop_callback_state\n\n # ensure state is reloaded properly (assertion in the callback)\n early_stop_callback = EarlyStoppingTestRestore(early_stop_callback_state, monitor=\"train_loss\")\n new_trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n resume_from_checkpoint=checkpoint_filepath,\n callbacks=[early_stop_callback],\n )\n\n with pytest.raises(MisconfigurationException, match=r\"You restored a checkpoint with current_epoch\"):\n new_trainer.fit(model)\n\n\n@mock.patch.dict(os.environ, 
{\"PL_DEV_DEBUG\": \"1\"})\ndef test_early_stopping_no_extraneous_invocations(tmpdir):\n \"\"\"Test to ensure that callback methods aren't being invoked outside of the callback handler.\"\"\"\n model = ClassificationModel()\n dm = ClassifDataModule()\n early_stop_callback = EarlyStopping(monitor=\"train_loss\")\n expected_count = 4\n trainer = Trainer(\n default_root_dir=tmpdir,\n callbacks=[early_stop_callback],\n limit_train_batches=4,\n limit_val_batches=4,\n max_epochs=expected_count,\n )\n trainer.fit(model, datamodule=dm)\n\n assert trainer.early_stopping_callback == early_stop_callback\n assert trainer.early_stopping_callbacks == [early_stop_callback]\n assert len(trainer.dev_debugger.early_stopping_history) == expected_count\n\n\n@pytest.mark.parametrize(\n \"loss_values, patience, expected_stop_epoch\",\n [([6, 5, 5, 5, 5, 5], 3, 4), ([6, 5, 4, 4, 3, 3], 1, 3), ([6, 5, 6, 5, 5, 5], 3, 4)],\n)\ndef test_early_stopping_patience(tmpdir, loss_values: list, patience: int, expected_stop_epoch: int):\n \"\"\"Test to ensure that early stopping is not triggered before patience is exhausted.\"\"\"\n\n class ModelOverrideValidationReturn(BoringModel):\n validation_return_values = torch.tensor(loss_values)\n\n def validation_epoch_end(self, outputs):\n loss = self.validation_return_values[self.current_epoch]\n self.log(\"test_val_loss\", loss)\n\n model = ModelOverrideValidationReturn()\n early_stop_callback = EarlyStopping(monitor=\"test_val_loss\", patience=patience, verbose=True)\n trainer = Trainer(\n default_root_dir=tmpdir,\n callbacks=[early_stop_callback],\n val_check_interval=1.0,\n num_sanity_val_steps=0,\n max_epochs=10,\n progress_bar_refresh_rate=0,\n )\n trainer.fit(model)\n assert trainer.current_epoch == expected_stop_epoch\n\n\n@pytest.mark.parametrize(\"validation_step_none\", [True, False])\n@pytest.mark.parametrize(\n \"loss_values, patience, expected_stop_epoch\",\n [([6, 5, 5, 5, 5, 5], 3, 4), ([6, 5, 4, 4, 3, 3], 1, 3), ([6, 5, 6, 5, 5, 5], 3, 4)],\n)\ndef test_early_stopping_patience_train(\n tmpdir, validation_step_none: bool, loss_values: list, patience: int, expected_stop_epoch: int\n):\n \"\"\"Test to ensure that early stopping is not triggered before patience is exhausted.\"\"\"\n\n class ModelOverrideTrainReturn(BoringModel):\n train_return_values = torch.tensor(loss_values)\n\n def training_epoch_end(self, outputs):\n loss = self.train_return_values[self.current_epoch]\n self.log(\"train_loss\", loss)\n\n model = ModelOverrideTrainReturn()\n\n if validation_step_none:\n model.validation_step = None\n\n early_stop_callback = EarlyStopping(\n monitor=\"train_loss\", patience=patience, verbose=True, check_on_train_epoch_end=True\n )\n trainer = Trainer(\n default_root_dir=tmpdir,\n callbacks=[early_stop_callback],\n num_sanity_val_steps=0,\n max_epochs=10,\n progress_bar_refresh_rate=0,\n )\n trainer.fit(model)\n assert trainer.current_epoch == expected_stop_epoch\n\n\ndef test_pickling(tmpdir):\n early_stopping = EarlyStopping()\n\n early_stopping_pickled = pickle.dumps(early_stopping)\n early_stopping_loaded = pickle.loads(early_stopping_pickled)\n assert vars(early_stopping) == vars(early_stopping_loaded)\n\n early_stopping_pickled = cloudpickle.dumps(early_stopping)\n early_stopping_loaded = cloudpickle.loads(early_stopping_pickled)\n assert vars(early_stopping) == vars(early_stopping_loaded)\n\n\ndef test_early_stopping_no_val_step(tmpdir):\n \"\"\"Test that early stopping callback falls back to training metrics when no validation defined.\"\"\"\n\n model = 
ClassificationModel()\n dm = ClassifDataModule()\n model.validation_step = None\n model.val_dataloader = None\n\n stopping = EarlyStopping(monitor=\"train_loss\", min_delta=0.1, patience=0, check_on_train_epoch_end=True)\n trainer = Trainer(default_root_dir=tmpdir, callbacks=[stopping], overfit_batches=0.20, max_epochs=10)\n trainer.fit(model, datamodule=dm)\n\n assert trainer.state.finished, f\"Training failed with {trainer.state}\"\n assert trainer.current_epoch < trainer.max_epochs - 1\n\n\n@pytest.mark.parametrize(\n \"stopping_threshold,divergence_theshold,losses,expected_epoch\",\n [\n (None, None, [8, 4, 2, 3, 4, 5, 8, 10], 5),\n (2.9, None, [9, 8, 7, 6, 5, 6, 4, 3, 2, 1], 8),\n (None, 15.9, [9, 4, 2, 16, 32, 64], 3),\n ],\n)\ndef test_early_stopping_thresholds(tmpdir, stopping_threshold, divergence_theshold, losses, expected_epoch):\n class CurrentModel(BoringModel):\n def validation_epoch_end(self, outputs):\n val_loss = losses[self.current_epoch]\n self.log(\"abc\", val_loss)\n\n model = CurrentModel()\n early_stopping = EarlyStopping(\n monitor=\"abc\", stopping_threshold=stopping_threshold, divergence_threshold=divergence_theshold\n )\n trainer = Trainer(default_root_dir=tmpdir, callbacks=[early_stopping], overfit_batches=0.20, max_epochs=20)\n trainer.fit(model)\n assert trainer.current_epoch == expected_epoch, \"early_stopping failed\"\n\n\n@pytest.mark.parametrize(\"stop_value\", [torch.tensor(np.inf), torch.tensor(np.nan)])\ndef test_early_stopping_on_non_finite_monitor(tmpdir, stop_value):\n\n losses = [4, 3, stop_value, 2, 1]\n expected_stop_epoch = 2\n\n class CurrentModel(BoringModel):\n def validation_epoch_end(self, outputs):\n val_loss = losses[self.current_epoch]\n self.log(\"val_loss\", val_loss)\n\n model = CurrentModel()\n early_stopping = EarlyStopping(monitor=\"val_loss\", check_finite=True)\n trainer = Trainer(default_root_dir=tmpdir, callbacks=[early_stopping], overfit_batches=0.20, max_epochs=10)\n trainer.fit(model)\n assert trainer.current_epoch == expected_stop_epoch\n assert early_stopping.stopped_epoch == expected_stop_epoch\n\n\n@pytest.mark.parametrize(\"step_freeze, min_steps, min_epochs\", [(5, 1, 1), (5, 1, 3), (3, 15, 1)])\ndef test_min_steps_override_early_stopping_functionality(tmpdir, step_freeze: int, min_steps: int, min_epochs: int):\n \"\"\"Excepted Behaviour:\n IF `min_steps` was set to a higher value than the `trainer.global_step` when `early_stopping` is being triggered,\n THEN the trainer should continue until reaching `trainer.global_step` == `min_steps`, and stop.\n\n IF `min_epochs` resulted in a higher number of steps than the `trainer.global_step`\n when `early_stopping` is being triggered,\n THEN the trainer should continue until reaching\n `trainer.global_step` == `min_epochs * len(train_dataloader)`, and stop.\n This test validate this expected behaviour\n\n IF both `min_epochs` and `min_steps` are provided and higher than the `trainer.global_step`\n when `early_stopping` is being triggered,\n THEN the highest between `min_epochs * len(train_dataloader)` and `min_steps` would be reached.\n\n Caveat: IF min_steps is divisible by len(train_dataloader), then it will do min_steps + len(train_dataloader)\n\n This test validate those expected behaviours\n \"\"\"\n\n _logger.disabled = True\n\n original_loss_value = 10\n limit_train_batches = 3\n patience = 3\n\n class Model(BoringModel):\n def __init__(self, step_freeze):\n super().__init__()\n\n self._step_freeze = step_freeze\n\n self._loss_value = 10.0\n self._eps = 1e-1\n 
self._count_decrease = 0\n self._values = []\n\n def training_step(self, batch, batch_idx):\n output = self.layer(batch)\n loss = self.loss(batch, output)\n return {\"loss\": loss}\n\n def validation_step(self, batch, batch_idx):\n return {\"test_val_loss\": self._loss_value}\n\n def validation_epoch_end(self, outputs):\n _mean = np.mean([x[\"test_val_loss\"] for x in outputs])\n if self.trainer.global_step <= self._step_freeze:\n self._count_decrease += 1\n self._loss_value -= self._eps\n self._values.append(_mean)\n self.log(\"test_val_loss\", _mean)\n\n model = Model(step_freeze)\n model.training_step_end = None\n model.test_dataloader = None\n early_stop_callback = EarlyStopping(monitor=\"test_val_loss\", patience=patience, verbose=True)\n trainer = Trainer(\n default_root_dir=tmpdir,\n callbacks=[early_stop_callback],\n limit_train_batches=limit_train_batches,\n limit_val_batches=2,\n min_steps=min_steps,\n min_epochs=min_epochs,\n )\n trainer.fit(model)\n\n # Make sure loss was properly decreased\n assert abs(original_loss_value - (model._count_decrease) * model._eps - model._loss_value) < 1e-6\n\n pos_diff = (np.diff(model._values) == 0).nonzero()[0][0]\n\n # Compute when the latest validation epoch end happened\n latest_validation_epoch_end = (pos_diff // limit_train_batches) * limit_train_batches\n if pos_diff % limit_train_batches == 0:\n latest_validation_epoch_end += limit_train_batches\n\n # Compute early stopping latest step\n by_early_stopping = latest_validation_epoch_end + (1 + limit_train_batches) * patience\n\n # Compute min_epochs latest step\n by_min_epochs = min_epochs * limit_train_batches\n\n # Make sure the trainer stops for the max of all minimum requirements\n assert trainer.global_step == max(min_steps, by_early_stopping, by_min_epochs), (\n trainer.global_step,\n max(min_steps, by_early_stopping, by_min_epochs),\n step_freeze,\n min_steps,\n min_epochs,\n )\n\n _logger.disabled = False\n\n\ndef test_early_stopping_mode_options():\n with pytest.raises(MisconfigurationException, match=\"`mode` can be .* got unknown_option\"):\n EarlyStopping(mode=\"unknown_option\")\n\n\nclass EarlyStoppingModel(BoringModel):\n def __init__(self, expected_end_epoch: int, early_stop_on_train: bool):\n super().__init__()\n self.expected_end_epoch = expected_end_epoch\n self.early_stop_on_train = early_stop_on_train\n\n def _epoch_end(self) -> None:\n losses = [8, 4, 2, 3, 4, 5, 8, 10]\n loss = losses[self.current_epoch]\n self.log(\"abc\", torch.tensor(loss))\n self.log(\"cba\", torch.tensor(0))\n\n def training_epoch_end(self, outputs):\n if not self.early_stop_on_train:\n return\n self._epoch_end()\n\n def validation_epoch_end(self, outputs):\n if self.early_stop_on_train:\n return\n self._epoch_end()\n\n def on_train_end(self) -> None:\n assert self.trainer.current_epoch == self.expected_end_epoch, \"Early Stopping Failed\"\n\n\n_ES_CHECK = dict(check_on_train_epoch_end=True)\n_ES_CHECK_P3 = dict(patience=3, check_on_train_epoch_end=True)\n_NO_WIN = dict(marks=RunIf(skip_windows=True))\n\n\n@pytest.mark.parametrize(\n \"callbacks, expected_stop_epoch, check_on_train_epoch_end, accelerator, num_processes\",\n [\n ([EarlyStopping(\"abc\"), EarlyStopping(\"cba\", patience=3)], 3, False, None, 1),\n ([EarlyStopping(\"cba\", patience=3), EarlyStopping(\"abc\")], 3, False, None, 1),\n pytest.param([EarlyStopping(\"abc\"), EarlyStopping(\"cba\", patience=3)], 3, False, \"ddp_cpu\", 2, **_NO_WIN),\n pytest.param([EarlyStopping(\"cba\", patience=3), EarlyStopping(\"abc\")], 3, False, 
\"ddp_cpu\", 2, **_NO_WIN),\n ([EarlyStopping(\"abc\", **_ES_CHECK), EarlyStopping(\"cba\", **_ES_CHECK_P3)], 3, True, None, 1),\n ([EarlyStopping(\"cba\", **_ES_CHECK_P3), EarlyStopping(\"abc\", **_ES_CHECK)], 3, True, None, 1),\n pytest.param(\n [EarlyStopping(\"abc\", **_ES_CHECK), EarlyStopping(\"cba\", **_ES_CHECK_P3)], 3, True, \"ddp_cpu\", 2, **_NO_WIN\n ),\n pytest.param(\n [EarlyStopping(\"cba\", **_ES_CHECK_P3), EarlyStopping(\"abc\", **_ES_CHECK)], 3, True, \"ddp_cpu\", 2, **_NO_WIN\n ),\n ],\n)\ndef test_multiple_early_stopping_callbacks(\n tmpdir,\n callbacks: List[EarlyStopping],\n expected_stop_epoch: int,\n check_on_train_epoch_end: bool,\n accelerator: Optional[str],\n num_processes: int,\n):\n \"\"\"Ensure when using multiple early stopping callbacks we stop if any signals we should stop.\"\"\"\n\n model = EarlyStoppingModel(expected_stop_epoch, check_on_train_epoch_end)\n\n trainer = Trainer(\n default_root_dir=tmpdir,\n callbacks=callbacks,\n overfit_batches=0.20,\n max_epochs=20,\n accelerator=accelerator,\n num_processes=num_processes,\n )\n trainer.fit(model)\n"
] |
[
[
"numpy.mean",
"torch.tensor",
"torch.load",
"numpy.diff"
]
] |
beppeben/wavebet
|
[
"74e1019b78139018c44c5993c1318db237ea8e60"
] |
[
"train.py"
] |
[
"\"\"\"Training script for the WaveNet network on the VCTK corpus.\n\nThis script trains a network with the WaveNet using data from the VCTK corpus,\nwhich can be freely downloaded at the following site (~10 GB):\nhttp://homepages.inf.ed.ac.uk/jyamagis/page3/page58/page58.html\n\"\"\"\n\nfrom __future__ import print_function\n\nimport argparse\nfrom datetime import datetime\nimport json\nimport os\nimport sys\nimport time\n\nimport tensorflow as tf\nfrom tensorflow.python.client import timeline\n\nfrom wavenet import WaveNetModel, DataReader, optimizer_factory\n\nBATCH_SIZE = 1\nDATA_DIRECTORY = './data'\nTESTDATA_DIRECTORY = './testdata'\nLOGDIR_ROOT = './logdir'\nCHECKPOINT_EVERY = 5000\nNUM_STEPS = int(1e5)\nLEARNING_RATE = 1e-3\nWAVENET_PARAMS = './wavenet_params.json'\nSTARTED_DATESTRING = \"{0:%Y-%m-%dT%H-%M-%S}\".format(datetime.now())\nSAMPLE_SIZE = None\nL2_REGULARIZATION_STRENGTH = 0\nEPSILON = 0.001\nMOMENTUM = 0.9\nMAX_TO_KEEP = 5\nMETADATA = False\n\n\ndef get_arguments():\n def _str_to_bool(s):\n \"\"\"Convert string to bool (in argparse context).\"\"\"\n if s.lower() not in ['true', 'false']:\n raise ValueError('Argument needs to be a '\n 'boolean, got {}'.format(s))\n return {'true': True, 'false': False}[s.lower()]\n\n parser = argparse.ArgumentParser(description='WaveNet example network')\n parser.add_argument('--batch_size', type=int, default=BATCH_SIZE,\n help='How many files to process at once. Default: ' + str(BATCH_SIZE) + '.')\n parser.add_argument('--data_dir', type=str, default=DATA_DIRECTORY,\n help='The directory containing the training data.')\n parser.add_argument('--testdata_dir', type=str, default=TESTDATA_DIRECTORY,\n help='The directory containing the training data.')\n parser.add_argument('--store_metadata', type=bool, default=METADATA,\n help='Whether to store advanced debugging information '\n '(execution time, memory consumption) for use with '\n 'TensorBoard. Default: ' + str(METADATA) + '.')\n parser.add_argument('--logdir', type=str, default=None,\n help='Directory in which to store the logging '\n 'information for TensorBoard. '\n 'If the model already exists, it will restore '\n 'the state and will continue training. '\n 'Cannot use with --logdir_root and --restore_from.')\n parser.add_argument('--logdir_root', type=str, default=None,\n help='Root directory to place the logging '\n 'output and generated model. These are stored '\n 'under the dated subdirectory of --logdir_root. '\n 'Cannot use with --logdir.')\n parser.add_argument('--restore_from', type=str, default=None,\n help='Directory in which to restore the model from. '\n 'This creates the new model under the dated directory '\n 'in --logdir_root. '\n 'Cannot use with --logdir.')\n parser.add_argument('--checkpoint_every', type=int,\n default=CHECKPOINT_EVERY,\n help='How many steps to save each checkpoint after. Default: ' + str(CHECKPOINT_EVERY) + '.')\n parser.add_argument('--num_steps', type=int, default=NUM_STEPS,\n help='Number of training steps. Default: ' + str(NUM_STEPS) + '.')\n parser.add_argument('--learning_rate', type=float, default=LEARNING_RATE,\n help='Learning rate for training. Default: ' + str(LEARNING_RATE) + '.')\n parser.add_argument('--wavenet_params', type=str, default=WAVENET_PARAMS,\n help='JSON file with the network parameters. Default: ' + WAVENET_PARAMS + '.')\n parser.add_argument('--sample_size', type=int, default=SAMPLE_SIZE,\n help='Concatenate and cut samples to this many '\n 'samples. 
Default: ' + str(SAMPLE_SIZE) + '.')\n parser.add_argument('--l2_regularization_strength', type=float,\n default=L2_REGULARIZATION_STRENGTH,\n help='Coefficient in the L2 regularization. '\n 'Default: False')\n parser.add_argument('--optimizer', type=str, default='adam',\n choices=optimizer_factory.keys(),\n help='Select the optimizer specified by this option. Default: adam.')\n parser.add_argument('--momentum', type=float,\n default=MOMENTUM, help='Specify the momentum to be '\n 'used by sgd or rmsprop optimizer. Ignored by the '\n 'adam optimizer. Default: ' + str(MOMENTUM) + '.')\n parser.add_argument('--histograms', type=_str_to_bool, default=False,\n help='Whether to store histogram summaries. Default: False')\n parser.add_argument('--max_checkpoints', type=int, default=MAX_TO_KEEP,\n help='Maximum amount of checkpoints that will be kept alive. Default: '\n + str(MAX_TO_KEEP) + '.')\n return parser.parse_args()\n\n\ndef save(saver, sess, logdir, step):\n model_name = 'model.ckpt'\n checkpoint_path = os.path.join(logdir, model_name)\n print('Storing checkpoint to {} ...'.format(logdir), end=\"\")\n sys.stdout.flush()\n\n if not os.path.exists(logdir):\n os.makedirs(logdir)\n\n saver.save(sess, checkpoint_path, global_step=step)\n print(' Done.')\n\n\ndef load(saver, sess, logdir):\n print(\"Trying to restore saved checkpoints from {} ...\".format(logdir),\n end=\"\")\n\n ckpt = tf.train.get_checkpoint_state(logdir)\n if ckpt:\n print(\" Checkpoint found: {}\".format(ckpt.model_checkpoint_path))\n global_step = int(ckpt.model_checkpoint_path\n .split('/')[-1]\n .split('-')[-1])\n print(\" Global step was: {}\".format(global_step))\n print(\" Restoring...\", end=\"\")\n saver.restore(sess, ckpt.model_checkpoint_path)\n print(\" Done.\")\n return global_step\n else:\n print(\" No checkpoint found.\")\n return None\n\n\ndef get_default_logdir(logdir_root):\n logdir = os.path.join(logdir_root, 'train', STARTED_DATESTRING)\n return logdir\n\n\ndef validate_directories(args):\n \"\"\"Validate and arrange directory related arguments.\"\"\"\n\n # Validation\n if args.logdir and args.logdir_root:\n raise ValueError(\"--logdir and --logdir_root cannot be \"\n \"specified at the same time.\")\n\n if args.logdir and args.restore_from:\n raise ValueError(\n \"--logdir and --restore_from cannot be specified at the same \"\n \"time. 
This is to keep your previous model from unexpected \"\n \"overwrites.\\n\"\n \"Use --logdir_root to specify the root of the directory which \"\n \"will be automatically created with current date and time, or use \"\n \"only --logdir to just continue the training from the last \"\n \"checkpoint.\")\n\n # Arrangement\n logdir_root = args.logdir_root\n if logdir_root is None:\n logdir_root = LOGDIR_ROOT\n\n logdir = args.logdir\n if logdir is None:\n logdir = get_default_logdir(logdir_root)\n print('Using default logdir: {}'.format(logdir))\n\n restore_from = args.restore_from\n if restore_from is None:\n # args.logdir and args.restore_from are exclusive,\n # so it is guaranteed the logdir here is newly created.\n restore_from = logdir\n\n return {\n 'logdir': logdir,\n 'logdir_root': args.logdir_root,\n 'restore_from': restore_from\n }\n\n\ndef main():\n args = get_arguments()\n\n try:\n directories = validate_directories(args)\n except ValueError as e:\n print(\"Some arguments are wrong:\")\n print(str(e))\n return\n\n logdir = directories['logdir']\n restore_from = directories['restore_from']\n\n # Even if we restored the model, we will treat it as new training\n # if the trained model is written into an arbitrary location.\n is_overwritten_training = logdir != restore_from\n\n with open(args.wavenet_params, 'r') as f:\n wavenet_params = json.load(f)\n\n # Create coordinator.\n coord = tf.train.Coordinator()\n \n #prediction_lag = 360\n #quantization_thresholds = [0.01]\n \n # Load data from csv files\n with tf.name_scope('create_inputs'):\n reader = DataReader(\n args.data_dir,\n args.testdata_dir,\n coord,\n receptive_field = WaveNetModel.calculate_receptive_field(wavenet_params[\"filter_width\"],\n wavenet_params[\"dilations\"]),\n prediction_lag = wavenet_params[\"prediction_lag\"],\n quantization_thresholds = wavenet_params[\"quantization_thresholds\"],\n sample_size = args.sample_size)\n data_batch = reader.dequeue(args.batch_size)\n \n dropout = tf.placeholder(dtype=tf.float32, shape=None)\n \n # Create network.\n net = WaveNetModel(\n batch_size=args.batch_size,\n dilations=wavenet_params[\"dilations\"],\n filter_width=wavenet_params[\"filter_width\"],\n residual_channels=wavenet_params[\"residual_channels\"],\n dilation_channels=wavenet_params[\"dilation_channels\"],\n skip_channels=wavenet_params[\"skip_channels\"],\n input_channels = reader.num_features(),\n output_channels = reader.num_return_categories(),\n dropout = dropout,\n use_biases=wavenet_params[\"use_biases\"],\n histograms=args.histograms)\n\n if args.l2_regularization_strength == 0:\n args.l2_regularization_strength = None\n \n loss, raw_loss, correct_ratio = net.loss(input_batch=data_batch,\n l2_regularization_strength=args.l2_regularization_strength,\n weights = [1., 1., 1.])\n #weights = reader.get_category_inv_weights())\n \n train_loss_summary = tf.summary.merge([\n tf.summary.scalar('train_raw_loss', raw_loss),\n tf.summary.scalar('train_total_loss', loss)])\n test_loss_summary = tf.summary.scalar('test_raw_loss', raw_loss)\n test_correct_ratio_sell_summary = tf.summary.scalar('correct_ratio_sell', correct_ratio[2][0])\n test_correct_ratio_mid_summary = tf.summary.scalar('correct_ratio_mid', correct_ratio[2][1])\n test_correct_ratio_buy_summary = tf.summary.scalar('correct_ratio_buy', correct_ratio[2][2])\n test_summaries = [test_loss_summary, test_correct_ratio_sell_summary,\n test_correct_ratio_mid_summary, test_correct_ratio_buy_summary]\n test_summaries = tf.summary.merge(test_summaries)\n \n optimizer = 
optimizer_factory[args.optimizer](\n learning_rate=args.learning_rate,\n momentum=args.momentum)\n trainable = tf.trainable_variables()\n optim = optimizer.minimize(loss, var_list=trainable)\n\n # Set up logging for TensorBoard.\n writer = tf.summary.FileWriter(logdir)\n writer.add_graph(tf.get_default_graph())\n run_metadata = tf.RunMetadata()\n #summaries = tf.summary.merge_all()\n\n # Set up session\n sess = tf.Session(config=tf.ConfigProto(log_device_placement=False))\n init = tf.global_variables_initializer()\n sess.run(init)\n\n # Saver for storing checkpoints of the model.\n saver = tf.train.Saver(var_list=tf.trainable_variables(), max_to_keep=args.max_checkpoints)\n\n try:\n saved_global_step = load(saver, sess, restore_from)\n if is_overwritten_training or saved_global_step is None:\n # The first training step will be saved_global_step + 1,\n # therefore we put -1 here for new or overwritten trainings.\n saved_global_step = -1\n\n except:\n print(\"Something went wrong while restoring checkpoint. \"\n \"We will terminate training to avoid accidentally overwriting \"\n \"the previous model.\")\n raise\n\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n test_every = 50\n reader.start_threads(sess, test_every)\n\n step = None\n last_saved_step = saved_global_step\n min_test_loss = 10\n \n try:\n for step in range(saved_global_step + 1, args.num_steps):\n start_time = time.time()\n if args.store_metadata and step % 50 == 0:\n # Slow run that stores extra information for debugging.\n print('Storing metadata')\n run_options = tf.RunOptions(\n trace_level=tf.RunOptions.FULL_TRACE)\n summary, loss_value, _ = sess.run(\n [summaries, raw_loss, optim],\n options=run_options,\n run_metadata=run_metadata)\n writer.add_summary(summary, step)\n writer.add_run_metadata(run_metadata,\n 'step_{:04d}'.format(step))\n tl = timeline.Timeline(run_metadata.step_stats)\n timeline_path = os.path.join(logdir, 'timeline.trace')\n with open(timeline_path, 'w') as f:\n f.write(tl.generate_chrome_trace_format(show_memory=True))\n else:\n summary, loss_value, _ = sess.run([train_loss_summary, raw_loss, optim],\n feed_dict={dropout: 1})\n writer.add_summary(summary, step)\n\n duration = time.time() - start_time\n print('step {:d} - loss = {:.3f}, ({:.3f} sec/step)'\n .format(step, loss_value, duration))\n \n if step % test_every == 0:\n summary, test_loss, ratios = sess.run([test_summaries, raw_loss, correct_ratio], feed_dict={dropout: 1})\n writer.add_summary(summary, step)\n min_test_loss = min(min_test_loss, test_loss)\n print('test loss = {:.3f}, min loss = {:.3f}'.format(test_loss, min_test_loss))\n print('correct ratio = {:.3f}'.format(ratios[0]))\n print('test true categories = {}'.format(ratios[1]))\n print('correct ratios = {}'.format(ratios[2]))\n print('false positives = {}'.format(ratios[3]))\n \n if step != 0 and step % args.checkpoint_every == 0:\n #loss_test_value = sess.run(loss_test)\n #print('test loss = {:.3f}'.format(loss_test_value))\n save(saver, sess, logdir, step)\n last_saved_step = step\n\n except KeyboardInterrupt:\n # Introduce a line break after ^C is displayed so save message\n # is on its own line.\n print()\n finally:\n if step > last_saved_step:\n save(saver, sess, logdir, step)\n coord.request_stop()\n coord.join(threads)\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"tensorflow.RunOptions",
"tensorflow.trainable_variables",
"tensorflow.train.start_queue_runners",
"tensorflow.summary.merge",
"tensorflow.python.client.timeline.Timeline",
"tensorflow.train.Coordinator",
"tensorflow.summary.scalar",
"tensorflow.get_default_graph",
"tensorflow.train.get_checkpoint_state",
"tensorflow.RunMetadata",
"tensorflow.ConfigProto",
"tensorflow.placeholder",
"tensorflow.name_scope",
"tensorflow.summary.FileWriter",
"tensorflow.global_variables_initializer"
]
] |
AlexJew/CityEnergyAnalyst
|
[
"6eb372c79e5100a2d0abce78561ae368fb409cd1"
] |
[
"legacy/4D_plots_ArcGIS/radiation_total.py"
] |
[
"\"\"\"\nTotal radiation calculator from the insolation files\n\"\"\"\nfrom __future__ import division\nimport pandas as pd\nfrom cea.utilities import dbf\nimport cea.inputlocator\nimport cea.config\nfrom cea.utilities import epwreader\nfrom cea.constants import HOURS_IN_YEAR\n\n\n__author__ = \"Sreepathi Bhargava Krishna\"\n__copyright__ = \"Copyright 2016, Architecture and Building Systems - ETH Zurich\"\n__credits__ = [\"Sreepathi Bhargava Krishna\"]\n__license__ = \"MIT\"\n__version__ = \"0.1\"\n__maintainer__ = \"Daren Thomas\"\n__email__ = \"cea@arch.ethz.ch\"\n__status__ = \"Production\"\n\ndef calc_spatio_temporal_visuals(locator, list_of_buildings, initial_date, config):\n\n # now the dates in which the building demand is calculated is stored in 'date'\n date = pd.date_range(initial_date, periods=HOURS_IN_YEAR, freq='H')\n time = date.strftime(\"%Y%m%d%H%M%S\")\n\n # this loop checks if all the buildings are selected and gets the building names from Total demand.csv file\n if 'all' in list_of_buildings:\n building_names = locator.get_zone_building_names()\n else:\n building_names = list_of_buildings\n\n for i, building in enumerate(building_names):\n\n sensors_rad = pd.read_json(locator.get_radiation_building_sensors(building_name=building))\n sensors_metadata = pd.read_csv(locator.get_radiation_metadata(building_name= building))\n\n sensors_metadata_roof = sensors_metadata[sensors_metadata.TYPE == 'roofs']\n sensors_metadata_walls = sensors_metadata[sensors_metadata.TYPE == 'walls']\n sensors_metadata_windows = sensors_metadata[sensors_metadata.TYPE == 'windows']\n\n # segregating the surfaces with SURFACE TYPE as roof\n roof_cols = [c for c in sensors_rad.columns if c in sensors_metadata_roof.SURFACE.tolist()]\n sensors_rad_roof = pd.DataFrame()\n # Calculating weighted average for all the surfaces with SURFACE TYPE as roof\n for i in roof_cols:\n sensors_rad_roof[i] = sensors_rad[i] * (sensors_metadata_roof[sensors_metadata_roof.SURFACE == i].AREA_m2.values)\n sensors_rad_roof = sensors_rad_roof / (sensors_metadata_roof.AREA_m2.sum(0))\n\n # segregating the surfaces with SURFACE TYPE as walls\n walls_cols = [c for c in sensors_rad.columns if c in sensors_metadata_walls.SURFACE.tolist()]\n sensors_rad_walls = pd.DataFrame()\n # Calculating weighted average for all the surfaces with SURFACE TYPE as walls\n for i in walls_cols:\n sensors_rad_walls[i] = sensors_rad[i] * (sensors_metadata_walls[sensors_metadata_walls.SURFACE == i].AREA_m2.values)\n sensors_rad_walls = sensors_rad_walls / (sensors_metadata_walls.AREA_m2.sum(0))\n\n sensors_rad_final = pd.DataFrame()\n sensors_rad_final['total_roof_rad_Wperm2'] = sensors_rad_roof.sum(1)\n sensors_rad_final['total_wall_rad_Wperm2'] = sensors_rad_walls.sum(1)\n sensors_rad_final['date'] = time\n sensors_rad_final.to_csv(locator.radiation_results(building_name=building), index=True, float_format='%.2f')\n\ndef main(config):\n\n locator = cea.inputlocator.InputLocator(scenario=config.scenario)\n initial_date = '1/1/2015'\n list_of_buildings = ['all'] # 'all' for all buildings or else provide a list of building names\n calc_spatio_temporal_visuals(locator, list_of_buildings, initial_date, config)\n\nif __name__ == '__main__':\n main(cea.config.Configuration())"
] |
[
[
"pandas.DataFrame",
"pandas.date_range"
]
] |
goshdarngames/Autumn-CA
|
[
"85bd1db5aa08882a01d6954e085b76e316ac5712"
] |
[
"autumn_ca/benchmark.py"
] |
[
"import numpy as np\r\n\r\nfrom lib.simulation import Simulation\r\n\r\nfrom lib.rule_function import conway_rule\r\n\r\nfrom Conway.lib.neighbourhood import moore_neighbourhood\r\n\r\n\r\nSIMULATION_SIZE = (100,100)\r\nNUMBER_OF_STEPS = 2 \r\n\r\nsim = Simulation( SIMULATION_SIZE, conway_rule) \r\n \r\ndef time_numpy ():\r\n \"\"\"\r\n Times the numpy implementation. To run use command-line:\r\n \r\n python3 -m timeit -s 'from benchmark import time_numpy' 'time_numpy()'\r\n \"\"\"\r\n\r\n \r\n \r\n for i in range(NUMBER_OF_STEPS):\r\n \r\n next(sim)\r\n \r\n \r\n#time_numpy ()\r\n\r\nneighbourhood_data = np.zeros(10000,dtype='uint8').reshape(100,100)\r\nneighbourhood_pos = (0,0)\r\nn_out = np.zeros(9, dtype='uint8')\r\n\r\ndef time_neighbourhoods ():\r\n \"\"\"\r\n Times the numpy implementation. To run use command-line:\r\n \r\n python3 -m timeit -s 'from benchmark import time_neighbourhoods' 'time_neighbourhoods()'\r\n \"\"\"\r\n \r\n for _ in range(10000):\r\n moore_neighbourhood (neighbourhood_data, \r\n neighbourhood_pos[0], neighbourhood_pos[1],\r\n n_out) \r\n\r\n\r\n\r\n"
] |
[
[
"numpy.zeros"
]
] |
joll05/AdventOfCode2019
|
[
"faa61058dd048dfc039889eaa4bd361d34b9dc7b",
"faa61058dd048dfc039889eaa4bd361d34b9dc7b"
] |
[
"Day 3/solution1.py",
"Day 11/solution1.py"
] |
[
"import numpy as np\n\ndef manhattan(x, y):\n return abs(x) + abs(y)\n\nf = open(\"input.txt\")\n\nwires = f.readlines()\nwires[0] = wires[0].split(\",\")\nwires[1] = wires[1].split(\",\")\n\nmax = int(input(\"Max: \"))\n\ncrossedPositions = np.zeros((max, max), dtype=bool)\n\nclosestCrossing = [999999, 999999]\n\nposX = 0\nposY = 0\n\ncheckingMode = False\nfor wire in wires:\n posX = 0\n posY = 0\n\n for i in wire:\n direction = i[0]\n\n for step in range(int(i[1:])):\n if(direction == \"R\"):\n posX += 1\n elif(direction == \"L\"):\n posX -= 1\n elif(direction == \"U\"):\n posY += 1\n elif(direction == \"D\"):\n posY -= 1\n else:\n print(\"Something went wrong.\")\n\n if(checkingMode):\n if(crossedPositions[posX, posY] == True):\n if(manhattan(closestCrossing[0], closestCrossing[1]) > manhattan(posX, posY)):\n closestCrossing = [posX, posY]\n else:\n crossedPositions[posX, posY] = True\n\n checkingMode = True\n\nprint(manhattan(closestCrossing[0], closestCrossing[1]))\n",
"import computer\nimport numpy as np\nimport time\n\nPosition = (0, 0)\n\nCanvas = np.full([200, 200], -1, dtype=int)\nCanvas[0, 0] = 1\n\nCorners = [(0, 0), (0, 0)]\n\nTileCount = 0\n\nDirection = 0\n\ndef AddVectors(vec1, vec2):\n\n if(len(vec1) != len(vec2)):\n return None\n\n out = []\n for v in range(len(vec1)):\n out += [vec1[v] + vec2[v]]\n\n return tuple(out)\n\ndef SendInput():\n global Canvas\n global Position\n \n if(Canvas[Position] == 1):\n return 1\n else:\n return 0\n\ndef MoveRobot():\n global Direction\n global Position\n global Corners\n \n if(Direction == 0):\n Position = AddVectors(Position, (0, 1))\n elif(Direction == 1):\n Position = AddVectors(Position, (1, 0))\n elif(Direction == 2):\n Position = AddVectors(Position, (0, -1))\n elif(Direction == 3):\n Position = AddVectors(Position, (-1, 0))\n\n print(Position)\n\n if(Position[0] < Corners[0][0] or Position[1] < Corners[0][1]):\n Corners[0] = Position\n elif(Position[0] > Corners[1][0] or Position[1] > Corners[1][1]):\n Corners[1] = Position\n\nTurning = False\ndef RecieveOutput(out):\n global Turning\n global Direction\n global Canvas\n global Position\n global TileCount\n \n if(not Turning):\n if(Canvas[Position] == -1):\n TileCount += 1\n Canvas[Position] = out\n else:\n if(out == 0):\n Direction -= 1\n else:\n Direction += 1\n\n if(Direction < 0):\n Direction += 4\n elif(Direction > 3):\n Direction -= 4\n\n MoveRobot()\n\n Turning = not Turning\n\ncomputer.Run(RecieveOutput, SendInput)\n\nblackChar = u\"\\u25A0\"\nwhiteChar = u\"\\u25A1\"\n\nfor x in range(Corners[0][0] - 1, Corners[1][0] + 2):\n out = \"\"\n for y in range(Corners[0][1] - 1, Corners[1][1] + 2):\n if(Canvas[x, y] == 1):\n out += whiteChar\n else:\n out += blackChar\n print(out)\n time.sleep(0.2)\n"
] |
[
[
"numpy.zeros"
],
[
"numpy.full"
]
] |
oicr-gsi/dashi
|
[
"34bf96a7d447095df525df3ad27dbe10f4e3dde0"
] |
[
"application/dash_application/views/rnaseqc.py"
] |
[
"import dash_html_components as html\nimport dash_core_components as core\nfrom dash.dependencies import Input, Output\nfrom ..dash_id import init_ids\nimport pandas\nimport gsiqcetl.load\nfrom gsiqcetl.rnaseqqc.constants import CacheSchema\nfrom gsiqcetl.pinery.sampleprovenance.constants import (\n CacheSchema as SampleProvenanceCacheSchema,\n)\n\nfrom application.dash_application.plots.shiny_mimic import ShinyMimic\n\npage_name = \"rnaseqc/over_time\"\n\nRNA_DF = gsiqcetl.load.rnaseqqc(CacheSchema.v2)\nRNA_COL = gsiqcetl.load.rnaseqqc_columns(CacheSchema.v2)\n\nCOL_RUN_DATE = \"Run Date\"\nCOL_PROP_ALIGNED_BASES = \"Proportion Aligned Bases\"\n\n# The Run Name is used to extract the date\nRNA_DF[COL_RUN_DATE] = (\n RNA_DF[RNA_COL.SequencerRunName].dropna().apply(lambda x: x.split(\"_\")[0])\n)\n# Some runs do not have the proper format and will be excluded\nRNA_DF = RNA_DF[RNA_DF[COL_RUN_DATE].str.isnumeric()]\nRNA_DF[COL_RUN_DATE] = pandas.to_datetime(RNA_DF[COL_RUN_DATE], yearfirst=True)\n\nRNA_DF[COL_PROP_ALIGNED_BASES] = (\n RNA_DF[RNA_COL.PassedFilterAlignedBases] / RNA_DF[RNA_COL.PassedFilterBases]\n)\n\n# List projects for which RNA-Seq studies have been done\nALL_PROJECTS = RNA_DF[RNA_COL.StudyTitle].sort_values().unique()\n\n# Pull in meta data from Pinery\n# noinspection PyTypeChecker\nPINERY: pandas.DataFrame = gsiqcetl.load.pinery_sample_provenance(\n SampleProvenanceCacheSchema.v1\n)\nPINERY_COL = gsiqcetl.load.pinery_sample_provenance_columns(\n SampleProvenanceCacheSchema.v1\n)\n\nPINERY = PINERY[\n [\n PINERY_COL.SampleName,\n PINERY_COL.SequencerRunName,\n PINERY_COL.LaneNumber,\n PINERY_COL.PrepKit,\n PINERY_COL.LibrarySourceTemplateType,\n PINERY_COL.TissueOrigin,\n PINERY_COL.TissueType,\n PINERY_COL.TissuePreparation,\n ]\n]\n\nRNA_DF = RNA_DF.merge(\n PINERY,\n how=\"left\",\n left_on=[RNA_COL.SampleName, RNA_COL.SequencerRunName, RNA_COL.LaneNumber],\n right_on=[\n PINERY_COL.SampleName,\n PINERY_COL.SequencerRunName,\n PINERY_COL.LaneNumber,\n ],\n)\n\n# NaN kits need to be changed to a str. 
Use the existing Unspecified\nRNA_DF = RNA_DF.fillna({PINERY_COL.PrepKit: \"Unspecified\"})\nRNA_DF = RNA_DF.fillna({PINERY_COL.LibrarySourceTemplateType: \"Unknown\"})\n# NaN Tissue Origin is set to `nn`, which is used by MISO for unknown\nRNA_DF = RNA_DF.fillna({PINERY_COL.TissueOrigin: \"nn\"})\n# NaN Tissue Type is set to `n`, which is used by MISO for unknown\nRNA_DF = RNA_DF.fillna({PINERY_COL.TissueType: \"n\"})\nRNA_DF = RNA_DF.fillna({PINERY_COL.TissuePreparation: \"Unknown\"})\n\n# Kits used for RNA-Seq experiments\nALL_KITS = RNA_DF[PINERY_COL.PrepKit].sort_values().unique()\n\n# Which metrics can be plotted\nMETRICS_TO_GRAPH = (\n RNA_COL.ProportionUsableBases,\n RNA_COL.rRNAContaminationreadsaligned,\n RNA_COL.ProportionCorrectStrandReads,\n COL_PROP_ALIGNED_BASES,\n RNA_COL.ProportionCodingBases,\n RNA_COL.ProportionIntronicBases,\n RNA_COL.ProportionIntergenicBases,\n RNA_COL.ProportionUTRBases,\n)\n\n# Which columns will the data table always have\nDEFAULT_TABLE_COLUMN = [\n {\"name\": \"Library\", \"id\": RNA_COL.SampleName},\n {\"name\": \"Project\", \"id\": RNA_COL.StudyTitle},\n {\"name\": \"Run\", \"id\": RNA_COL.SequencerRunName},\n {\"name\": \"Lane\", \"id\": RNA_COL.LaneNumber},\n {\"name\": \"Kit\", \"id\": PINERY_COL.PrepKit},\n {\"name\": \"Library Design\", \"id\": PINERY_COL.LibrarySourceTemplateType},\n {\"name\": \"Tissue Origin\", \"id\": PINERY_COL.TissueOrigin},\n {\"name\": \"Tissue Type\", \"id\": PINERY_COL.TissueType},\n {\"name\": \"Tissue Material\", \"id\": PINERY_COL.TissuePreparation},\n]\n\n# Columns on which shape and colour can be set\nSHAPE_COLOUR_COLUMN = [\n {\"name\": \"Project\", \"id\": RNA_COL.StudyTitle},\n {\"name\": \"Kit\", \"id\": PINERY_COL.PrepKit},\n {\"name\": \"Library Design\", \"id\": PINERY_COL.LibrarySourceTemplateType},\n {\"name\": \"Tissue Origin\", \"id\": PINERY_COL.TissueOrigin},\n {\"name\": \"Tissue Type\", \"id\": PINERY_COL.TissueType},\n {\"name\": \"Tissue Material\", \"id\": PINERY_COL.TissuePreparation},\n]\n\nplot_creator = ShinyMimic(\n lambda: RNA_DF,\n \"rnaseqqc_over_time\",\n METRICS_TO_GRAPH,\n SHAPE_COLOUR_COLUMN,\n SHAPE_COLOUR_COLUMN,\n RNA_COL.StudyTitle,\n PINERY_COL.PrepKit,\n COL_RUN_DATE,\n RNA_COL.SampleName,\n)\n\nlayout = plot_creator.generate_layout(\n 4,\n RNA_COL.StudyTitle,\n PINERY_COL.PrepKit,\n DEFAULT_TABLE_COLUMN + [{\"name\": i, \"id\": i} for i in METRICS_TO_GRAPH],\n)\n\n\ndef init_callbacks(dash_app):\n plot_creator.assign_callbacks(dash_app)\n"
] |
[
[
"pandas.to_datetime"
]
] |
skphy/autodE
|
[
"fd80995206ac601299d2f78105d0fe4deee8c2cf"
] |
[
"tests/test_values.py"
] |
[
"import numpy as np\nfrom autode.units import ang, ha, ha_per_ang, ha_per_a0, ev\nfrom autode.values import (ValueArray, Gradient, Coordinate, Coordinates,\n MomentOfInertia)\n\n\nclass TmpValues(ValueArray):\n\n implemented_units = [ha, ev]\n\n def __repr__(self):\n return ''\n\n\ndef test_base_arr():\n\n tmp_values = TmpValues(np.arange(2))\n assert tmp_values.units is None\n\n tmp_values = TmpValues(np.arange(2), units=ha)\n assert tmp_values.units == ha\n\n for item in (None, 'a', 0, np.zeros(2)):\n\n # These are not the same! != calls __ne__\n assert not tmp_values == item\n assert tmp_values != item\n\n\ndef test_unit_retention():\n vals = TmpValues(np.array([0.1]), units=ev)\n assert vals.units == ev\n\n # Initialising an array from something with units should not default to the\n # default unit type (Hartrees for energies)\n vals1 = TmpValues(vals)\n assert vals1.units == ev\n\n\ndef test_coordinate():\n coord = Coordinate(0.0, 0.0, 0.0)\n assert coord.units == ang\n assert 'coord' in repr(coord).lower()\n\n assert coord is not None\n # Equality defaults to np.allclose\n assert coord == np.zeros(3)\n\n\ndef test_coordinates():\n\n arr = np.array([[0.0, 0.0, 0.0],\n [0.0, 0.0, 0.1]])\n coords = Coordinates(arr)\n assert coords.units == ang\n\n # Coordinates should reshape into n_atoms x 3\n coords = Coordinates(arr.flatten())\n assert coords.shape == (2, 3)\n\n\ndef test_moi():\n\n moi = MomentOfInertia(np.zeros(shape=(3, 3)))\n assert 'i' in repr(moi).lower()\n\n\ndef test_gradients():\n\n # Default gradient units are Ha Å^-1\n gradients = Gradient(np.arange(2, dtype='f8'))\n assert gradients.units == ha_per_ang\n assert 'grad' in repr(gradients).lower()\n\n gradients_ha_a0 = gradients.to(ha_per_a0)\n\n # Energy per bohr is smaller than per angstrom..\n assert all(g1 - g2 <= 0 for g1, g2 in zip(gradients_ha_a0, gradients))\n\n\n"
] |
[
[
"numpy.array",
"numpy.arange",
"numpy.zeros"
]
] |
zmoon92/proplot
|
[
"2c6f7af8a044567bb9409d3f67d844bac05c7d14"
] |
[
"proplot/axes/plot.py"
] |
[
"#!/usr/bin/env python3\n\"\"\"\nThe plotting wrappers that add functionality to various `~matplotlib.axes.Axes`\nmethods. \"Wrapped\" `~matplotlib.axes.Axes` methods accept the additional keyword\narguments documented by the wrapper function. In a future version, these features will\nbe documented on the individual `~proplot.axes.Axes` methods themselves.\n\"\"\"\nimport functools\nimport inspect\nimport re\nimport sys\nfrom numbers import Integral, Number\n\nimport matplotlib.artist as martist\nimport matplotlib.axes as maxes\nimport matplotlib.cm as mcm\nimport matplotlib.colors as mcolors\nimport matplotlib.container as mcontainer\nimport matplotlib.contour as mcontour\nimport matplotlib.legend as mlegend\nimport matplotlib.patches as mpatches\nimport matplotlib.patheffects as mpatheffects\nimport matplotlib.text as mtext\nimport matplotlib.ticker as mticker\nimport matplotlib.transforms as mtransforms\nimport numpy as np\nimport numpy.ma as ma\n\nfrom .. import colors as pcolors\nfrom .. import constructor\nfrom ..config import rc\nfrom ..internals import ic # noqa: F401\nfrom ..internals import (\n _dummy_context,\n _flexible_getattr,\n _not_none,\n _state_context,\n docstring,\n warnings,\n)\nfrom ..utils import edges, edges2d, to_rgb, to_xyz, units\n\ntry:\n from cartopy.crs import PlateCarree\nexcept ModuleNotFoundError:\n PlateCarree = object\n\n__all__ = [\n 'bar_wrapper',\n 'barh_wrapper',\n 'boxplot_wrapper',\n 'cmap_changer',\n 'colorbar_wrapper',\n 'cycle_changer',\n 'default_latlon',\n 'default_transform',\n 'fill_between_wrapper',\n 'fill_betweenx_wrapper',\n # 'hist_wrapper', # very minor changes\n 'hlines_wrapper',\n 'indicate_error',\n 'legend_wrapper',\n # 'parametric_wrapper', # full documentation is on Axes method\n # 'plot_wrapper', # the only functionality provided by this wrapper is deprecated\n 'scatter_wrapper',\n 'standardize_1d',\n 'standardize_2d',\n # 'stem_wrapper', # very minor changes\n 'text_wrapper',\n 'violinplot_wrapper',\n 'vlines_wrapper',\n]\n\ndocstring.snippets['standardize.autoformat'] = \"\"\"\nautoformat : bool, optional\n Whether *x* axis labels, *y* axis labels, axis formatters, axes titles,\n colorbar labels, and legend labels are automatically configured when\n a `~pandas.Series`, `~pandas.DataFrame` or `~xarray.DataArray` is passed\n to the plotting command. Default is :rc:`autoformat`.\n\"\"\"\n\ndocstring.snippets['axes.cmap_changer'] = \"\"\"\ncmap : colormap spec, optional\n The colormap specifer, passed to the `~proplot.constructor.Colormap`\n constructor.\ncmap_kw : dict-like, optional\n Passed to `~proplot.constructor.Colormap`.\nnorm : normalizer spec, optional\n The colormap normalizer, used to warp data before passing it\n to `~proplot.colors.DiscreteNorm`. This is passed to the\n `~proplot.constructor.Norm` constructor.\nnorm_kw : dict-like, optional\n Passed to `~proplot.constructor.Norm`.\nvmin, vmax : float, optional\n Used to determine level locations if `levels` is an integer. Actual\n levels may not fall exactly on `vmin` and `vmax`, but the minimum\n level will be no smaller than `vmin` and the maximum level will be\n no larger than `vmax`. If `vmin` or `vmax` is not provided, the\n minimum and maximum data values are used.\nlevels, N : int or list of float, optional\n The number of level edges, or a list of level edges. 
If the former,\n `locator` is used to generate this many levels at \"nice\" intervals.\n If the latter, the levels should be monotonically increasing or\n decreasing (note that decreasing levels will only work with ``pcolor``\n plots, not ``contour`` plots). Default is :rc:`image.levels`.\n Note this means you can now discretize your colormap colors in a\n ``pcolor`` plot just like with ``contourf``.\nvalues : int or list of float, optional\n The number of level centers, or a list of level centers. If provided,\n levels are inferred using `~proplot.utils.edges`. This will override\n any `levels` input.\nsymmetric : bool, optional\n If ``True``, automatically generated levels are symmetric about zero.\npositive : bool, optional\n If ``True``, automatically generated levels are positive with a minimum at zero.\nnegative : bool, optional\n If ``True``, automatically generated levels are negative with a maximum at zero.\nnozero : bool, optional\n If ``True``, ``0`` is removed from the level list.\nlocator : locator-spec, optional\n The locator used to determine level locations if `levels` or `values`\n is an integer and `vmin` and `vmax` were not provided. Passed to the\n `~proplot.constructor.Locator` constructor. Default is\n `~matplotlib.ticker.MaxNLocator` with ``levels`` integer levels.\nlocator_kw : dict-like, optional\n Passed to `~proplot.constructor.Locator`.\n\"\"\"\n\n_area_docstring = \"\"\"\nSupports overlaying and stacking successive columns of data, and permits\nusing different colors for \"negative\" and \"positive\" regions.\n\nNote\n----\nThis function wraps `~matplotlib.axes.Axes.fill_between{suffix}` and\n`~proplot.axes.Axes.area{suffix}`.\n\nParameters\n----------\n*args : ({y}1,), ({x}, {y}1), or ({x}, {y}1, {y}2)\n The *{x}* and *{y}* coordinates. If `{x}` is not provided, it will be\n inferred from `{y}1`. If `{y}1` and `{y}2` are provided, this function\n will shade between respective columns of the arrays. The default value\n for `{y}2` is ``0``.\nstacked : bool, optional\n Whether to \"stack\" successive columns of the `{y}1` array. If this is\n ``True`` and `{y}2` was provided, it will be ignored.\nnegpos : bool, optional\n Whether to shade where `{y}1` is greater than `{y}2` with the color\n `poscolor`, and where `{y}1` is less than `{y}2` with the color\n `negcolor`. For example, to shade positive values red and negative values\n blue, use ``ax.fill_between{suffix}({x}, {y}, negpos=True)``.\nnegcolor, poscolor : color-spec, optional\n Colors to use for the negative and positive shaded regions. Ignored if `negpos`\n is ``False``. Defaults are :rc:`negcolor` and :rc:`poscolor`.\nwhere : ndarray, optional\n Boolean ndarray mask for points you want to shade. 
See `this example \\\n<https://matplotlib.org/3.1.0/gallery/pyplots/whats_new_98_4_fill_between.html#sphx-glr-gallery-pyplots-whats-new-98-4-fill-between-py>`__.\nlw, linewidth : float, optional\n The edge width for the area patches.\nedgecolor : color-spec, optional\n The edge color for the area patches.\n\nOther parameters\n----------------\n**kwargs\n Passed to `~matplotlib.axes.Axes.fill_between`.\n\"\"\"\ndocstring.snippets['axes.fill_between'] = _area_docstring.format(\n x='x', y='y', suffix='',\n)\ndocstring.snippets['axes.fill_betweenx'] = _area_docstring.format(\n x='y', y='x', suffix='x',\n)\n\n_bar_docstring = \"\"\"\nSupports grouping and stacking successive columns of data, and changes\nthe default bar style.\n\nNote\n----\nThis function wraps `~matplotlib.axes.Axes.bar{suffix}`.\n\nParameters\n----------\n{x}, {height}, width, {bottom} : float or list of float, optional\n The dimensions of the bars. If the *{x}* coordinates are not provided,\n they are set to ``np.arange(0, len(height))``. Note that the units\n for `width` are now *relative*.\norientation : {{'vertical', 'horizontal'}}, optional\n The orientation of the bars.\nvert : bool, optional\n Alternative to the `orientation` keyword arg. If ``False``, horizontal\n bars are drawn. This is for consistency with\n `~matplotlib.axes.Axes.boxplot` and `~matplotlib.axes.Axes.violinplot`.\nstacked : bool, optional\n Whether to stack columns of input data, or plot the bars side-by-side.\nnegpos : bool, optional\n Whether to shade bars greater than zero with `poscolor` and bars less\n than zero with `negcolor`.\nnegcolor, poscolor : color-spec, optional\n Colors to use for the negative and positive bars. Ignored if `negpos`\n is ``False``. Defaults are :rc:`negcolor` and :rc:`poscolor`.\nlw, linewidth : float, optional\n The edge width for the bar patches.\nedgecolor : color-spec, optional\n The edge color for the bar patches.\n\nOther parameters\n----------------\n**kwargs\n Passed to `~matplotlib.axes.Axes.bar{suffix}`.\n\"\"\"\ndocstring.snippets['axes.bar'] = _bar_docstring.format(\n x='x', height='height', bottom='bottom', suffix='',\n)\ndocstring.snippets['axes.barh'] = _bar_docstring.format(\n x='y', height='right', bottom='left', suffix='h',\n)\n\ndocstring.snippets['axes.lines'] = \"\"\"\nnegpos : bool, optional\n Whether to color lines greater than zero with `poscolor` and lines less\n than zero with `negcolor`.\nnegcolor, poscolor : color-spec, optional\n Colors to use for the negative and positive lines. Ignored if `negpos`\n is ``False``. Defaults are :rc:`negcolor` and :rc:`poscolor`.\n\"\"\"\n\n\ndef _concatenate_docstrings(func):\n \"\"\"\n Concatenate docstrings from a matplotlib axes method with a ProPlot axes\n method and obfuscate the call signature to avoid misleading users. Requires\n that ProPlot documentation has no \"other parameters\", notes, or examples\n sections.\n \"\"\"\n # NOTE: Originally had idea to use numpydoc.docscrape.NumpyDocString to\n # interpolate docstrings but *enormous* number of assupmtions would go into\n # this. 
And simple is better than complex.\n # Get matplotlib axes func\n # If current func has no docstring just blindly copy matplotlib one\n name = func.__name__\n orig = getattr(maxes.Axes, name)\n odoc = inspect.getdoc(orig)\n if not odoc: # should never happen\n return func\n\n # Prepend summary and potentially bail\n # TODO: Does this break anything on sphinx website?\n fdoc = inspect.getdoc(func) or '' # also dedents\n regex = re.search(r'\\.( | *\\n|\\Z)', odoc)\n if regex:\n fdoc = odoc[:regex.start() + 1] + '\\n\\n' + fdoc\n if rc['docstring.hardcopy']: # True when running sphinx\n func.__doc__ = fdoc\n return func\n\n # Obfuscate signature by converting to *args **kwargs. Note this does\n # not change behavior of function! Copy parameters from a dummy function\n # because I'm too lazy to figure out inspect.Parameters API\n # See: https://stackoverflow.com/a/33112180/4970632\n dsig = inspect.signature(lambda *args, **kwargs: None)\n fsig = inspect.signature(func)\n func.__signature__ = (\n fsig.replace(parameters=tuple(dsig.parameters.values()))\n )\n\n # Concatenate docstrings and copy summary\n # Make sure different sections are very visible\n doc = f\"\"\"\n================================{'=' * len(name)}\nproplot.axes.Axes.{name} documentation\n================================{'=' * len(name)}\n{fdoc}\n==================================={'=' * len(name)}\nmatplotlib.axes.Axes.{name} documentation\n==================================={'=' * len(name)}\n{odoc}\n\"\"\"\n func.__doc__ = doc\n\n # Return\n return func\n\n\ndef _load_objects():\n \"\"\"\n Delay loading expensive modules. We just want to detect if *input\n arrays* belong to these types -- and if this is the case, it means the\n module has already been imported! So, we only try loading these classes\n within autoformat calls. This saves >~500ms of import time.\n \"\"\"\n global DataArray, DataFrame, Series, Index, ndarray, ARRAY_TYPES\n ndarray = np.ndarray\n DataArray = getattr(sys.modules.get('xarray', None), 'DataArray', ndarray)\n DataFrame = getattr(sys.modules.get('pandas', None), 'DataFrame', ndarray)\n Series = getattr(sys.modules.get('pandas', None), 'Series', ndarray)\n Index = getattr(sys.modules.get('pandas', None), 'Index', ndarray)\n ARRAY_TYPES = (ndarray, DataArray, DataFrame, Series, Index)\n\n\n_load_objects()\n\n# Make keywords for styling cmap_changer-overridden plots *consistent*\n# TODO: Consider deprecating linewidth and linestyle interpretation. 
Think\n# these already have flexible interpretation for all plotting funcs.\nSTYLE_ARGS_TRANSLATE = {\n 'contour': {\n 'colors': 'colors',\n 'linewidths': 'linewidths',\n 'linestyles': 'linestyles',\n },\n 'tricontour': {\n 'colors': 'colors',\n 'linewidths': 'linewidths',\n 'linestyles': 'linestyles',\n },\n 'pcolor': {\n 'colors': 'edgecolors',\n 'linewidths': 'linewidth',\n 'linestyles': 'linestyle',\n },\n 'pcolormesh': {\n 'colors': 'edgecolors',\n 'linewidths': 'linewidth',\n 'linestyles': 'linestyle',\n },\n 'pcolorfast': {\n 'colors': 'edgecolors',\n 'linewidths': 'linewidth',\n 'linestyles': 'linestyle',\n },\n 'tripcolor': {\n 'colors': 'edgecolors',\n 'linewidths': 'linewidth',\n 'linestyles': 'linestyle',\n },\n 'parametric': {\n 'colors': 'color',\n 'linewidths': 'linewidth',\n 'linestyles': 'linestyle',\n },\n 'hexbin': {\n 'colors': 'edgecolors',\n 'linewidths': 'linewidths',\n 'linestyles': 'linestyles',\n },\n 'hist2d': {\n 'colors': 'edgecolors',\n 'linewidths': 'linewidths',\n 'linestyles': 'linestyles',\n },\n 'barbs': {\n 'colors': 'barbcolor',\n 'linewidths': 'linewidth',\n 'linestyles': 'linestyle',\n },\n 'quiver': { # NOTE: linewidth/linestyle apply to *arrow outline*\n 'colors': 'color',\n 'linewidths': 'linewidth',\n 'linestyles': 'linestyle',\n },\n 'streamplot': {\n 'colors': 'color',\n 'linewidths': 'linewidth',\n 'linestyles': 'linestyle',\n },\n 'spy': {\n 'colors': 'color',\n 'linewidths': 'linewidth',\n 'linestyles': 'linestyle',\n },\n 'matshow': {\n 'colors': 'color',\n 'linewidths': 'linewidth',\n 'linestyles': 'linestyle',\n },\n 'imshow': None,\n}\n\n\ndef _is_number(data):\n \"\"\"\n Test whether input is numeric array rather than datetime or strings.\n \"\"\"\n return len(data) and np.issubdtype(_to_ndarray(data).dtype, np.number)\n\n\ndef _is_string(data):\n \"\"\"\n Test whether input is array of strings.\n \"\"\"\n return len(data) and isinstance(_to_ndarray(data).flat[0], str)\n\n\ndef _to_arraylike(data):\n \"\"\"\n Convert list of lists to array-like type.\n \"\"\"\n _load_objects()\n if not isinstance(data, (ndarray, DataArray, DataFrame, Series, Index)):\n data = np.array(data)\n if not np.iterable(data):\n data = np.atleast_1d(data)\n return data\n\n\ndef _to_indexer(data):\n \"\"\"\n Return indexible attribute of array-like type.\n \"\"\"\n return getattr(data, 'iloc', data)\n\n\ndef _to_ndarray(data):\n \"\"\"\n Convert arbitrary input to ndarray cleanly.\n \"\"\"\n return np.atleast_1d(getattr(data, 'values', data))\n\n\ndef default_latlon(self, func, *args, latlon=True, **kwargs):\n \"\"\"\n Makes ``latlon=True`` the default for basemap plots.\n This means you no longer have to pass ``latlon=True`` if your data\n coordinates are longitude and latitude.\n\n Note\n ----\n This function wraps {methods} for `~proplot.axes.BasemapAxes`.\n \"\"\"\n return func(self, *args, latlon=latlon, **kwargs)\n\n\ndef default_transform(self, func, *args, transform=None, **kwargs):\n \"\"\"\n Makes ``transform=cartopy.crs.PlateCarree()`` the default\n for cartopy plots. 
This means you no longer have to\n pass ``transform=cartopy.crs.PlateCarree()`` if your data\n coordinates are longitude and latitude.\n\n Note\n ----\n This function wraps {methods} for `~proplot.axes.CartopyAxes`.\n \"\"\"\n # Apply default transform\n # TODO: Do some cartopy methods reset backgroundpatch or outlinepatch?\n # Deleted comment reported this issue\n if transform is None:\n transform = PlateCarree()\n result = func(self, *args, transform=transform, **kwargs)\n return result\n\n\ndef _axis_labels_title(data, axis=None, units=True):\n \"\"\"\n Get data and label for pandas or xarray objects or their coordinates along axis\n `axis`. If `units` is ``True`` also look for units on xarray data arrays.\n \"\"\"\n label = ''\n _load_objects()\n if isinstance(data, ndarray):\n if axis is not None and data.ndim > axis:\n data = np.arange(data.shape[axis])\n\n # Xarray with common NetCDF attribute names\n elif isinstance(data, DataArray):\n if axis is not None and data.ndim > axis:\n data = data.coords[data.dims[axis]]\n label = getattr(data, 'name', '') or ''\n for key in ('standard_name', 'long_name'):\n label = data.attrs.get(key, label)\n if units:\n units = data.attrs.get('units', '')\n if label and units:\n label = f'{label} ({units})'\n elif units:\n label = units\n\n # Pandas object with name attribute\n # if not label and isinstance(data, DataFrame) and data.columns.size == 1:\n elif isinstance(data, (DataFrame, Series, Index)):\n if axis == 0 and isinstance(data, Index):\n pass\n elif axis == 0 and isinstance(data, (DataFrame, Series)):\n data = data.index\n elif axis == 1 and isinstance(data, DataFrame):\n data = data.columns\n elif axis == 1 and isinstance(data, (Series, Index)):\n data = np.array([data.name]) # treat series name as the \"column\" data\n # DataFrame has no native name attribute but user can add one:\n # https://github.com/pandas-dev/pandas/issues/447\n label = getattr(data, 'name', '') or ''\n\n return data, str(label).strip()\n\n\n@docstring.add_snippets\ndef standardize_1d(self, func, *args, autoformat=None, **kwargs):\n \"\"\"\n Interpret positional arguments for the \"1D\" plotting methods so usage is\n consistent. Positional arguments are standardized as follows:\n\n * If a 2D array is passed, the corresponding plot command is called for\n each column of data (except for ``boxplot`` and ``violinplot``, in which\n case each column is interpreted as a distribution).\n * If *x* and *y* or *latitude* and *longitude* coordinates were not\n provided, and a `~pandas.DataFrame` or `~xarray.DataArray`, we\n try to infer them from the metadata. 
Otherwise,\n ``np.arange(0, data.shape[0])`` is used.\n\n Parameters\n ----------\n %(standardize.autoformat)s\n\n See also\n --------\n cycle_changer\n\n Note\n ----\n This function wraps {methods}\n \"\"\"\n # Sanitize input\n # TODO: Add exceptions for methods other than 'hist'?\n name = func.__name__\n autoformat = _not_none(autoformat, rc['autoformat'])\n _load_objects()\n if not args:\n return func(self, *args, **kwargs)\n elif len(args) == 1:\n x = None\n y, *args = args\n elif len(args) <= 4: # max signature is x, y, z, color\n x, y, *args = args\n else:\n raise ValueError(\n f'{name}() takes up to 4 positional arguments but {len(args)} was given.'\n )\n vert = kwargs.get('vert', None)\n if vert is not None:\n orientation = ('vertical' if vert else 'horizontal')\n else:\n orientation = kwargs.get('orientation', 'vertical')\n\n # Iterate through list of ys that we assume are identical\n # Standardize based on the first y input\n if len(args) >= 1 and 'fill_between' in name:\n ys, args = (y, args[0]), args[1:]\n else:\n ys = (y,)\n ys = [_to_arraylike(y) for y in ys]\n\n # Auto x coords\n y = ys[0] # test the first y input\n if x is None:\n axis = int(\n name in ('hist', 'boxplot', 'violinplot')\n or any(kwargs.get(s, None) for s in ('means', 'medians'))\n )\n x, _ = _axis_labels_title(y, axis=axis)\n x = _to_arraylike(x)\n if x.ndim != 1:\n raise ValueError(\n f'x coordinates must be 1-dimensional, but got {x.ndim}.'\n )\n\n # Auto formatting\n x_index = None # index version of 'x'\n if not hasattr(self, 'projection'):\n # First handle string-type x-coordinates\n kw = {}\n xname = 'y' if orientation == 'horizontal' else 'x'\n yname = 'x' if xname == 'y' else 'y'\n if _is_string(x):\n if name in ('hist',):\n kwargs.setdefault('labels', list(x))\n else:\n x_index = np.arange(len(x))\n kw[xname + 'locator'] = mticker.FixedLocator(x_index)\n kw[xname + 'formatter'] = mticker.IndexFormatter(x)\n kw[xname + 'minorlocator'] = mticker.NullLocator()\n if name == 'boxplot': # otherwise IndexFormatter is overridden\n kwargs['labels'] = x\n\n # Next handle labels if 'autoformat' is on\n # NOTE: Do not overwrite existing labels!\n if autoformat:\n # Ylabel\n y, label = _axis_labels_title(y)\n iname = xname if name in ('hist',) else yname\n if label and not getattr(self, f'get_{iname}label')():\n # For histograms, this label is used for *x* coordinates\n kw[iname + 'label'] = label\n if name not in ('hist',):\n # Xlabel\n x, label = _axis_labels_title(x)\n if label and not getattr(self, f'get_{xname}label')():\n kw[xname + 'label'] = label\n # Reversed axis\n if name not in ('scatter',):\n if x_index is None and len(x) > 1 and x[1] < x[0]:\n kw[xname + 'reverse'] = True\n\n # Appply\n if kw:\n self.format(**kw)\n\n # Standardize args\n if x_index is not None:\n x = x_index\n if name in ('boxplot', 'violinplot'):\n ys = [_to_ndarray(yi) for yi in ys] # store naked array\n kwargs['positions'] = x\n\n # Basemap shift x coordiantes without shifting y, we fix this!\n if getattr(self, 'name', '') == 'basemap' and kwargs.get('latlon', None):\n ix, iys = x, []\n xmin, xmax = self.projection.lonmin, self.projection.lonmax\n for y in ys:\n # Ensure data is monotonic and falls within map bounds\n ix, iy = _enforce_bounds(*_fix_latlon(x, y), xmin, xmax)\n iys.append(iy)\n x, ys = ix, iys\n\n # WARNING: For some functions, e.g. 
boxplot and violinplot, we *require*\n # cycle_changer is also applied so it can strip 'x' input.\n with rc.context(autoformat=autoformat):\n return func(self, x, *ys, *args, **kwargs)\n\n\ndef _enforce_bounds(x, y, xmin, xmax):\n \"\"\"\n Ensure data for basemap plots is restricted between the minimum and\n maximum longitude of the projection. Input is the ``x`` and ``y``\n coordinates. The ``y`` coordinates are rolled along the rightmost axis.\n \"\"\"\n if x.ndim != 1:\n return x, y\n # Roll in same direction if some points on right-edge extend\n # more than 360 above min longitude; *they* should be on left side\n lonroll = np.where(x > xmin + 360)[0] # tuple of ids\n if lonroll.size: # non-empty\n roll = x.size - lonroll.min()\n x = np.roll(x, roll)\n y = np.roll(y, roll, axis=-1)\n x[:roll] -= 360 # make monotonic\n\n # Set NaN where data not in range xmin, xmax. Must be done\n # for regional smaller projections or get weird side-effects due\n # to having valid data way outside of the map boundaries\n y = y.copy()\n if x.size - 1 == y.shape[-1]: # test western/eastern grid cell edges\n y[..., (x[1:] < xmin) | (x[:-1] > xmax)] = np.nan\n elif x.size == y.shape[-1]: # test the centers and pad by one for safety\n where = np.where((x < xmin) | (x > xmax))[0]\n y[..., where[1:-1]] = np.nan\n return x, y\n\n\ndef _fix_latlon(x, y):\n \"\"\"\n Ensure longitudes are monotonic and make `~numpy.ndarray` copies so the\n contents can be modified. Ignores 2D coordinate arrays.\n \"\"\"\n # Sanitization and bail if 2d\n if x.ndim == 1:\n x = ma.array(x)\n if y.ndim == 1:\n y = ma.array(y)\n if x.ndim != 1 or all(x < x[0]): # skip monotonic backwards data\n return x, y\n # Enforce monotonic longitudes\n lon1 = x[0]\n while True:\n filter_ = (x < lon1)\n if filter_.sum() == 0:\n break\n x[filter_] += 360\n return x, y\n\n\ndef _interp_poles(y, Z):\n \"\"\"\n Add data points on the poles as the average of highest latitude data.\n \"\"\"\n # Get means\n with np.errstate(all='ignore'):\n p1 = Z[0, :].mean() # pole 1, make sure is not 0D DataArray!\n p2 = Z[-1, :].mean() # pole 2\n if hasattr(p1, 'item'):\n p1 = np.asscalar(p1) # happens with DataArrays\n if hasattr(p2, 'item'):\n p2 = np.asscalar(p2)\n # Concatenate\n ps = (-90, 90) if (y[0] < y[-1]) else (90, -90)\n Z1 = np.repeat(p1, Z.shape[1])[None, :]\n Z2 = np.repeat(p2, Z.shape[1])[None, :]\n y = ma.concatenate((ps[:1], y, ps[1:]))\n Z = ma.concatenate((Z1, Z, Z2), axis=0)\n return y, Z\n\n\n@docstring.add_snippets\ndef standardize_2d(\n self, func, *args, autoformat=None, order='C', globe=False, **kwargs\n):\n \"\"\"\n Interpret positional arguments for the \"2D\" plotting methods so usage is\n consistent. Positional arguments are standardized as follows:\n\n * If *x* and *y* or *latitude* and *longitude* coordinates were not\n provided, and a `~pandas.DataFrame` or `~xarray.DataArray` is passed, we\n try to infer them from the metadata. Otherwise, ``np.arange(0, data.shape[0])``\n and ``np.arange(0, data.shape[1])`` are used.\n * For ``pcolor`` and ``pcolormesh``, coordinate *edges* are calculated\n if *centers* were provided. For all other methods, coordinate *centers*\n are calculated if *edges* were provided.\n\n Parameters\n ----------\n %(standardize.autoformat)s\n order : {{'C', 'F'}}, optional\n If ``'C'``, arrays should be shaped ``(y, x)``. If ``'F'``, arrays\n should be shaped ``(x, y)``. Default is ``'C'``.\n globe : bool, optional\n Whether to ensure global coverage for `~proplot.axes.GeoAxes` plots.\n Default is ``False``. 
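# --- Illustrative sketch (added example, not part of the original module) ----
# The monotonic-longitude fix performed by _fix_latlon() above, in isolation:
# longitudes smaller than the first value are bumped by 360 until none remain
# below it, which makes typical global grids monotonic. Assumes a plain 1D
# numpy array; demo_fix_lon is a hypothetical name.
import numpy as np

def demo_fix_lon(lon):
    lon = np.array(lon, dtype=float)   # copy so the caller's array is untouched
    while True:
        behind = lon < lon[0]
        if not behind.any():
            break
        lon[behind] += 360
    return lon

# Example:
# demo_fix_lon([300, 330, 0, 30, 60])  -> [300. 330. 360. 390. 420.]
# (same physical longitudes, now increasing from the first value)
# ------------------------------------------------------------------------------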
When set to ``True`` this does the following:\n\n #. Interpolates input data to the North and South poles by setting the data\n values at the poles to the mean from latitudes nearest each pole.\n #. Makes meridional coverage \"circular\", i.e. the last longitude coordinate\n equals the first longitude coordinate plus 360\\N{DEGREE SIGN}.\n #. For `~proplot.axes.BasemapAxes`, 1D longitude vectors are also cycled to\n fit within the map edges. For example, if the projection central longitude\n is 90\\N{DEGREE SIGN}, the data is shifted so that it spans -90\\N{DEGREE SIGN}\n to 270\\N{DEGREE SIGN}.\n\n See also\n --------\n cmap_changer\n\n Note\n ----\n This function wraps {methods}\n \"\"\"\n # Sanitize input\n name = func.__name__\n autoformat = _not_none(autoformat, rc['autoformat'])\n _load_objects()\n if not args:\n return func(self, *args, **kwargs)\n elif len(args) > 5:\n raise ValueError(\n f'{name}() takes up to 5 positional arguments but {len(args)} was given.'\n )\n x, y = None, None\n if len(args) > 2:\n x, y, *args = args\n\n # Ensure DataArray, DataFrame or ndarray\n Zs = []\n for Z in args:\n Z = _to_arraylike(Z)\n if Z.ndim != 2:\n raise ValueError(f'Z must be 2-dimensional, got shape {Z.shape}.')\n Zs.append(Z)\n if not all(Zs[0].shape == Z.shape for Z in Zs):\n raise ValueError(\n f'Zs must be same shape, got shapes {[Z.shape for Z in Zs]}.'\n )\n\n # Retrieve coordinates\n if x is None and y is None:\n Z = Zs[0]\n if order == 'C':\n idx, idy = 1, 0\n else:\n idx, idy = 0, 1\n # x = np.arange(Z.shape[idx])\n # y = np.arange(Z.shape[idy])\n if isinstance(Z, ndarray):\n x = np.arange(Z.shape[idx])\n y = np.arange(Z.shape[idy])\n elif isinstance(Z, DataArray): # DataArray\n x = Z.coords[Z.dims[idx]]\n y = Z.coords[Z.dims[idy]]\n else: # DataFrame; never Series or Index because these are 1d\n if order == 'C':\n x = Z.columns\n y = Z.index\n else:\n x = Z.index\n y = Z.columns\n\n # Optionally re-order\n # TODO: Double check this\n if order == 'F':\n x, y = x.T, y.T # in case they are 2-dimensional\n Zs = tuple(Z.T for Z in Zs)\n elif order != 'C':\n raise ValueError(\n f'Invalid order {order!r}. 
Choose from '\n '\"C\" (row-major, default) and \"F\" (column-major).'\n )\n\n # Check coordinates\n x, y = _to_arraylike(x), _to_arraylike(y)\n if x.ndim != y.ndim:\n raise ValueError(\n f'x coordinates are {x.ndim}-dimensional, '\n f'but y coordinates are {y.ndim}-dimensional.'\n )\n for s, array in zip(('x', 'y'), (x, y)):\n if array.ndim not in (1, 2):\n raise ValueError(\n f'{s} coordinates are {array.ndim}-dimensional, '\n f'but must be 1 or 2-dimensional.'\n )\n\n # Auto axis labels\n # TODO: Check whether isinstance(GeoAxes) instead of checking projection attribute\n kw = {}\n xi = yi = None\n if not hasattr(self, 'projection'):\n # First handle string-type x and y-coordinates\n if _is_string(x):\n xi = np.arange(len(x))\n kw['xlocator'] = mticker.FixedLocator(xi)\n kw['xformatter'] = mticker.IndexFormatter(x)\n kw['xminorlocator'] = mticker.NullLocator()\n if _is_string(y):\n yi = np.arange(len(y))\n kw['ylocator'] = mticker.FixedLocator(yi)\n kw['yformatter'] = mticker.IndexFormatter(y)\n kw['yminorlocator'] = mticker.NullLocator()\n\n # Handle labels if 'autoformat' is on\n # NOTE: Do not overwrite existing labels!\n if autoformat:\n for key, xy in zip(('xlabel', 'ylabel'), (x, y)):\n # Axis label\n _, label = _axis_labels_title(xy)\n if label and not getattr(self, f'get_{key}')():\n kw[key] = label\n # Reversed axis\n if (\n len(xy) > 1\n and all(isinstance(xy, Number) for xy in xy[:2])\n and xy[1] < xy[0]\n ):\n kw[key[0] + 'reverse'] = True\n if kw:\n self.format(**kw)\n\n # Use *index coordinates* from here on out if input was array of strings\n if xi is not None:\n x = xi\n if yi is not None:\n y = yi\n\n # Auto axes title and colorbar label\n # NOTE: Do not overwrite existing title!\n # NOTE: Must apply default colorbar label *here* rather than in\n # cmap_changer in case metadata is stripped by globe=True.\n colorbar_kw = kwargs.pop('colorbar_kw', None) or {}\n if autoformat:\n _, colorbar_label = _axis_labels_title(Zs[0], units=True)\n colorbar_kw.setdefault('label', colorbar_label)\n kwargs['colorbar_kw'] = colorbar_kw\n\n # Enforce edges\n if name in ('pcolor', 'pcolormesh', 'pcolorfast'):\n Z = Zs[0] # already enforced that shapes must be identical (see above)\n xlen, ylen = x.shape[-1], y.shape[0]\n if Z.ndim != 2:\n raise ValueError(\n f'Input arrays must be 2D, instead got shape {Z.shape}.'\n )\n elif Z.shape[1] == xlen and Z.shape[0] == ylen:\n # Get edges given centers\n if all(z.ndim == 1 and z.size > 1 and _is_number(z) for z in (x, y)):\n x = edges(x)\n y = edges(y)\n else:\n if (\n x.ndim == 2 and x.shape[0] > 1 and x.shape[1] > 1\n and _is_number(x)\n ):\n x = edges2d(x)\n if (\n y.ndim == 2 and y.shape[0] > 1 and y.shape[1] > 1\n and _is_number(y)\n ):\n y = edges2d(y)\n elif Z.shape[1] != xlen - 1 or Z.shape[0] != ylen - 1:\n raise ValueError(\n f'Input shapes x {x.shape} and y {y.shape} must match '\n f'Z centers {Z.shape} or '\n f'Z borders {tuple(i+1 for i in Z.shape)}.'\n )\n\n # Enforce centers\n else:\n Z = Zs[0] # already enforced that shapes must be identical (see above)\n xlen, ylen = x.shape[-1], y.shape[0]\n if Z.ndim != 2:\n raise ValueError(\n f'Input arrays must be 2d, instead got shape {Z.shape}.'\n )\n elif Z.shape[1] == xlen - 1 and Z.shape[0] == ylen - 1:\n # Get centers given edges.\n if all(z.ndim == 1 and z.size > 1 and _is_number(z) for z in (x, y)):\n x = 0.5 * (x[1:] + x[:-1])\n y = 0.5 * (y[1:] + y[:-1])\n else:\n if (\n x.ndim == 2 and x.shape[0] > 1 and x.shape[1] > 1\n and _is_number(x)\n ):\n x = 0.25 * (x[:-1, :-1] + x[:-1, 
1:] + x[1:, :-1] + x[1:, 1:])\n if (\n y.ndim == 2 and y.shape[0] > 1 and y.shape[1] > 1\n and _is_number(y)\n ):\n y = 0.25 * (y[:-1, :-1] + y[:-1, 1:] + y[1:, :-1] + y[1:, 1:])\n elif Z.shape[1] != xlen or Z.shape[0] != ylen:\n raise ValueError(\n f'Input shapes x {x.shape} and y {y.shape} '\n f'must match Z centers {Z.shape} '\n f'or Z borders {tuple(i+1 for i in Z.shape)}.'\n )\n\n # Cartopy projection axes\n if (\n getattr(self, 'name', '') == 'cartopy'\n and isinstance(kwargs.get('transform', None), PlateCarree)\n ):\n x, y = _fix_latlon(x, y)\n ix, iZs = x, []\n for Z in Zs:\n if globe and x.ndim == 1 and y.ndim == 1:\n # Fix holes over poles by *interpolating* there\n y, Z = _interp_poles(y, Z)\n\n # Fix seams by ensuring circular coverage. Unlike basemap,\n # cartopy can plot across map edges.\n if x[0] % 360 != (x[-1] + 360) % 360:\n ix = ma.concatenate((x, [x[0] + 360]))\n Z = ma.concatenate((Z, Z[:, :1]), axis=1)\n iZs.append(Z)\n x, Zs = ix, iZs\n\n # Basemap projection axes\n elif getattr(self, 'name', '') == 'basemap' and kwargs.get('latlon', None):\n # Fix grid\n xmin, xmax = self.projection.lonmin, self.projection.lonmax\n x, y = _fix_latlon(x, y)\n ix, iZs = x, []\n for Z in Zs:\n # Ensure data is within map bounds\n ix, Z = _enforce_bounds(x, Z, xmin, xmax)\n\n # Globe coverage fixes\n if globe and ix.ndim == 1 and y.ndim == 1:\n # Fix holes over poles by interpolating there (equivalent to\n # simple mean of highest/lowest latitude points)\n y, Z = _interp_poles(y, Z)\n\n # Fix seams at map boundary; 3 scenarios here:\n # Have edges (e.g. for pcolor), and they fit perfectly against\n # basemap seams. Does not augment size.\n if ix[0] == xmin and ix.size - 1 == Z.shape[1]:\n pass # do nothing\n # Have edges (e.g. for pcolor), and the projection edge is\n # in-between grid cell boundaries. Augments size by 1.\n elif ix.size - 1 == Z.shape[1]: # just add grid cell\n ix = ma.append(xmin, ix)\n ix[-1] = xmin + 360\n Z = ma.concatenate((Z[:, -1:], Z), axis=1)\n # Have centers (e.g. for contourf), and we need to interpolate\n # to left/right edges of the map boundary. 
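# --- Illustrative sketch (added example, not part of the original module) ----
# The "circular coverage" seam fix used for the cartopy branch above, in
# isolation: append the first longitude plus 360 and repeat the first data
# column so global plots have no gap at the seam. Plain numpy only; the
# masked-array handling of the real code is omitted and demo_add_cyclic is a
# hypothetical name.
import numpy as np

def demo_add_cyclic(lon, Z):
    if lon[0] % 360 != (lon[-1] + 360) % 360:        # seam not already closed
        lon = np.concatenate((lon, [lon[0] + 360]))
        Z = np.concatenate((Z, Z[:, :1]), axis=1)    # duplicate first column
    return lon, Z

# Example:
# lon = np.arange(0, 360, 90)        # [0, 90, 180, 270]
# Z = np.arange(12).reshape(3, 4)    # 3 latitudes x 4 longitudes
# demo_add_cyclic(lon, Z)            # lon -> [0, 90, 180, 270, 360], Z -> (3, 5)
# ------------------------------------------------------------------------------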
Augments size by 2.\n elif ix.size == Z.shape[1]:\n xi = np.array([ix[-1], ix[0] + 360]) # x\n if xi[0] != xi[1]:\n Zq = ma.concatenate((Z[:, -1:], Z[:, :1]), axis=1)\n xq = xmin + 360\n Zq = (\n Zq[:, :1] * (xi[1] - xq) + Zq[:, 1:] * (xq - xi[0])\n ) / (xi[1] - xi[0])\n ix = ma.concatenate(([xmin], ix, [xmin + 360]))\n Z = ma.concatenate((Zq, Z, Zq), axis=1)\n else:\n raise ValueError(\n 'Unexpected shape of longitude, latitude, and/or data array(s).'\n )\n iZs.append(Z)\n x, Zs = ix, iZs\n\n # Convert to projection coordinates\n if x.ndim == 1 and y.ndim == 1:\n x, y = np.meshgrid(x, y)\n x, y = self.projection(x, y)\n kwargs['latlon'] = False\n\n # Finally return result\n with rc.context(autoformat=autoformat):\n return func(self, x, y, *Zs, **kwargs)\n\n\ndef _get_error_data(\n data, y, errdata=None, stds=None, pctiles=False,\n stds_default=None, pctiles_default=None,\n means_or_medians=True, absolute=False, label=False,\n):\n \"\"\"\n Return values that can be passed to the `~matplotlib.axes.Axes.errorbar`\n `xerr` and `yerr` keyword args.\n \"\"\"\n # Parse arguments\n # NOTE: Have to guard against \"truth value of an array is ambiguous\" errors\n if not isinstance(stds, ARRAY_TYPES):\n if stds in (1, True):\n stds = stds_default\n elif stds in (0, False):\n stds = None\n if not isinstance(pctiles, ARRAY_TYPES):\n if pctiles in (1, True):\n pctiles = pctiles_default\n elif pctiles in (0, False):\n pctiles = None\n\n # Incompatible settings\n if stds is not None and pctiles is not None:\n warnings._warn_proplot(\n 'You passed both a standard deviation range and a percentile range for '\n 'drawing error indicators. Using the former.'\n )\n pctiles = None\n if not means_or_medians and (stds is not None or pctiles is not None):\n raise ValueError(\n 'To automatically compute standard deviations or percentiles on columns '\n 'of data you must pass means=True or medians=True.'\n )\n if means_or_medians and errdata is not None:\n stds = pctiles = None\n warnings._warn_proplot(\n 'You explicitly provided the error bounds but also requested '\n 'automatically calculating means or medians on data columns. '\n 'It may make more sense to use the \"stds\" or \"pctiles\" keyword args '\n 'and have *proplot* calculate the error bounds.'\n )\n\n # Compute error data in format that can be passed to matplotlib.axes.Axes.errorbar()\n # NOTE: Include option to pass symmetric deviation from central points\n y = _to_ndarray(y)\n data = _to_ndarray(data)\n if errdata is not None:\n label_default = 'error range'\n err = _to_ndarray(errdata)\n if (\n err.ndim not in (1, 2)\n or err.shape[-1] != y.shape[-1]\n or err.ndim == 2 and err.shape[0] != 2\n ):\n raise ValueError(\n f'errdata must have shape (2, {y.shape[-1]}), but got {err.shape}.'\n )\n if err.ndim == 1:\n abserr = err\n err = np.empty((2, err.size))\n err[0, :] = y - abserr # translated back to absolute deviations below\n err[1, :] = y + abserr\n elif stds is not None:\n label_default = fr'{stds[1]}$\\sigma$ range'\n err = y + np.std(data, axis=0)[None, :] * np.asarray(stds)[:, None]\n elif pctiles is not None:\n label_default = f'{pctiles[0]}-{pctiles[1]} percentile range'\n err = np.percentile(data, pctiles, axis=0)\n else:\n raise ValueError('You must provide error bounds.')\n if label == True: # noqa: E712 e.g. 
1, 1.0, True\n label = label_default\n elif not label:\n label = None\n if not absolute:\n err = err - y\n err[0, :] *= -1 # absolute deviations from central points\n\n # Return data with default legend entry\n return err, label\n\n\ndef _deprecate_add_errorbars(func):\n \"\"\"\n Translate old-style keyword arguments to new-style in way that is too complex\n for _rename_kwargs. Use a decorator to avoid call signature pollution.\n \"\"\"\n @functools.wraps(func)\n def wrapper(\n *args,\n bars=None, boxes=None, barstd=None, boxstd=None, barrange=None, boxrange=None,\n **kwargs\n ):\n for (prefix, b, std, span) in zip(\n ('bar', 'box'), (bars, boxes), (barstd, boxstd), (barrange, boxrange),\n ):\n if b is not None or std is not None or span is not None:\n warnings._warn_proplot(\n f\"Keyword args '{prefix}s', '{prefix}std', and '{prefix}range' \"\n 'are deprecated and will be removed in a future version. '\n f\"Please use '{prefix}stds' or '{prefix}pctiles' instead.\"\n )\n if span is None and b: # means 'use the default range'\n span = b\n if std:\n kwargs.setdefault(prefix + 'stds', span)\n else:\n kwargs.setdefault(prefix + 'pctiles', span)\n return func(*args, **kwargs)\n return wrapper\n\n\n@_deprecate_add_errorbars\ndef indicate_error(\n self, func, *args,\n medians=False, means=False,\n boxdata=None, bardata=None, shadedata=None, fadedata=None,\n boxstds=None, barstds=None, shadestds=None, fadestds=None,\n boxpctiles=None, barpctiles=None, shadepctiles=None, fadepctiles=None,\n boxmarker=True, boxmarkercolor='white',\n boxcolor=None, barcolor=None, shadecolor=None, fadecolor=None,\n shadelabel=False, fadelabel=False, shadealpha=0.4, fadealpha=0.2,\n boxlinewidth=None, boxlw=None, barlinewidth=None, barlw=None, capsize=None,\n boxzorder=2.5, barzorder=2.5, shadezorder=1.5, fadezorder=1.5,\n **kwargs\n):\n \"\"\"\n Adds support for drawing error bars and error shading on-the-fly.\n Includes options for interpreting columns of data as *samples*,\n representing the mean or median of each sample with lines, points, or\n bars, and drawing error bars representing percentile ranges or standard\n deviation multiples for each sample. Also supports specifying error\n bar data explicitly.\n\n Note\n ----\n This function wraps {methods}\n\n Parameters\n ----------\n *args\n The input data.\n means : bool, optional\n Whether to plot the means of each column in the input data.\n medians : bool, optional\n Whether to plot the medians of each column in the input data.\n barstds : (float, float) or bool, optional\n Standard deviation multiples for *thin error bars* with optional whiskers\n (i.e. caps). If ``True``, the default standard deviation multiples ``(-3, 3)``\n are used. This argument is only valid if `means` or `medians` is ``True``.\n barpctiles : (float, float) or bool, optional\n As with `barstds`, but instead using *percentiles* for the error bars.\n The percentiles are calculated with `numpy.percentile`. If ``True``, the\n default percentiles ``(0, 100)`` are used.\n bardata : 2 x N array or 1D array, optional\n If shape is 2 x N these are the lower and upper bounds for the thin error bars.\n If array is 1D these are the absolute, symmetric deviations from the central\n points. 
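# --- Illustrative sketch (added example, not part of the original module) ----
# The core of what _get_error_data() computes for the "means=True, barstds"
# case, reduced to a few lines: column means plus a (2, N) array of positive
# offsets in the format matplotlib's errorbar(yerr=...) expects. Assumes a
# (nsamples, ncols) numpy array; demo_mean_and_err is a hypothetical name.
import numpy as np

def demo_mean_and_err(data, stds=(-2, 2)):
    y = data.mean(axis=0)
    err = y + data.std(axis=0)[None, :] * np.asarray(stds)[:, None]
    err = err - y        # absolute bounds -> signed offsets
    err[0, :] *= -1      # errorbar() wants positive magnitudes
    return y, err

# Example:
# data = np.random.default_rng(0).normal(size=(100, 5))
# y, yerr = demo_mean_and_err(data)    # y.shape == (5,), yerr.shape == (2, 5)
# import matplotlib.pyplot as plt
# plt.errorbar(np.arange(5), y, yerr=yerr)
# ------------------------------------------------------------------------------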
This should be used if `means` and `medians` are both ``False`` (i.e.\n you did not provide dataset columns from which statistical properties can be\n calculated automatically).\n boxstds, boxpctiles, boxdata : optional\n As with `barstds`, `barpctiles`, and `bardata`, but for *thicker error bars*\n representing a smaller interval than the thin error bars. If `boxstds` is\n ``True``, the default standard deviation multiples ``(-1, 1)`` are used.\n If `boxpctiles` is ``True``, the default percentile multiples ``(25, 75)``\n are used (i.e. the interquartile range). When boxes and bars are combined, this\n has the effect of drawing miniature box-and-whisker plots.\n shadestds, shadepctiles, shadedata : optional\n As with `barstds`, `barpctiles`, and `bardata`, but using *shading* to indicate\n the error range. If `shadestds` is ``True``, the default standard deviation\n multiples ``(-2, 2)`` are used. If `shadepctiles` is ``True``, the default\n percentile multiples ``(10, 90)`` are used. Shading is generally useful for\n `~matplotlib.axes.Axes.plot` plots and not `~matplotlib.axes.Axes.bar` plots.\n fadestds, fadepctiles, fadedata : optional\n As with `shadestds`, `shadepctiles`, and `shadedata`, but for an additional,\n more faded, *secondary* shaded region. If `fadestds` is ``True``, the default\n standard deviation multiples ``(-3, 3)`` are used. If `fadepctiles` is ``True``,\n the default percentile multiples ``(0, 100)`` are used.\n barcolor, boxcolor, shadecolor, fadecolor : color-spec, optional\n Colors for the different error indicators. For error bars, the default is\n ``'k'``. For shading, the default behavior is to inherit color from the\n primary `~matplotlib.artist.Artist`.\n shadelabel, fadelabel : bool or str, optional\n Labels for the shaded regions to be used as separate legend entries. To toggle\n labels \"on\" and apply a *default* label, use e.g. ``shadelabel=True``. To apply\n a *custom* label, use e.g. ``shadelabel='label'``. Otherwise, the shading is\n drawn underneath the line and/or marker in the legend entry.\n barlinewidth, boxlinewidth, barlw, boxlw : float, optional\n Line widths for the thin and thick error bars, in points. The defaults\n are ``barlw=0.8`` and ``boxlw=4 * barlw``.\n boxmarker : bool, optional\n Whether to draw a small marker in the middle of the box denoting the mean or\n median position. Ignored if `boxes` is ``False``. Default is ``True``.\n boxmarkercolor : color-spec, optional\n Color for the `boxmarker` marker. 
Default is ``'w'``.\n capsize : float, optional\n The cap size for thin error bars in points.\n barzorder, boxzorder, shadezorder, fadezorder : float, optional\n The \"zorder\" for the thin error bars, thick error bars, and shading.\n\n Returns\n -------\n h, err1, err2, ...\n The original plot object and the error bar or shading objects.\n \"\"\"\n name = func.__name__\n x, data, *args = args\n x = _to_arraylike(x)\n data = _to_arraylike(data)\n\n # Get means or medians for plotting\n # NOTE: We can *only* use pctiles and stds if one of these was true\n # TODO: Add support for 3D arrays.\n y = data\n bars = any(_ is not None for _ in (barstds, barpctiles, bardata))\n boxes = any(_ is not None for _ in (boxstds, boxpctiles, boxdata))\n shading = any(_ is not None for _ in (shadestds, shadepctiles, shadedata))\n fading = any(_ is not None for _ in (fadestds, fadepctiles, fadedata))\n if means or medians:\n # Take means or medians while preserving metadata for legends\n # NOTE: Permit 3d array with error dimension coming first\n if not (bars or boxes or shading or fading):\n bars = boxes = True # toggle these on\n barstds = boxstds = True # error bars and boxes with default stdev ranges\n if data.ndim != 2:\n raise ValueError(\n f'Need 2D data array for means=True or medians=True, '\n f'got {data.ndim}D array.'\n )\n keep = {}\n if DataArray is not ndarray and isinstance(data, DataArray):\n keep['keep_attrs'] = True\n if means:\n y = data.mean(axis=0, **keep)\n elif medians:\n if hasattr(data, 'quantile'): # DataFrame and DataArray\n y = data.quantile(0.5, axis=0, **keep)\n if Series is not ndarray and isinstance(y, Series):\n y.name = '' # do not set name to quantile number\n else:\n y = np.percentile(data, 50, axis=0, **keep)\n if getattr(data, 'name', '') and not getattr(y, 'name', ''):\n y.name = data.name # copy DataFrame name to Series name\n\n # Infer width of error elements\n # NOTE: violinplot_wrapper passes some invalid keyword args with expectation\n # that indicate_error wrapper pops them and uses them for error bars.\n lw = None\n if name == 'bar':\n lw = _not_none(kwargs.get('linewidth', None), kwargs.get('lw', None))\n elif name == 'violinplot':\n lw = _not_none(kwargs.pop('linewidth', None), kwargs.pop('lw', None))\n lw = _not_none(lw, 0.8)\n barlw = _not_none(barlinewidth=barlinewidth, barlw=barlw, default=lw)\n boxlw = _not_none(boxlinewidth=boxlinewidth, boxlw=boxlw, default=4 * barlw)\n capsize = _not_none(capsize, 3.0)\n\n # Infer color for error bars\n edgecolor = None\n if name == 'bar':\n edgecolor = kwargs.get('edgecolor', None)\n elif name == 'violinplot':\n edgecolor = kwargs.pop('edgecolor', None)\n edgecolor = _not_none(edgecolor, 'k')\n barcolor = _not_none(barcolor, edgecolor)\n boxcolor = _not_none(boxcolor, barcolor)\n\n # Infer color for shading\n shadecolor_infer = shadecolor is None\n shadecolor = _not_none(\n shadecolor, kwargs.get('color', None), kwargs.get('facecolor', None), edgecolor\n )\n fadecolor_infer = fadecolor is None\n fadecolor = _not_none(fadecolor, shadecolor)\n\n # Draw dark and light shading\n vert = kwargs.get('vert', kwargs.get('orientation', 'vertical') == 'vertical')\n axis = 'y' if vert else 'x' # yerr\n errargs = (x, y) if vert else (y, x)\n errobjs = []\n means_or_medians = means or medians\n if fading:\n err, label = _get_error_data(\n data, y, fadedata, fadestds, fadepctiles,\n stds_default=(-3, 3), pctiles_default=(0, 100), absolute=True,\n means_or_medians=means_or_medians, label=fadelabel,\n )\n errfunc = self.fill_between if 
vert else self.fill_betweenx\n errobj = errfunc(\n x, *err, linewidth=0, color=fadecolor,\n alpha=fadealpha, zorder=fadezorder,\n )\n errobj.set_label(label)\n errobjs.append(errobj)\n if shading:\n err, label = _get_error_data(\n data, y, shadedata, shadestds, shadepctiles,\n stds_default=(-2, 2), pctiles_default=(10, 90), absolute=True,\n means_or_medians=means_or_medians, label=shadelabel,\n )\n errfunc = self.fill_between if vert else self.fill_betweenx\n errobj = errfunc(\n x, *err, linewidth=0, color=shadecolor,\n alpha=shadealpha, zorder=shadezorder,\n )\n errobj.set_label(label) # shadelabel=False\n errobjs.append(errobj)\n\n # Draw thin error bars and thick error boxes\n if boxes:\n err, label = _get_error_data(\n data, y, boxdata, boxstds, boxpctiles,\n stds_default=(-1, 1), pctiles_default=(25, 75),\n means_or_medians=means_or_medians,\n )\n if boxmarker:\n self.scatter(*errargs, s=boxlw, marker='o', color=boxmarkercolor, zorder=5)\n errkw = {axis + 'err': err}\n errobj = self.errorbar(\n *errargs, color=boxcolor, linewidth=boxlw, linestyle='none',\n capsize=0, zorder=boxzorder, **errkw,\n )\n errobjs.append(errobj)\n if bars: # now impossible to make thin bar width different from cap width!\n err, label = _get_error_data(\n data, y, bardata, barstds, barpctiles,\n stds_default=(-3, 3), pctiles_default=(0, 100),\n means_or_medians=means_or_medians,\n )\n errkw = {axis + 'err': err}\n errobj = self.errorbar(\n *errargs, color=barcolor, linewidth=barlw, linestyle='none',\n markeredgecolor=barcolor, markeredgewidth=barlw,\n capsize=capsize, zorder=barzorder, **errkw\n )\n errobjs.append(errobj)\n\n # Call main function\n # NOTE: Provide error objects for inclusion in legend, but *only* provide\n # the shading. Never want legend entries for error bars.\n xy = (x, data) if name == 'violinplot' else (x, y)\n kwargs.setdefault('errobjs', errobjs[:int(shading + fading)])\n result = obj = func(self, *xy, *args, **kwargs)\n\n # Apply inferrred colors to objects\n if type(result) in (tuple, list): # avoid BarContainer\n obj = result[0]\n i = 0\n for b, infer in zip((fading, shading), (fadecolor_infer, shadecolor_infer)):\n if b and infer:\n if hasattr(obj, 'get_facecolor'):\n color = obj.get_facecolor()\n elif hasattr(obj, 'get_color'):\n color = obj.get_color()\n else:\n color = None\n if color is not None:\n errobjs[i].set_facecolor(color)\n i += 1\n\n # Return objects\n # NOTE: This should not affect internal matplotlib calls to these funcs\n # NOTE: Avoid expanding matplolib collections that are list subclasses here\n if errobjs:\n if type(result) in (tuple, list): # e.g. 
result of plot\n return (*result, *errobjs)\n else:\n return (result, *errobjs)\n else:\n return result\n\n\ndef parametric_wrapper(self, func, *args, interp=0, **kwargs):\n \"\"\"\n Calls `~proplot.axes.Axes.parametric` and optionally interpolates values before\n they get passed to `cmap_changer` and the colormap boundaries are drawn.\n \"\"\"\n # Parse input arguments\n # NOTE: This wrapper is required so that\n # WARNING: So far this only works for 1D *x* and *y* coordinates.\n # Cannot draw multiple colormap lines at once\n if len(args) == 3:\n x, y, values = args\n elif 'values' in kwargs:\n values = kwargs.pop('values')\n if len(args) == 1:\n y = np.asarray(args[0])\n x = np.arange(y.shape[-1])\n elif len(args) == 2:\n x, y = args\n else:\n raise ValueError(f'1 to 3 positional arguments required, got {len(args)}.')\n else:\n raise ValueError('Missing required keyword argument \"values\".')\n x, y, values = np.atleast_1d(x), np.atleast_1d(y), np.atleast_1d(values)\n if (\n any(_.ndim != 1 for _ in (x, y, values))\n or len({x.size, y.size, values.size}) > 1\n ):\n raise ValueError(\n f'x {x.shape}, y {y.shape}, and values {values.shape} '\n 'must be 1-dimensional and have the same size.'\n )\n\n # Interpolate values to allow for smooth gradations between values\n # (interp=False) or color switchover halfway between points\n # (interp=True). Then optionally interpolate the colormap values.\n if interp > 0:\n xorig, yorig, vorig = x, y, values\n x, y, values = [], [], []\n for j in range(xorig.shape[0] - 1):\n idx = slice(None)\n if j + 1 < xorig.shape[0] - 1:\n idx = slice(None, -1)\n x.extend(np.linspace(xorig[j], xorig[j + 1], interp + 2)[idx].flat)\n y.extend(np.linspace(yorig[j], yorig[j + 1], interp + 2)[idx].flat)\n values.extend(np.linspace(vorig[j], vorig[j + 1], interp + 2)[idx].flat)\n x, y, values = np.array(x), np.array(y), np.array(values)\n\n # Call main function\n return func(self, x, y, values=values, **kwargs)\n\n\ndef plot_wrapper(\n self, func, *args, cmap=None, values=None, **kwargs\n):\n \"\"\"\n Calls `~proplot.axes.Axes.parametric` in certain cases (but this behavior\n is now deprecated).\n \"\"\"\n if len(args) > 3: # e.g. with fmt string\n raise ValueError(f'Expected 1-3 positional args, got {len(args)}.')\n if cmap is not None:\n warnings._warn_proplot(\n 'Drawing \"parametric\" plots with ax.plot(x, y, values=values, cmap=cmap) '\n 'is deprecated and will be removed in a future version. Please use '\n 'ax.parametric(x, y, values, cmap=cmap) instead.'\n )\n return self.parametric(*args, cmap=cmap, values=values, **kwargs)\n\n # Draw lines\n result = func(self, *args, values=values, **kwargs)\n\n # Add sticky edges? No because there is no way to check whether \"dependent variable\"\n # is x or y axis like with area/areax and bar/barh. 
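# --- Illustrative sketch (added example, not part of the original module) ----
# The segment interpolation used by parametric_wrapper above, in isolation:
# insert `interp` evenly spaced points between every pair of neighbouring
# values so color transitions along the line look smooth. Plain numpy;
# demo_densify is a hypothetical name.
import numpy as np

def demo_densify(values, interp=2):
    values = np.asarray(values, dtype=float)
    out = []
    for j in range(values.size - 1):
        seg = np.linspace(values[j], values[j + 1], interp + 2)
        if j + 1 < values.size - 1:
            seg = seg[:-1]        # drop the endpoint to avoid duplicates
        out.extend(seg)
    return np.array(out)

# Example:
# demo_densify([0, 1, 2], interp=1)  -> [0. , 0.5, 1. , 1.5, 2. ]
# ------------------------------------------------------------------------------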
Better to always have margin.\n # for objs in result:\n # if not isinstance(objs, tuple):\n # objs = (objs,)\n # for obj in objs:\n # xdata = obj.get_xdata()\n # obj.sticky_edges.x.extend((np.min(xdata), np.max(xdata)))\n\n return result\n\n\n@docstring.add_snippets\ndef scatter_wrapper(\n self, func, *args,\n s=None, size=None, markersize=None,\n c=None, color=None, markercolor=None, smin=None, smax=None,\n cmap=None, cmap_kw=None, norm=None, norm_kw=None,\n vmin=None, vmax=None, N=None, levels=None, values=None,\n symmetric=False, locator=None, locator_kw=None,\n lw=None, linewidth=None, linewidths=None,\n markeredgewidth=None, markeredgewidths=None,\n edgecolor=None, edgecolors=None,\n markeredgecolor=None, markeredgecolors=None,\n **kwargs\n):\n \"\"\"\n Adds keyword arguments to `~matplotlib.axes.Axes.scatter` that are more\n consistent with the `~matplotlib.axes.Axes.plot` keyword arguments and\n supports `cmap_changer` features.\n\n Note\n ----\n This function wraps {methods}\n\n Parameters\n ----------\n s, size, markersize : float or list of float, optional\n The marker size(s). The units are scaled by `smin` and `smax`.\n smin, smax : float, optional\n The minimum and maximum marker size in units points ** 2 used to\n scale the `s` array. If not provided, the marker sizes are equivalent\n to the values in the `s` array.\n c, color, markercolor : color-spec or list thereof, or array, optional\n The marker fill color(s). If this is an array of scalar values, the\n colors will be generated by passing the values through the `norm`\n normalizer and drawing from the `cmap` colormap.\n %(axes.cmap_changer)s\n lw, linewidth, linewidths, markeredgewidth, markeredgewidths : \\\nfloat or list thereof, optional\n The marker edge width.\n edgecolors, markeredgecolor, markeredgecolors : \\\ncolor-spec or list thereof, optional\n The marker edge color.\n\n Other parameters\n ----------------\n **kwargs\n Passed to `~matplotlib.axes.Axes.scatter`.\n \"\"\"\n # Manage input arguments\n # NOTE: Parse 1d must come before this\n nargs = len(args)\n if len(args) > 4:\n raise ValueError(f'Expected 1-4 positional args, got {nargs}.')\n args = list(args)\n if len(args) == 4:\n c = args.pop(1)\n if len(args) == 3:\n s = args.pop(0)\n\n # Apply some aliases for keyword arguments\n c = _not_none(c=c, color=color, markercolor=markercolor)\n s = _not_none(s=s, size=size, markersize=markersize)\n lw = _not_none(\n lw=lw, linewidth=linewidth, linewidths=linewidths,\n markeredgewidth=markeredgewidth, markeredgewidths=markeredgewidths,\n )\n ec = _not_none(\n edgecolor=edgecolor, edgecolors=edgecolors,\n markeredgecolor=markeredgecolor, markeredgecolors=markeredgecolors,\n )\n\n # Get colormap\n cmap_kw = cmap_kw or {}\n if cmap is not None:\n cmap = constructor.Colormap(cmap, **cmap_kw)\n\n # Get normalizer and levels\n # NOTE: If the length of the c array !=\n ticks = None\n carray = np.atleast_1d(c)\n if (\n np.issubdtype(carray.dtype, np.number)\n and not (carray.ndim == 2 and carray.shape[1] in (3, 4))\n ):\n carray = carray.ravel()\n norm, cmap, _, ticks = _build_discrete_norm(\n carray, # sample data for getting suitable levels\n N=N, levels=levels, values=values,\n norm=norm, norm_kw=norm_kw, locator=locator, locator_kw=locator_kw,\n cmap=cmap, vmin=vmin, vmax=vmax, extend='neither', symmetric=symmetric,\n )\n\n # Fix 2D arguments but still support scatter(x_vector, y_2d) usage\n # NOTE: Since we are flattening vectors the coordinate metadata is meaningless,\n # so converting to ndarray and stripping 
metadata is no problem.\n # NOTE: numpy.ravel() preserves masked arrays\n if len(args) == 2 and all(np.asarray(arg).squeeze().ndim > 1 for arg in args):\n args = tuple(np.ravel(arg) for arg in args)\n\n # Scale s array\n if np.iterable(s) and (smin is not None or smax is not None):\n smin_true, smax_true = min(s), max(s)\n if smin is None:\n smin = smin_true\n if smax is None:\n smax = smax_true\n s = (\n smin + (smax - smin)\n * (np.array(s) - smin_true) / (smax_true - smin_true)\n )\n obj = func(\n self, *args, c=c, s=s, cmap=cmap, norm=norm,\n linewidths=lw, edgecolors=ec, **kwargs\n )\n if ticks is not None:\n obj.ticks = ticks\n return obj\n\n\ndef stem_wrapper(\n self, func, *args, linefmt=None, basefmt=None, markerfmt=None, **kwargs\n):\n \"\"\"\n Make `use_line_collection` the default to suppress annoying warning message.\n \"\"\"\n # Set default colors\n # NOTE: 'fmt' strings can only be 2 to 3 characters and include color shorthands\n # like 'r' or cycle colors like 'C0'. Cannot use full color names.\n # NOTE: Matplotlib defaults try to make a 'reddish' color the base and 'bluish'\n # color the stems. To make this more robust we temporarily replace the cycler\n # with a negcolor/poscolor cycler, otherwise try to point default colors to the\n # blush 'C0' and reddish 'C1' from the new default 'colorblind' cycler.\n if not any(\n isinstance(fmt, str) and re.match(r'\\AC[0-9]', fmt)\n for fmt in (linefmt, basefmt, markerfmt)\n ):\n cycle = constructor.Cycle((rc['negcolor'], rc['poscolor']), name='_neg_pos')\n context = rc.context({'axes.prop_cycle': cycle})\n else:\n context = _dummy_context()\n\n # Add stem lines with bluish stem color and reddish base color\n with context:\n kwargs['linefmt'] = _not_none(linefmt, 'C0-')\n kwargs['basefmt'] = _not_none(basefmt, 'C1-')\n kwargs['markerfmt'] = _not_none(markerfmt, linefmt[:-1] + 'o')\n kwargs.setdefault('use_line_collection', True)\n try:\n return func(self, *args, **kwargs)\n except TypeError:\n kwargs.pop('use_line_collection') # old version\n return func(self, *args, **kwargs)\n\n\ndef _draw_lines(\n self, func, *args, negpos=False, negcolor=None, poscolor=None, **kwargs\n):\n \"\"\"\n Parse lines arguments. 
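# --- Illustrative sketch (added example, not part of the original module) ----
# The marker-size rescaling applied by scatter_wrapper above, in isolation:
# map an arbitrary size array linearly onto the [smin, smax] range (in
# points ** 2). Plain numpy; demo_scale_sizes is a hypothetical name and the
# sketch assumes the sizes are not all identical.
import numpy as np

def demo_scale_sizes(s, smin, smax):
    s = np.asarray(s, dtype=float)
    lo, hi = s.min(), s.max()
    return smin + (smax - smin) * (s - lo) / (hi - lo)

# Example:
# demo_scale_sizes([1, 2, 3, 10], smin=5, smax=100)
# -> [  5.        15.555...  26.111...  100.      ]
# ------------------------------------------------------------------------------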
Support automatic *x* coordinates and default\n \"minima\" at zero.\n \"\"\"\n # Parse positional arguments, use default \"base\" position of zero\n x = 'x' if func.__name__ == 'vlines' else 'y'\n y = 'y' if x == 'x' else 'x'\n args = list(args)\n if x in kwargs:\n args.insert(0, kwargs.pop(x))\n for suffix in ('min', 'max'):\n key = y + suffix\n if key in kwargs:\n args.append(kwargs.pop(key))\n if len(args) == 1:\n x = np.arange(len(np.atleast_1d(args[0])))\n args.insert(0, x)\n if len(args) == 2:\n args.insert(1, 0.0)\n elif len(args) != 3:\n raise TypeError('lines() requires 1 to 3 positional arguments.')\n\n # Support \"negative\" and \"positive\" lines\n x, y1, y2 = args\n if negpos and kwargs.get('color', None) is None:\n y1 = _to_arraylike(y1)\n y2 = _to_arraylike(y2)\n y1array = _to_ndarray(y1)\n y2array = _to_ndarray(y2)\n\n # Negative colors\n mask = y2array >= y1array # positive\n y1neg = y1.copy()\n y2neg = y2.copy()\n if mask.size == 1:\n if mask.item():\n y1neg = y2neg = np.nan\n else:\n if y1.size > 1:\n _to_indexer(y1neg)[mask] = np.nan\n if y2.size > 1:\n _to_indexer(y2neg)[mask] = np.nan\n color = _not_none(negcolor, rc['negcolor'])\n negobj = func(self, x, y1neg, y2neg, color=color, **kwargs)\n\n # Positive colors\n mask = y2array < y1array # negative\n y1pos = y1.copy()\n y2pos = y2.copy()\n if mask.size == 1:\n if mask.item():\n y1pos = y2pos = np.nan\n else:\n if y1.size > 1:\n _to_indexer(y1pos)[mask] = np.nan\n if y2.size > 1:\n _to_indexer(y2pos)[mask] = np.nan\n color = _not_none(poscolor, rc['poscolor'])\n posobj = func(self, x, y1pos, y2pos, color=color, **kwargs)\n\n # Return both objects\n return (negobj, posobj)\n else:\n return func(self, x, y1, y2, **kwargs)\n\n\n@docstring.add_snippets\ndef hlines_wrapper(self, func, *args, **kwargs):\n \"\"\"\n Plot horizontal lines with flexible positional arguments and optionally\n use different colors for \"negative\" and \"positive\" lines.\n\n Parameters\n ----------\n %(axes.lines)s\n\n Note\n ----\n This function wraps {methods}\n \"\"\"\n return _draw_lines(self, func, *args, **kwargs)\n\n\n@docstring.add_snippets\ndef vlines_wrapper(self, func, *args, **kwargs):\n \"\"\"\n Plot vertical lines with flexible positional arguments and optionally\n use different colors for \"negative\" and \"positive\" lines.\n\n Parameters\n ----------\n %(axes.lines)s\n\n Note\n ----\n This function wraps {methods}\n \"\"\"\n return _draw_lines(self, func, *args, **kwargs)\n\n\ndef _fill_between_apply(\n self, func, *args,\n negcolor=None, poscolor=None, negpos=False,\n lw=None, linewidth=None,\n **kwargs\n):\n \"\"\"\n Helper function that powers `fill_between` and `fill_betweenx`.\n \"\"\"\n # Parse input arguments as follows:\n # * Permit using 'x', 'y1', and 'y2' or 'y', 'x1', and 'x2' as\n # keyword arguments.\n # * When negpos is True, instead of using fill_between(x, y1, y2=0) as default,\n # make the default fill_between(x, y1=0, y2).\n x = 'y' if 'x' in func.__name__ else 'x'\n y = 'x' if x == 'y' else 'y'\n args = list(args)\n if x in kwargs: # keyword 'x'\n args.insert(0, kwargs.pop(x))\n if len(args) == 1:\n args.insert(0, np.arange(len(args[0])))\n for yi in (y + '1', y + '2'):\n if yi in kwargs: # keyword 'y'\n args.append(kwargs.pop(yi))\n if len(args) == 2:\n args.append(0)\n elif len(args) == 3:\n if kwargs.get('stacked', False):\n warnings._warn_proplot(\n f'{func.__name__} cannot have three positional arguments '\n f'with negpos=True. 
Ignoring third argument.'\n )\n else:\n raise ValueError(f'Expected 2-3 positional args, got {len(args)}.')\n\n # Modify default properties\n # Set default edge width for patches to zero\n kwargs['linewidth'] = _not_none(lw=lw, linewidth=linewidth, default=0)\n\n # Draw patches\n xv, y1, y2 = args\n xv = _to_arraylike(xv)\n y1 = _to_arraylike(y1)\n y2 = _to_arraylike(y2)\n if negpos and kwargs.get('color', None) is None:\n # Plot negative and positive patches\n name = func.__name__\n message = name + ' argument {}={!r} is incompatible with negpos=True. Ignoring.'\n where = kwargs.pop('where', None)\n if where is not None:\n warnings._warn_proplot(message.format('where', where))\n stacked = kwargs.pop('stacked', None)\n if stacked:\n warnings._warn_proplot(message.format('stacked', stacked))\n kwargs.setdefault('interpolate', True)\n if np.asarray(y1).ndim > 1 or np.asarray(y2).ndim > 2:\n raise ValueError(f'{name} arguments with negpos=True must be 1D.')\n where1 = y1 < y2\n where2 = y1 >= y2\n negcolor = _not_none(negcolor, rc['negcolor'])\n poscolor = _not_none(poscolor, rc['poscolor'])\n obj1 = func(self, xv, y1, y2, where=where1, color=negcolor, **kwargs)\n obj2 = func(self, xv, y1, y2, where=where2, color=poscolor, **kwargs)\n result = objs = (obj1, obj2) # may be tuple of tuples due to cycle_changer\n\n else:\n # Plot basic patches\n result = func(self, xv, y1, y2, **kwargs)\n objs = (result,)\n\n # Add sticky edges in x-direction, and sticky edges in y-direction *only*\n # if one of the y limits is scalar. This should satisfy most users.\n xsides = (np.min(xv), np.max(xv))\n ysides = []\n if y1.size == 1:\n ysides.append(np.asarray(y1).item())\n if y2.size == 1:\n ysides.append(np.asarray(y2).item())\n for iobjs in objs:\n if not isinstance(iobjs, tuple):\n iobjs = (iobjs,)\n for obj in iobjs:\n getattr(obj.sticky_edges, x).extend(xsides)\n getattr(obj.sticky_edges, y).extend(ysides)\n\n return result\n\n\n@docstring.add_snippets\ndef fill_between_wrapper(self, func, *args, **kwargs):\n \"\"\"\n %(axes.fill_between)s\n \"\"\"\n return _fill_between_apply(self, func, *args, **kwargs)\n\n\n@docstring.add_snippets\ndef fill_betweenx_wrapper(self, func, *args, **kwargs):\n \"\"\"\n %(axes.fill_betweenx)s\n \"\"\"\n return _fill_between_apply(self, func, *args, **kwargs)\n\n\ndef hist_wrapper(self, func, x, bins=None, **kwargs):\n \"\"\"\n Forces `bar_wrapper` to interpret `width` as literal rather than relative\n to step size and enforces all arguments after `bins` are keyword-only.\n \"\"\"\n with _state_context(self, _absolute_bar_width=True):\n return func(self, x, bins=bins, **kwargs)\n\n\n@docstring.add_snippets\ndef bar_wrapper(\n self, func, x=None, height=None, width=0.8, bottom=None, *,\n vert=None, orientation='vertical', stacked=False,\n lw=None, linewidth=None, edgecolor='black',\n negpos=False, negcolor=None, poscolor=None,\n **kwargs\n):\n \"\"\"\n %(axes.bar)s\n \"\"\"\n # Parse arguments\n # WARNING: Implementation is really weird... we flip around arguments for horizontal\n # plots only to flip them back in cycle_changer when iterating through columns.\n if vert is not None:\n orientation = 'vertical' if vert else 'horizontal'\n if orientation == 'horizontal':\n x, bottom = bottom, x\n width, height = height, width\n\n # Parse args\n # TODO: Stacked feature is implemented in `cycle_changer`, but makes more\n # sense do document here; figure out way to move it here?\n if kwargs.get('left', None) is not None:\n warnings._warn_proplot('bar() keyword \"left\" is deprecated. 
Use \"x\" instead.')\n x = kwargs.pop('left')\n if x is None and height is None:\n raise ValueError('bar() requires at least 1 positional argument, got 0.')\n elif height is None:\n x, height = None, x\n args = (x, height)\n linewidth = _not_none(lw=lw, linewidth=linewidth, default=rc['patch.linewidth'])\n kwargs.update({\n 'width': width, 'bottom': bottom, 'stacked': stacked,\n 'orientation': orientation, 'linewidth': linewidth, 'edgecolor': edgecolor,\n })\n\n # Call func\n # NOTE: This *must* also be wrapped by cycle_changer, which ultimately\n # permutes back the x/bottom args for horizontal bars! Need to clean up.\n if negpos and kwargs.get('color', None) is None:\n # Draw negative and positive bars\n # NOTE: cycle_changer makes bar widths *relative* to step size between\n # x coordinates to cannot just omit data. Instead make some height nan.\n message = 'bar() argument {}={!r} is incompatible with negpos=True. Ignoring.'\n stacked = kwargs.pop('stacked', None)\n if stacked:\n warnings._warn_proplot(message.format('stacked', stacked))\n height = np.asarray(height)\n if height.ndim > 1:\n raise ValueError('bar() heights with negpos=True must be 1D.')\n height1 = height.copy().astype(np.float64)\n height1[height >= 0] = np.nan\n height2 = height.copy().astype(np.float64)\n height2[height < 0] = np.nan\n negcolor = _not_none(negcolor, rc['negcolor'])\n poscolor = _not_none(poscolor, rc['poscolor'])\n obj1 = func(self, x, height1, color=negcolor, **kwargs)\n obj2 = func(self, x, height2, color=poscolor, **kwargs)\n result = (obj1, obj2)\n else:\n # Draw simple bars\n result = func(self, *args, **kwargs)\n return result\n\n\n@docstring.add_snippets\ndef barh_wrapper(self, func, y=None, right=None, width=0.8, left=None, **kwargs):\n \"\"\"\n %(axes.barh)s\n \"\"\"\n # Converts y-->bottom, left-->x, width-->height, height-->width.\n # Convert back to (x, bottom, width, height) so we can pass stuff\n # through cycle_changer.\n # NOTE: ProPlot calls second positional argument 'right' so that 'width'\n # means the width of *bars*.\n # NOTE: You *must* do juggling of barh keyword order --> bar keyword order\n # --> barh keyword order, because horizontal hist passes arguments to bar\n # directly and will not use a 'barh' method with overridden argument order!\n func # avoid U100 error\n height = _not_none(height=kwargs.pop('height', None), width=width, default=0.8)\n kwargs.setdefault('orientation', 'horizontal')\n if y is None and width is None:\n raise ValueError('barh() requires at least 1 positional argument, got 0.')\n return self.bar(x=left, width=right, height=height, bottom=y, **kwargs)\n\n\ndef boxplot_wrapper(\n self, func, *args,\n color='k', fill=True, fillcolor=None, fillalpha=0.7,\n lw=None, linewidth=None, orientation=None,\n marker=None, markersize=None,\n boxcolor=None, boxlw=None,\n capcolor=None, caplw=None,\n meancolor=None, meanlw=None,\n mediancolor=None, medianlw=None,\n whiskercolor=None, whiskerlw=None,\n fliercolor=None, flierlw=None,\n **kwargs\n):\n \"\"\"\n Adds convenient keyword arguments and changes the default boxplot style.\n\n Note\n ----\n This function wraps {methods}\n\n Parameters\n ----------\n *args : 1D or 2D ndarray\n The data array.\n color : color-spec, optional\n The color of all objects.\n fill : bool, optional\n Whether to fill the box with a color.\n fillcolor : color-spec, optional\n The fill color for the boxes. Default is the next color cycler color.\n fillalpha : float, optional\n The opacity of the boxes. 
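# --- Illustrative sketch (added example, not part of the original module) ----
# The negative/positive split used by the negpos=True branches above, in
# isolation: make two copies of the bar heights and mask the "other" sign with
# NaN so each copy can be drawn in its own color. Plain numpy;
# demo_negpos_split is a hypothetical name.
import numpy as np

def demo_negpos_split(height):
    height = np.asarray(height, dtype=float)
    neg = height.copy()
    neg[height >= 0] = np.nan    # keep only negative bars
    pos = height.copy()
    pos[height < 0] = np.nan     # keep only positive bars
    return neg, pos

# Example:
# demo_negpos_split([1, -2, 3, -4])
# -> (array([nan, -2., nan, -4.]), array([ 1., nan,  3., nan]))
# Each half would then be drawn with rc['negcolor'] / rc['poscolor'].
# ------------------------------------------------------------------------------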
Default is ``1``.\n lw, linewidth : float, optional\n The linewidth of all objects.\n vert : bool, optional\n If ``False``, box plots are drawn horizontally.\n orientation : {{None, 'horizontal', 'vertical'}}, optional\n Alternative to the native `vert` keyword arg. Controls orientation.\n marker : marker-spec, optional\n Marker style for the 'fliers', i.e. outliers.\n markersize : float, optional\n Marker size for the 'fliers', i.e. outliers.\n boxcolor, capcolor, meancolor, mediancolor, whiskercolor : \\\ncolor-spec, optional\n The color of various boxplot components. These are shorthands so you\n don't have to pass e.g. a ``boxprops`` dictionary.\n boxlw, caplw, meanlw, medianlw, whiskerlw : float, optional\n The line width of various boxplot components. These are shorthands so\n you don't have to pass e.g. a ``boxprops`` dictionary.\n\n Other parameters\n ----------------\n **kwargs\n Passed to the matplotlib plotting method.\n \"\"\"\n # Call function\n if len(args) > 2:\n raise ValueError(f'Expected 1-2 positional args, got {len(args)}.')\n if orientation is not None:\n if orientation == 'horizontal':\n kwargs['vert'] = False\n elif orientation != 'vertical':\n raise ValueError(\n 'Orientation must be \"horizontal\" or \"vertical\", '\n f'got {orientation!r}.'\n )\n obj = func(self, *args, **kwargs)\n if not args:\n return obj\n\n # Modify results\n # TODO: Pass props keyword args instead? Maybe does not matter.\n lw = _not_none(lw=lw, linewidth=linewidth, default=0.8)\n if fillcolor is None:\n cycler = next(self._get_lines.prop_cycler)\n fillcolor = cycler.get('color', None)\n for key, icolor, ilw in (\n ('boxes', boxcolor, boxlw),\n ('caps', capcolor, caplw),\n ('whiskers', whiskercolor, whiskerlw),\n ('means', meancolor, meanlw),\n ('medians', mediancolor, medianlw),\n ('fliers', fliercolor, flierlw),\n ):\n if key not in obj: # possible if not rendered\n continue\n artists = obj[key]\n ilw = _not_none(ilw, lw)\n icolor = _not_none(icolor, color)\n for artist in artists:\n if icolor is not None:\n artist.set_color(icolor)\n artist.set_markeredgecolor(icolor)\n if ilw is not None:\n artist.set_linewidth(ilw)\n artist.set_markeredgewidth(ilw)\n if key == 'boxes' and fill:\n patch = mpatches.PathPatch(\n artist.get_path(), color=fillcolor,\n alpha=fillalpha, linewidth=0)\n self.add_artist(patch)\n if key == 'fliers':\n if marker is not None:\n artist.set_marker(marker)\n if markersize is not None:\n artist.set_markersize(markersize)\n return obj\n\n\ndef violinplot_wrapper(\n self, func, *args,\n lw=None, linewidth=None, fillcolor=None, edgecolor='black',\n fillalpha=0.7, orientation=None,\n **kwargs\n):\n \"\"\"\n Adds convenient keyword arguments and changes the default violinplot style\n to match `this matplotlib example \\\n<https://matplotlib.org/3.1.0/gallery/statistics/customized_violin.html>`__.\n It is also no longer possible to show minima and maxima with whiskers --\n while this is useful for `~matplotlib.axes.Axes.boxplot`\\\\ s it is\n redundant for `~matplotlib.axes.Axes.violinplot`\\\\ s.\n\n Note\n ----\n This function wraps {methods}\n\n Parameters\n ----------\n *args : 1D or 2D ndarray\n The data array.\n lw, linewidth : float, optional\n The linewidth of the line objects. Default is ``1``.\n edgecolor : color-spec, optional\n The edge color for the violin patches. Default is ``'black'``.\n fillcolor : color-spec, optional\n The violin plot fill color. Default is the next color cycler color.\n fillalpha : float, optional\n The opacity of the violins. 
Default is ``1``.\n vert : bool, optional\n If ``False``, box plots are drawn horizontally.\n orientation : {{None, 'horizontal', 'vertical'}}, optional\n Alternative to the native `vert` keyword arg. Controls orientation.\n boxrange, barrange : (float, float), optional\n Percentile ranges for the thick and thin central bars. The defaults\n are ``(25, 75)`` and ``(5, 95)``, respectively.\n\n Other parameters\n ----------------\n **kwargs\n Passed to `~matplotlib.axes.Axes.violinplot`.\n \"\"\"\n # Orientation and checks\n if len(args) > 2:\n raise ValueError(f'Expected 1-2 positional args, got {len(args)}.')\n if orientation is not None:\n if orientation == 'horizontal':\n kwargs['vert'] = False\n elif orientation != 'vertical':\n raise ValueError(\n 'Orientation must be \"horizontal\" or \"vertical\", '\n f'got {orientation!r}.'\n )\n\n # Sanitize input\n lw = _not_none(lw=lw, linewidth=linewidth, default=0.8)\n if kwargs.pop('showextrema', None):\n warnings._warn_proplot('Ignoring showextrema=True.')\n if 'showmeans' in kwargs:\n kwargs.setdefault('means', kwargs.pop('showmeans'))\n if 'showmedians' in kwargs:\n kwargs.setdefault('medians', kwargs.pop('showmedians'))\n kwargs.setdefault('capsize', 0)\n result = obj = func(\n self, *args,\n showmeans=False, showmedians=False, showextrema=False, lw=lw, **kwargs\n )\n if not args:\n return result\n\n # Modify body settings\n if isinstance(result, (list, tuple)):\n obj = result[0]\n for artist in obj['bodies']:\n artist.set_alpha(fillalpha)\n artist.set_edgecolor(edgecolor)\n artist.set_linewidths(lw)\n if fillcolor is not None:\n artist.set_facecolor(fillcolor)\n return result\n\n\ndef _get_transform(self, transform):\n \"\"\"\n Translates user input transform. Also used in an axes method.\n \"\"\"\n try:\n from cartopy.crs import CRS\n except ModuleNotFoundError:\n CRS = None\n cartopy = getattr(self, 'name', '') == 'cartopy'\n if (\n isinstance(transform, mtransforms.Transform)\n or CRS and isinstance(transform, CRS)\n ):\n return transform\n elif transform == 'figure':\n return self.figure.transFigure\n elif transform == 'axes':\n return self.transAxes\n elif transform == 'data':\n return PlateCarree() if cartopy else self.transData\n elif cartopy and transform == 'map':\n return self.transData\n else:\n raise ValueError(f'Unknown transform {transform!r}.')\n\n\ndef _update_text(self, props):\n \"\"\"\n Monkey patch that adds pseudo \"border\" properties to text objects\n without wrapping the entire class. 
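# --- Illustrative sketch (added example, not part of the original module) ----
# The string -> transform translation performed by _get_transform() above,
# without the cartopy branch: accept a Transform instance or one of the names
# 'figure', 'axes', 'data'. Plain matplotlib API; demo_get_transform is a
# hypothetical name.
import matplotlib.pyplot as plt
import matplotlib.transforms as mtransforms

def demo_get_transform(ax, transform):
    if isinstance(transform, mtransforms.Transform):
        return transform
    if transform == 'figure':
        return ax.figure.transFigure
    if transform == 'axes':
        return ax.transAxes
    if transform == 'data':
        return ax.transData
    raise ValueError(f'Unknown transform {transform!r}.')

# Example:
# fig, ax = plt.subplots()
# ax.text(0.5, 0.5, 'centered', transform=demo_get_transform(ax, 'axes'))
# ------------------------------------------------------------------------------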
We override update to facilitate\n updating inset titles.\n \"\"\"\n props = props.copy() # shallow copy\n border = props.pop('border', None)\n bordercolor = props.pop('bordercolor', 'w')\n borderinvert = props.pop('borderinvert', False)\n borderwidth = props.pop('borderwidth', 2)\n if border:\n facecolor, bgcolor = self.get_color(), bordercolor\n if borderinvert:\n facecolor, bgcolor = bgcolor, facecolor\n kwargs = {\n 'linewidth': borderwidth,\n 'foreground': bgcolor,\n 'joinstyle': 'miter',\n }\n self.update({\n 'color': facecolor,\n 'path_effects': [mpatheffects.Stroke(**kwargs), mpatheffects.Normal()],\n })\n return type(self).update(self, props)\n\n\ndef text_wrapper(\n self, func,\n x=0, y=0, text='', transform='data',\n family=None, fontfamily=None, fontname=None, fontsize=None, size=None,\n border=False, bordercolor='w', borderwidth=2, borderinvert=False,\n **kwargs\n):\n \"\"\"\n Enables specifying `tranform` with a string name and adds a feature for\n drawing borders around text.\n\n Note\n ----\n This function wraps {methods}\n\n Parameters\n ----------\n x, y : float\n The *x* and *y* coordinates for the text.\n text : str\n The text string.\n transform : {{'data', 'axes', 'figure'}} or \\\n`~matplotlib.transforms.Transform`, optional\n The transform used to interpret `x` and `y`. Can be a\n `~matplotlib.transforms.Transform` object or a string representing the\n `~matplotlib.axes.Axes.transData`, `~matplotlib.axes.Axes.transAxes`,\n or `~matplotlib.figure.Figure.transFigure` transforms. Default is\n ``'data'``, i.e. the text is positioned in data coordinates.\n fontsize, size : float or str, optional\n The font size. If float, units are inches. If string, units are\n interpreted by `~proplot.utils.units`.\n fontname, fontfamily, family : str, optional\n The font name (e.g. ``'Fira Math'``) or font family name (e.g.\n ``'serif'``). Matplotlib falls back to the system default if not found.\n fontweight, weight, fontstyle, style, fontvariant, variant : str, optional\n Additional font properties. See `~matplotlib.text.Text` for details.\n border : bool, optional\n Whether to draw border around text.\n borderwidth : float, optional\n The width of the text border. Default is ``2`` points.\n bordercolor : color-spec, optional\n The color of the text border. Default is ``'w'``.\n borderinvert : bool, optional\n If ``True``, the text and border colors are swapped.\n\n Other parameters\n ----------------\n **kwargs\n Passed to `~matplotlib.axes.Axes.text`.\n \"\"\"\n # Parse input args\n # NOTE: Previously issued warning if fontname did not match any of names\n # in ttflist but this would result in warning for e.g. family='sans-serif'.\n # Matplotlib font API makes it very difficult to inject warning in\n # correct place. Simpler to just\n # NOTE: Do not emit warning if user supplied conflicting properties\n # because matplotlib has like 100 conflicting text properties for which\n # it doesn't emit warnings. 
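# --- Illustrative sketch (added example, not part of the original module) ----
# The "border" effect that _update_text() and text_wrapper add above, written
# as a plain-matplotlib snippet: a Stroke path effect drawn underneath the
# normal glyphs gives the text a contrasting outline. Standard matplotlib API
# only; colors and coordinates are made up for the example.
import matplotlib.pyplot as plt
import matplotlib.patheffects as mpatheffects

fig, ax = plt.subplots()
txt = ax.text(0.5, 0.5, 'bordered label', ha='center', color='k')
txt.set_path_effects([
    mpatheffects.Stroke(linewidth=2, foreground='w'),  # the border
    mpatheffects.Normal(),                             # the text itself on top
])
# fig.savefig('text_border_demo.png')
# ------------------------------------------------------------------------------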
Prefer not to fix all of them.\n fontsize = _not_none(fontsize, size)\n fontfamily = _not_none(fontname, fontfamily, family)\n if fontsize is not None:\n try:\n rc._scale_font(fontsize) # *validate* but do not translate\n except KeyError:\n fontsize = units(fontsize, 'pt')\n kwargs['fontsize'] = fontsize\n if fontfamily is not None:\n kwargs['fontfamily'] = fontfamily\n if not transform:\n transform = self.transData\n else:\n transform = _get_transform(self, transform)\n\n # Apply monkey patch to text object\n # TODO: Why only support this here, and not in arbitrary places throughout\n # rest of matplotlib API? Units engine needs better implementation.\n obj = func(self, x, y, text, transform=transform, **kwargs)\n obj.update = _update_text.__get__(obj)\n obj.update({\n 'border': border,\n 'bordercolor': bordercolor,\n 'borderinvert': borderinvert,\n 'borderwidth': borderwidth,\n })\n return obj\n\n\ndef _iter_legend_objects(objs):\n \"\"\"\n Retrieve the (object, label) pairs for objects with actual labels\n from nested lists and tuples of objects.\n \"\"\"\n # Account for (1) multiple columns of data, (2) functions that return\n # multiple values (e.g. hist() returns (bins, values, patches)), and\n # (3) matplotlib.Collection list subclasses.\n if hasattr(objs, 'get_label'):\n label = objs.get_label()\n if label and label[:1] != '_':\n yield (objs, label)\n elif isinstance(objs, (list, tuple)):\n for obj in objs:\n yield from _iter_legend_objects(obj)\n\n\ndef cycle_changer(\n self, func, *args,\n cycle=None, cycle_kw=None,\n label=None, labels=None, values=None,\n legend=None, legend_kw=None,\n colorbar=None, colorbar_kw=None,\n errobjs=None,\n **kwargs\n):\n \"\"\"\n Adds features for controlling colors in the property cycler and drawing\n legends or colorbars in one go.\n\n Note\n ----\n This function wraps {methods}\n\n This wrapper also *standardizes acceptable input* -- these methods now all\n accept 2D arrays holding columns of data, and *x*-coordinates are always\n optional. Note this alters the behavior of `~matplotlib.axes.Axes.boxplot`\n and `~matplotlib.axes.Axes.violinplot`, which now compile statistics on\n *columns* of data instead of *rows*.\n\n Parameters\n ----------\n cycle : cycle-spec, optional\n The cycle specifer, passed to the `~proplot.constructor.Cycle`\n constructor. If the returned list of colors is unchanged from the\n current axes color cycler, the axes cycle will **not** be reset to the\n first position.\n cycle_kw : dict-like, optional\n Passed to `~proplot.constructor.Cycle`.\n label : float or str, optional\n The legend label to be used for this plotted element.\n labels, values : list of float or list of str, optional\n Used with 2D input arrays. The legend labels or colorbar coordinates\n for each column in the array. Can be numeric or string, and must match\n the number of columns in the 2D array.\n legend : bool, int, or str, optional\n If not ``None``, this is a location specifying where to draw an *inset*\n or *panel* legend from the resulting handle(s). If ``True``, the\n default location is used. Valid locations are described in\n `~proplot.axes.Axes.legend`.\n legend_kw : dict-like, optional\n Ignored if `legend` is ``None``. Extra keyword args for our call\n to `~proplot.axes.Axes.legend`.\n colorbar : bool, int, or str, optional\n If not ``None``, this is a location specifying where to draw an *inset*\n or *panel* colorbar from the resulting handle(s). If ``True``, the\n default location is used. 
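# A small illustrative sketch of the per-instance monkey patch used in
# text_wrapper above (obj.update = _update_text.__get__(obj)): binding a plain
# function with __get__ overrides the method on one object only, leaving the
# class untouched. Widget and fancy_update are hypothetical names.
class Widget:
    def update(self, props):
        return ('base', props)

def fancy_update(self, props):
    return ('patched', props)

w = Widget()
w.update = fancy_update.__get__(w)            # bind to this instance only
assert w.update({'k': 1})[0] == 'patched'
assert Widget().update({'k': 1})[0] == 'base'  # other instances unaffected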
Valid locations are described in\n `~proplot.axes.Axes.colorbar`.\n colorbar_kw : dict-like, optional\n Ignored if `colorbar` is ``None``. Extra keyword args for our call\n to `~proplot.axes.Axes.colorbar`.\n errobjs : `~matplotlib.artist.Artist` or list thereof, optional\n Error bar objects to add to the legend. This is used internally and\n should not be necessary for users. See `indicate_error`.\n\n Other parameters\n ----------------\n *args, **kwargs\n Passed to the matplotlib plotting method.\n\n See also\n --------\n standardize_1d\n proplot.constructor.Cycle\n proplot.constructor.Colors\n \"\"\"\n # Parse positional args\n # NOTE: Requires standardize_1d wrapper before reaching this. Also note\n # that the 'x' coordinates are sometimes ignored below.\n name = func.__name__\n if not args:\n return func(self, *args, **kwargs)\n x, y, *args = args\n ys = (y,)\n if len(args) >= 1 and name in ('fill_between', 'fill_betweenx'):\n ys, args = (y, args[0]), args[1:]\n # Parse keyword args\n autoformat = rc['autoformat'] # possibly manipulated by standardize_[12]d\n barh = stacked = False\n cycle_kw = cycle_kw or {}\n legend_kw = legend_kw or {}\n colorbar_kw = colorbar_kw or {}\n labels = _not_none(\n values=values,\n labels=labels,\n label=label,\n legend_kw_labels=legend_kw.pop('labels', None),\n )\n if name in ('pie',): # add x coordinates as default pie chart labels\n labels = _not_none(labels, x) # TODO: move to pie wrapper?\n colorbar_legend_label = None # for colorbar or legend\n if name in ('bar', 'fill_between', 'fill_betweenx'):\n stacked = kwargs.pop('stacked', False)\n if name in ('bar',):\n barh = kwargs.get('orientation', None) == 'horizontal'\n width = kwargs.pop('width', 0.8) # 'width' for bar *and* barh (see bar_wrapper)\n bottom = 'x' if barh else 'bottom'\n kwargs.setdefault(bottom, 0) # 'x' required even though 'y' isn't for bar plots\n\n # Determine and temporarily set cycler\n # NOTE: Axes cycle has no getter, only set_prop_cycle, which sets a\n # prop_cycler attribute on the hidden _get_lines and _get_patches_for_fill\n # objects. This is the only way to query current axes cycler! Should not\n # wrap set_prop_cycle because would get messy and fragile.\n # NOTE: The _get_lines cycler is an *itertools cycler*. Has no length, so\n # we must cycle over it with next(). We try calling next() the same number\n # of times as the length of input cycle. 
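# An illustrative sketch of the comparison described above, written against the
# public 'cycler' package (a matplotlib dependency) rather than the private
# _get_lines.prop_cycler attribute; the color lists are made-up sample input.
import itertools
from cycler import cycler

current = cycler(color=['r', 'g', 'b'])    # what the axes is already using
proposed = cycler(color=['r', 'g', 'b'])   # what the user just requested

it = itertools.cycle(current)              # matplotlib stores an itertools.cycle
by_key = {}
for _ in range(len(proposed)):             # advance len(proposed) times
    for key, value in next(it).items():
        by_key.setdefault(key, set()).add(value)

# Reset only if the proposed cycle actually differs from the current one
reset = set(by_key) != set(proposed.by_key()) or any(
    by_key[key] != set(values) for key, values in proposed.by_key().items()
)
print(reset)  # False -> the color position is left untouched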
If the input cycle *is* in fact\n # the same, below does not reset the color position, cycles us to start!\n if cycle is not None or cycle_kw:\n # Get the new cycler\n cycle_args = () if cycle is None else (cycle,)\n if y.ndim > 1 and y.shape[1] > 1: # default samples count\n cycle_kw.setdefault('N', y.shape[1])\n cycle = constructor.Cycle(*cycle_args, **cycle_kw)\n\n # Get the original property cycle\n # NOTE: Matplotlib saves itertools.cycle(cycler), not the original\n # cycler object, so we must build up the keys again.\n i = 0\n by_key = {}\n cycle_orig = self._get_lines.prop_cycler\n for i in range(len(cycle)): # use the cycler object length as a guess\n prop = next(cycle_orig)\n for key, value in prop.items():\n if key not in by_key:\n by_key[key] = set()\n if isinstance(value, (list, np.ndarray)):\n value = tuple(value)\n by_key[key].add(value)\n\n # Reset property cycler if it differs\n reset = set(by_key) != set(cycle.by_key())\n if not reset: # test individual entries\n for key, value in cycle.by_key().items():\n if by_key[key] != set(value):\n reset = True\n break\n if reset:\n self.set_prop_cycle(cycle)\n\n # Custom property cycler additions\n # NOTE: By default matplotlib uses _get_patches_for_fill.get_next_color\n # for scatter next scatter color, but cannot get anything else! We simultaneously\n # iterate through the _get_lines property cycler and apply relevant properties.\n apply_from_cycler = set() # which keys to apply from property cycler\n if name in ('scatter',):\n # Figure out which props should be updated\n prop_keys = set(self._get_lines._prop_keys) - {'color', 'linestyle', 'dashes'}\n for key, prop in (\n ('markersize', 's'),\n ('linewidth', 'linewidths'),\n ('markeredgewidth', 'linewidths'),\n ('markeredgecolor', 'edgecolors'),\n ('alpha', 'alpha'),\n ('marker', 'marker'),\n ):\n prop = kwargs.get(prop, None)\n if key in prop_keys and prop is None: # if key in cycler and property unset\n apply_from_cycler.add(key)\n\n # Handle legend labels. Several scenarios:\n # 1. Always prefer input labels\n # 2. Always add labels if this is a *named* dimension.\n # 3. Even if not *named* dimension add labels if labels are string\n # WARNING: Most methods that accept 2D arrays use columns of data, but when\n # pandas DataFrame passed to hist, boxplot, or violinplot, rows of data\n # assumed! This is fixed in parse_1d by converting to values.\n y1 = ys[0]\n ncols = 1\n if name in ('pie', 'boxplot', 'violinplot'):\n # Functions handle multiple labels on their own\n if labels is not None:\n kwargs['labels'] = labels # error raised down the line\n else:\n # Get column count and sanitize labels\n ncols = 1 if y.ndim == 1 else y.shape[1]\n if not np.iterable(labels) or isinstance(labels, str):\n labels = [labels] * ncols\n if len(labels) != ncols:\n raise ValueError(\n f'Got {ncols} columns in data array, but {len(labels)} labels.'\n )\n\n # Get automatic legend labels and legend title\n # NOTE: Only apply labels if they are string labels *or* the\n # legend or colorbar has a title (latter is more common for colorbars)\n if autoformat:\n ilabels, colorbar_legend_label = _axis_labels_title(y1, axis=1)\n ilabels = _to_ndarray(ilabels) # may be empty!\n for i, (ilabel, label) in enumerate(zip(ilabels, labels)):\n if label is None and (colorbar_legend_label or isinstance(ilabel, str)):\n labels[i] = ilabel\n\n # Sanitize labels\n # WARNING: Must convert labels to string here because e.g. scatter() applies\n # default label if input is False-ey. 
So numeric '0' would be overridden.\n if labels is None:\n labels = [''] * ncols\n else:\n labels = [str(_not_none(label, '')) for label in labels]\n\n # Get step size for bar plots\n # WARNING: This will fail for non-numeric non-datetime64 singleton\n # datatypes but this is good enough for vast majority of most cases.\n if name in ('bar',):\n if not stacked and not getattr(self, '_absolute_bar_width', False):\n x_test = np.atleast_1d(_to_ndarray(x))\n if len(x_test) >= 2:\n x_step = x_test[1:] - x_test[:-1]\n x_step = np.concatenate((x_step, x_step[-1:]))\n elif x_test.dtype == np.datetime64:\n x_step = np.timedelta64(1, 'D')\n else:\n x_step = np.array(0.5)\n if np.issubdtype(x_test.dtype, np.datetime64):\n # Avoid integer timedelta truncation\n x_step = x_step.astype('timedelta64[ns]')\n width = width * x_step / ncols\n key = 'height' if barh else 'width'\n kwargs[key] = width\n\n # Plot susccessive columns\n objs = []\n for i in range(ncols):\n # Prop cycle properties\n kw = kwargs.copy()\n if apply_from_cycler:\n props = next(self._get_lines.prop_cycler)\n for key in apply_from_cycler:\n value = props[key]\n if key in ('size', 'markersize'):\n key = 's'\n elif key in ('linewidth', 'markeredgewidth'): # translate\n key = 'linewidths'\n elif key == 'markeredgecolor':\n key = 'edgecolors'\n kw[key] = value\n\n # Get x coordinates for bar plot\n ix = x # samples\n if name in ('bar',): # adjust\n if not stacked:\n offset = width * (i - 0.5 * (ncols - 1))\n ix = x + offset\n elif stacked and y1.ndim > 1:\n key = 'x' if barh else 'bottom'\n kw[key] = _to_indexer(y1)[:, :i].sum(axis=1)\n\n # Get y coordinates and labels\n if name in ('pie', 'boxplot', 'violinplot'):\n # Only ever have one y value, cannot have legend labels\n iys = (y1,)\n\n else:\n # The coordinates\n # WARNING: If stacked=True then we always *ignore* second\n # argument passed to fill_between. Warning should be issued\n # by fill_between_wrapper in this case.\n if stacked and name in ('fill_between', 'fill_betweenx'):\n iys = tuple(\n y1 if y1.ndim == 1\n else _to_indexer(y1)[:, :ii].sum(axis=1)\n for ii in (i, i + 1)\n )\n else:\n iys = tuple(\n y_i if y_i.ndim == 1 else _to_indexer(y_i)[:, i]\n for y_i in ys\n )\n kw['label'] = labels[i] or ''\n\n # Build coordinate arguments\n ixy = ()\n if barh: # special case, use kwargs only!\n kw.update({'bottom': ix, 'width': iys[0]})\n elif name in ('pie', 'hist', 'boxplot', 'violinplot'):\n ixy = iys\n else: # has x-coordinates, and maybe more than one y\n ixy = (ix, *iys)\n obj = func(self, *ixy, *args, **kw)\n if type(obj) in (list, tuple) and len(obj) == 1:\n obj = obj[0]\n objs.append(obj)\n\n # Add colorbar\n if colorbar:\n # Add handles\n loc = self._loc_translate(colorbar, 'colorbar', allow_manual=False)\n if loc not in self._auto_colorbar:\n self._auto_colorbar[loc] = ([], {})\n self._auto_colorbar[loc][0].extend(objs)\n\n # Add keywords\n if loc != 'fill':\n colorbar_kw.setdefault('loc', loc)\n if colorbar_legend_label:\n colorbar_kw.setdefault('label', colorbar_legend_label)\n self._auto_colorbar[loc][1].update(colorbar_kw)\n\n # Add legend\n if legend:\n # Get error objects. If they have separate label, allocate separate\n # legend entry. 
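# An illustrative sketch, in plain matplotlib, of the grouped-bar arithmetic
# used above: each of the ncols series is shifted by
# width * (i - 0.5 * (ncols - 1)) so the group stays centered on the shared
# x coordinate. The data and series names are made up.
import numpy as np
import matplotlib.pyplot as plt

x = np.arange(4)
data = np.random.rand(4, 3)          # 4 groups, 3 series per group
ncols = data.shape[1]
step = np.min(np.diff(x)) if len(x) > 1 else 1.0
width = 0.8 * step / ncols           # split the relative width across the series

fig, ax = plt.subplots()
for i in range(ncols):
    offset = width * (i - 0.5 * (ncols - 1))
    ax.bar(x + offset, data[:, i], width=width, label=f'series {i}')
ax.legend()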
If not, try to combine with current legend entry.\n if type(errobjs) not in (list, tuple):\n errobjs = (errobjs,)\n errobjs = list(filter(None, errobjs))\n errobjs_join = [obj for obj in errobjs if not obj.get_label()]\n errobjs_separate = [obj for obj in errobjs if obj.get_label()]\n\n # Get legend objects\n # NOTE: It is not yet possible to draw error bounds *and* draw lines\n # with multiple columns of data.\n # NOTE: Put error bounds objects *before* line objects in the tuple,\n # so that line gets drawn on top of bounds.\n legobjs = objs.copy()\n if errobjs_join:\n legobjs = [(*legobjs, *errobjs_join)[::-1]]\n legobjs.extend(errobjs_separate)\n try:\n legobjs, labels = list(zip(*_iter_legend_objects(legobjs)))\n except ValueError:\n legobjs = labels = ()\n\n # Add handles and labels\n # NOTE: Important to add labels as *keyword* so users can override\n # NOTE: Use legend(handles, labels) syntax so we can assign labels\n # for tuples of artists. Otherwise they are label-less.\n loc = self._loc_translate(legend, 'legend', allow_manual=False)\n if loc not in self._auto_legend:\n self._auto_legend[loc] = ([], {'labels': []})\n self._auto_legend[loc][0].extend(legobjs)\n self._auto_legend[loc][1]['labels'].extend(labels)\n\n # Add other keywords\n if loc != 'fill':\n legend_kw.setdefault('loc', loc)\n if colorbar_legend_label:\n legend_kw.setdefault('label', colorbar_legend_label)\n self._auto_legend[loc][1].update(legend_kw)\n\n # Return\n # WARNING: Make sure plot always returns tuple of objects, and bar always\n # returns singleton unless we have bulk drawn bar plots! Other matplotlib\n # methods call these internally and expect a certain output format!\n if name == 'plot':\n return tuple(objs) # always return tuple of objects\n elif name in ('boxplot', 'violinplot'):\n return objs[0] # always return singleton\n else:\n return objs[0] if len(objs) == 1 else tuple(objs)\n\n\ndef _auto_levels_locator(\n *args, N=None, norm=None, norm_kw=None, locator=None, locator_kw=None,\n vmin=None, vmax=None, extend='both', symmetric=False,\n positive=False, negative=False, nozero=False,\n):\n \"\"\"\n Automatically generate level locations based on the input data, the\n input locator, and the input normalizer.\n\n Parameters\n ----------\n *args\n The sample dataset(s).\n N : int, optional\n The (approximate) number of levels to create.\n norm, norm_kw\n Passed to `~proplot.constructor.Norm`. Used to determine suitable\n level locations if `locator` is not passed.\n locator, locator_kw\n Passed to `~proplot.constructor.Locator`. Used to determine suitable\n level locations.\n extend : str, optional\n The extend setting.\n symmetric, positive, negative : bool, optional\n Whether the automatic levels should be symmetric, should be all positive\n with a minimum at zero, or should be all negative with a maximum at zero.\n nozero : bool, optional\n Whether zero should be excluded from automatic levels. This is also\n implemented in `cmap_changer` so that `nozero` can be used to remove user\n input levels (e.g. 
``ax.contour(..., levels=plot.arange(-5, 5), nozero=True)``),\n but is replecated here so power users can use this function in isolation.\n\n Returns\n -------\n levels : ndarray\n The levels.\n locator : ndarray or `matplotlib.ticker.Locator`\n The locator used for colorbar tick locations.\n \"\"\"\n if np.iterable(N):\n return N, N\n if N is None:\n N = 11\n norm_kw = norm_kw or {}\n locator_kw = locator_kw or {}\n norm = constructor.Norm(norm or 'linear', **norm_kw)\n if positive and negative:\n raise ValueError('Incompatible options: positive=True and negative=True.')\n if locator is not None:\n level_locator = tick_locator = constructor.Locator(locator, **locator_kw)\n elif isinstance(norm, mcolors.LogNorm):\n level_locator = tick_locator = mticker.LogLocator(**locator_kw)\n elif isinstance(norm, mcolors.SymLogNorm):\n locator_kw.setdefault('base', _flexible_getattr(norm, 'base', 10))\n locator_kw.setdefault('linthresh', _flexible_getattr(norm, 'linthresh', 1))\n level_locator = tick_locator = mticker.SymmetricalLogLocator(**locator_kw)\n else:\n nbins = N * 2 if positive or negative else N\n locator_kw.setdefault('symmetric', symmetric or positive or negative)\n level_locator = mticker.MaxNLocator(nbins, min_n_ticks=1, **locator_kw)\n tick_locator = None\n\n # Get locations\n automin = vmin is None\n automax = vmax is None\n if automin or automax:\n vmins = []\n vmaxs = []\n for data in args:\n data = ma.masked_invalid(data, copy=False)\n if automin:\n vmin = float(data.min())\n if automax:\n vmax = float(data.max())\n if vmin == vmax or ma.is_masked(vmin) or ma.is_masked(vmax):\n vmin, vmax = 0, 1\n vmins.append(vmin)\n vmaxs.append(vmax)\n vmin = min(vmins)\n vmax = max(vmaxs)\n try:\n levels = level_locator.tick_values(vmin, vmax)\n except RuntimeError: # too-many-ticks error\n levels = np.linspace(vmin, vmax, N) # TODO: _autolev used N+1\n\n # Trim excess levels the locator may have supplied\n # NOTE: This part is mostly copied from matplotlib _autolev\n if not locator_kw.get('symmetric', None):\n i0, i1 = 0, len(levels) # defaults\n under, = np.where(levels < vmin)\n if len(under):\n i0 = under[-1]\n if not automin or extend in ('min', 'both'):\n i0 += 1 # permit out-of-bounds data\n over, = np.where(levels > vmax)\n if len(over):\n i1 = over[0] + 1 if len(over) else len(levels)\n if not automax or extend in ('max', 'both'):\n i1 -= 1 # permit out-of-bounds data\n if i1 - i0 < 3:\n i0, i1 = 0, len(levels) # revert\n levels = levels[i0:i1]\n\n # Compare the no. 
of levels we *got* (levels) to what we *wanted* (N)\n # If we wanted more than 2 times the result, then add nn - 1 extra\n # levels in-between the returned levels *in normalized space*.\n # Example: A LogNorm gives too few levels, so we select extra levels\n # here, but use the locator for determining tick locations.\n nn = N // len(levels)\n if nn >= 2:\n olevels = norm(levels)\n nlevels = []\n for i in range(len(levels) - 1):\n l1, l2 = olevels[i], olevels[i + 1]\n nlevels.extend(np.linspace(l1, l2, nn + 1)[:-1])\n nlevels.append(olevels[-1])\n levels = norm.inverse(nlevels)\n\n # Filter the remaining contours\n if nozero and 0 in levels:\n levels = levels[levels != 0]\n if positive:\n levels = levels[levels >= 0]\n if negative:\n levels = levels[levels <= 0]\n\n # Use auto-generated levels for ticks if still None\n locator = tick_locator or levels\n return levels, locator\n\n\ndef _build_discrete_norm(\n data=None, N=None, levels=None, values=None,\n norm=None, norm_kw=None, cmap=None, vmin=None, vmax=None, extend=None,\n minlength=2,\n **kwargs,\n):\n \"\"\"\n Build a `~proplot.colors.DiscreteNorm` or `~proplot.colors.BoundaryNorm`\n from the input arguments. This automatically calculates \"nice\" level\n boundaries if they were not provided.\n\n Parameters\n ----------\n data, vmin, vmax, levels, values\n Used to determine the level boundaries.\n norm, norm_kw\n Passed to `~proplot.constructor.Norm` and then to `DiscreteNorm`.\n vmin, vmax : float, optional\n The minimum and maximum values for the normalizer.\n cmap : `matplotlib.colors.Colormap`, optional\n The colormap. Passed to `DiscreteNorm`.\n extend : str, optional\n The extend setting.\n minlength : int, optional\n The minimum length for level lists.\n **kwargs\n Passed to `_auto_levels_locator`.\n\n Returns\n -------\n norm : `matplotlib.colors.Normalize`\n The normalizer.\n ticks : `numpy.ndarray` or `matplotlib.locator.Locator`\n The axis locator or the tick location candidates.\n \"\"\"\n # Parse flexible keyword args\n norm_kw = norm_kw or {}\n levels = _not_none(\n N=N, levels=levels, norm_kw_levels=norm_kw.pop('levels', None),\n default=rc['image.levels']\n )\n vmin = _not_none(vmin=vmin, norm_kw_vmin=norm_kw.pop('vmin', None))\n vmax = _not_none(vmax=vmax, norm_kw_vmax=norm_kw.pop('vmax', None))\n if norm == 'segments': # TODO: remove\n norm = 'segmented'\n\n # NOTE: Matplotlib colorbar algorithm *cannot* handle descending levels\n # so this function reverses them and adds special attribute to the\n # normalizer. Then colorbar_wrapper reads this attribute and flips the\n # axis and the colormap direction.\n # Check input levels and values\n for key, val in (('levels', levels), ('values', values)):\n if not np.iterable(val):\n continue\n if len(val) < minlength or len(val) >= 2 and any(\n np.sign(np.diff(val)) != np.sign(val[1] - val[0])\n ):\n raise ValueError(\n f'{key!r} must be monotonically increasing or decreasing '\n f'and at least length {minlength}, got {val}.'\n )\n\n # Get level edges from level centers\n locator = None\n if isinstance(values, Integral):\n levels = values + 1\n elif np.iterable(values) and len(values) == 1:\n levels = [values[0] - 1, values[0] + 1] # weird but why not\n elif np.iterable(values) and len(values) > 1:\n # Try to generate levels such that a LinearSegmentedNorm will\n # place values ticks at the center of each colorbar level.\n # utils.edges works only for evenly spaced values arrays.\n # We solve for: (x1 + x2)/2 = y --> x2 = 2*y - x1\n # with arbitrary starting point x1. 
We also start the algorithm\n # on the end with *smaller* differences.\n if norm is None or norm == 'segmented':\n reverse = abs(values[-1] - values[-2]) < abs(values[1] - values[0])\n if reverse:\n values = values[::-1]\n levels = [values[0] - (values[1] - values[0]) / 2]\n for val in values:\n levels.append(2 * val - levels[-1])\n if reverse:\n levels = levels[::-1]\n if any(np.sign(np.diff(levels)) != np.sign(levels[1] - levels[0])):\n levels = edges(values) # backup plan, weird tick locations\n # Generate levels by finding in-between points in the\n # normalized numeric space, e.g. LogNorm space.\n else:\n inorm = constructor.Norm(norm, **norm_kw)\n levels = inorm.inverse(edges(inorm(values)))\n elif values is not None:\n raise ValueError(\n f'Unexpected input values={values!r}. '\n 'Must be integer or list of numbers.'\n )\n\n # Get default normalizer\n # Only use LinearSegmentedNorm if necessary, because it is slow\n descending = False\n if np.iterable(levels):\n if len(levels) == 1:\n norm = mcolors.Normalize(vmin=levels[0] - 1, vmax=levels[0] + 1)\n else:\n levels, descending = pcolors._check_levels(levels)\n if len(levels) > 2 and norm is None:\n steps = np.abs(np.diff(levels))\n eps = np.mean(steps) / 1e3\n if np.any(np.abs(np.diff(steps)) >= eps):\n norm = 'segmented'\n if norm == 'segmented':\n if not np.iterable(levels):\n norm = 'linear' # same result with improved speed\n else:\n norm_kw['levels'] = levels\n norm = constructor.Norm(norm or 'linear', **norm_kw)\n\n # Use the locator to determine levels\n # Mostly copied from the hidden contour.ContourSet._autolev\n # NOTE: Subsequently, we *only* use the locator to determine ticks if\n # *levels* and *values* were not passed.\n if isinstance(norm, mcolors.BoundaryNorm):\n # Get levels from bounds\n # TODO: Test this feature?\n # NOTE: No warning because we get here internally?\n levels = norm.boundaries\n elif np.iterable(values):\n # Prefer ticks in center, but subsample if necessary\n locator = np.asarray(values)\n elif np.iterable(levels):\n # Prefer ticks on level edges, but subsample if necessary\n locator = np.asarray(levels)\n else:\n # Determine levels automatically\n levels, locator = _auto_levels_locator(\n data, N=levels, norm=norm, vmin=vmin, vmax=vmax, extend=extend, **kwargs\n )\n\n # Generate DiscreteNorm and update \"child\" norm with vmin and vmax from\n # levels. This lets the colorbar set tick locations properly!\n # TODO: Move these to DiscreteNorm?\n if not isinstance(norm, mcolors.BoundaryNorm) and len(levels) > 1:\n norm = pcolors.DiscreteNorm(\n levels, cmap=cmap, norm=norm, descending=descending, unique=extend,\n )\n if descending:\n cmap = cmap.reversed()\n return norm, cmap, levels, locator\n\n\n@warnings._rename_kwargs('0.6', centers='values')\n@docstring.add_snippets\ndef cmap_changer(\n self, func, *args, extend='neither',\n cmap=None, cmap_kw=None, norm=None, norm_kw=None,\n vmin=None, vmax=None, N=None, levels=None, values=None,\n symmetric=False, positive=False, negative=False, nozero=False,\n locator=None, locator_kw=None,\n edgefix=None, labels=False, labels_kw=None, fmt=None, precision=2,\n colorbar=False, colorbar_kw=None,\n lw=None, linewidth=None, linewidths=None,\n ls=None, linestyle=None, linestyles=None,\n color=None, colors=None, edgecolor=None, edgecolors=None,\n **kwargs\n):\n \"\"\"\n Adds several new keyword args and features for specifying the colormap,\n levels, and normalizers. 
Uses the `~proplot.colors.DiscreteNorm`\n normalizer to bin data into discrete color levels (see notes).\n\n Note\n ----\n This function wraps {methods}\n\n Parameters\n ----------\n extend : {{'neither', 'min', 'max', 'both'}}, optional\n Where to assign unique colors to out-of-bounds data and draw\n \"extensions\" (triangles, by default) on the colorbar.\n %(axes.cmap_changer)s\n edgefix : bool, optional\n Whether to fix the the `white-lines-between-filled-contours \\\n<https://stackoverflow.com/q/8263769/4970632>`__\n and `white-lines-between-pcolor-rectangles \\\n<https://stackoverflow.com/q/27092991/4970632>`__\n issues. This slows down figure rendering by a bit. Default is\n :rc:`image.edgefix`.\n labels : bool, optional\n For `~matplotlib.axes.Axes.contour`, whether to add contour labels\n with `~matplotlib.axes.Axes.clabel`. For `~matplotlib.axes.Axes.pcolor`\n or `~matplotlib.axes.Axes.pcolormesh`, whether to add labels to the\n center of grid boxes. In the latter case, the text will be black\n when the luminance of the underlying grid box color is >50%%, and\n white otherwise.\n labels_kw : dict-like, optional\n Ignored if `labels` is ``False``. Extra keyword args for the labels.\n For `~matplotlib.axes.Axes.contour`, passed to\n `~matplotlib.axes.Axes.clabel`. For `~matplotlib.axes.Axes.pcolor`\n or `~matplotlib.axes.Axes.pcolormesh`, passed to\n `~matplotlib.axes.Axes.text`.\n fmt : format-spec, optional\n Passed to the `~proplot.constructor.Norm` constructor, used to format\n number labels. You can also use the `precision` keyword arg.\n precision : int, optional\n Maximum number of decimal places for the number labels.\n Number labels are generated with the\n `~proplot.ticker.SimpleFormatter` formatter, which allows us to\n limit the precision.\n colorbar : bool, int, or str, optional\n If not ``None``, this is a location specifying where to draw an *inset*\n or *panel* colorbar from the resulting mappable. If ``True``, the\n default location is used. Valid locations are described in\n `~proplot.axes.Axes.colorbar`.\n colorbar_kw : dict-like, optional\n Ignored if `colorbar` is ``None``. Extra keyword args for our call\n to `~proplot.axes.Axes.colorbar`.\n\n Other parameters\n ----------------\n lw, linewidth, linewidths\n The width of `~matplotlib.axes.Axes.contour` lines and\n `~proplot.axes.Axes.parametric` lines. Also the width of lines\n *between* `~matplotlib.axes.Axes.pcolor` boxes,\n `~matplotlib.axes.Axes.pcolormesh` boxes, and\n `~matplotlib.axes.Axes.contourf` filled contours.\n ls, linestyle, linestyles\n As above, but for the line style.\n color, colors, edgecolor, edgecolors\n As above, but for the line color. For `~matplotlib.axes.Axes.contourf`\n plots, if you provide `colors` without specifying the `linewidths`\n or `linestyles`, this argument is used to manually specify the *fill\n colors*. 
See the `~matplotlib.axes.Axes.contourf` documentation for\n details.\n *args, **kwargs\n Passed to the matplotlib plotting method.\n\n See also\n --------\n standardize_2d\n proplot.constructor.Colormap\n proplot.constructor.Norm\n proplot.colors.DiscreteNorm\n \"\"\"\n name = func.__name__\n autoformat = rc['autoformat'] # possibly manipulated by standardize_[12]d\n if not args:\n return func(self, *args, **kwargs)\n\n # Mutable inputs\n cmap_kw = cmap_kw or {}\n norm_kw = norm_kw or {}\n labels_kw = labels_kw or {}\n locator_kw = locator_kw or {}\n colorbar_kw = colorbar_kw or {}\n norm_kw = norm_kw or {}\n\n # Flexible user input\n # NOTE: For now when drawing contour or contourf plots with no colormap,\n # cannot use 'values' to specify level centers or level center count.\n # NOTE: For now need to duplicate 'levels' parsing here and in\n # _build_discrete_norm so that it works with contour plots with no cmap.\n Z_sample = args[-1]\n edgefix = _not_none(edgefix, rc['image.edgefix'])\n linewidths = _not_none(lw=lw, linewidth=linewidth, linewidths=linewidths)\n linestyles = _not_none(ls=ls, linestyle=linestyle, linestyles=linestyles)\n colors = _not_none(\n color=color, colors=colors, edgecolor=edgecolor, edgecolors=edgecolors,\n )\n levels = _not_none(\n N=N, levels=levels, norm_kw_levels=norm_kw.pop('levels', None),\n default=rc['image.levels']\n )\n\n # Get colormap, but do not use cmap when 'colors' are passed to contour()\n # or to contourf() -- the latter only when 'linewidths' and 'linestyles'\n # are also *not* passed. This wrapper lets us add \"edges\" to contourf\n # plots by calling contour() after contourf() if 'linewidths' or\n # 'linestyles' are explicitly passed, but do not want to disable the\n # native matplotlib feature for manually coloring filled contours.\n # https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.axes.Axes.contourf\n add_contours = (\n name in ('contourf', 'tricontourf')\n and (linewidths is not None or linestyles is not None)\n )\n has_cmap = colors is None or (\n name not in ('contour', 'tricontour')\n and (name not in ('contourf', 'tricontourf') or add_contours)\n )\n if not has_cmap:\n if cmap is not None:\n warnings._warn_proplot(\n f'Ignoring input colormap cmap={cmap!r}, using input colors '\n f'colors={colors!r} instead.'\n )\n cmap = None\n if name in ('contourf', 'tricontourf'):\n kwargs['colors'] = colors # this was not done above\n colors = None\n else:\n if cmap is None:\n if name == 'spy':\n cmap = pcolors.ListedColormap(['w', 'k'], '_binary')\n else:\n cmap = rc['image.cmap']\n cmap = constructor.Colormap(cmap, **cmap_kw)\n if getattr(cmap, '_cyclic', None) and extend != 'neither':\n warnings._warn_proplot(\n 'Cyclic colormap requires extend=\"neither\". '\n f'Overriding user input extend={extend!r}.'\n )\n extend = 'neither'\n\n # Translate standardized keyword arguments back into the keyword args\n # accepted by native matplotlib methods. 
Also disable edgefix if user want\n # to customize the \"edges\".\n style_kw = STYLE_ARGS_TRANSLATE.get(name, None)\n for key, value in (\n ('colors', colors),\n ('linewidths', linewidths),\n ('linestyles', linestyles)\n ):\n if value is None or add_contours:\n continue\n if not style_kw or key not in style_kw: # no known conversion table\n raise TypeError(f'{name}() got an unexpected keyword argument {key!r}')\n edgefix = False # disable edgefix when specifying borders!\n kwargs[style_kw[key]] = value\n\n # Build colormap normalizer and update keyword args\n # NOTE: Standard algorithm for obtaining default levels does not work\n # for hexbin, because it colors *counts*, not data values!\n ticks = None\n if not has_cmap and not np.iterable(levels):\n levels, _ = _auto_levels_locator(\n Z_sample, N=levels,\n norm=norm, norm_kw=norm_kw, locator=locator, locator_kw=locator_kw,\n vmin=vmin, vmax=vmax, extend=extend, symmetric=symmetric,\n )\n if has_cmap and name not in ('hexbin',):\n norm, cmap, levels, ticks = _build_discrete_norm(\n Z_sample, levels=levels, values=values, cmap=cmap,\n norm=norm, norm_kw=norm_kw, locator=locator, locator_kw=locator_kw,\n vmin=vmin, vmax=vmax, extend=extend,\n symmetric=symmetric, positive=positive, negative=negative, nozero=nozero,\n minlength=(1 if name in ('contour', 'tricontour') else 2),\n )\n if nozero and np.iterable(levels) and 0 in levels:\n levels = np.asarray(levels)\n levels = levels[levels != 0]\n if has_cmap:\n kwargs['cmap'] = cmap\n if norm is not None:\n kwargs['norm'] = norm\n if name in ('contour', 'contourf', 'tricontour', 'tricontourf'):\n kwargs['levels'] = levels\n kwargs['extend'] = extend\n if name in ('parametric',):\n kwargs['values'] = values\n\n # Call function, possibly twice to add 'edges' to contourf plot\n obj = func(self, *args, **kwargs)\n if not isinstance(obj, tuple): # hist2d\n obj.extend = extend # normally 'extend' is just for contour/contourf\n if ticks is not None:\n obj.ticks = ticks # a Locator or ndarray used for controlling ticks\n if add_contours:\n colors = _not_none(colors, 'k')\n self.contour(\n *args, levels=levels, linewidths=linewidths,\n linestyles=linestyles, colors=colors\n )\n\n # Apply labels\n # TODO: Add quiverkey to this!\n if labels:\n # Formatting for labels\n fmt = _not_none(labels_kw.pop('fmt', None), fmt, 'simple')\n fmt = constructor.Formatter(fmt, precision=precision)\n\n # Use clabel method\n if name in ('contour', 'contourf', 'tricontour', 'tricontourf'):\n cobj = obj\n colors = None\n if name in ('contourf', 'tricontourf'):\n lums = [to_xyz(cmap(norm(level)), 'hcl')[2] for level in levels]\n cobj = self.contour(*args, levels=levels, linewidths=0)\n colors = ['w' if lum < 50 else 'k' for lum in lums]\n text_kw = {}\n for key in (*labels_kw,): # allow dict to change size\n if key not in (\n 'levels', 'fontsize', 'colors', 'inline', 'inline_spacing',\n 'manual', 'rightside_up', 'use_clabeltext',\n ):\n text_kw[key] = labels_kw.pop(key)\n labels_kw.setdefault('colors', colors)\n labels_kw.setdefault('inline_spacing', 3)\n labels_kw.setdefault('fontsize', rc['text.labelsize'])\n labs = cobj.clabel(fmt=fmt, **labels_kw)\n for lab in labs:\n lab.update(text_kw)\n\n # Label each box manually\n # See: https://stackoverflow.com/a/20998634/4970632\n elif name in ('pcolor', 'pcolormesh', 'pcolorfast'):\n # Populate the _facecolors attribute, which is initially filled\n # with just a single color\n obj.update_scalarmappable()\n\n # Get text positions and colors\n labels_kw_ = {'size': 
rc['text.labelsize'], 'ha': 'center', 'va': 'center'}\n labels_kw_.update(labels_kw)\n array = obj.get_array()\n paths = obj.get_paths()\n colors = np.asarray(obj.get_facecolors())\n edgecolors = np.asarray(obj.get_edgecolors())\n if len(colors) == 1: # weird flex but okay\n colors = np.repeat(colors, len(array), axis=0)\n if len(edgecolors) == 1:\n edgecolors = np.repeat(edgecolors, len(array), axis=0)\n for i, (color, path, num) in enumerate(zip(colors, paths, array)):\n if not np.isfinite(num):\n edgecolors[i, :] = 0\n continue\n bbox = path.get_extents()\n x = (bbox.xmin + bbox.xmax) / 2\n y = (bbox.ymin + bbox.ymax) / 2\n if 'color' not in labels_kw:\n _, _, lum = to_xyz(color, 'hcl')\n if lum < 50:\n color = 'w'\n else:\n color = 'k'\n labels_kw_['color'] = color\n self.text(x, y, fmt(num), **labels_kw_)\n obj.set_edgecolors(edgecolors)\n else:\n raise RuntimeError(f'Not possible to add labels to {name!r} plot.')\n\n # Fix white lines between filled contours/mesh and fix issues with colormaps\n # that are not perfectly opaque. 0.4pt is thick enough to hide lines but thin\n # enough to not add \"dots\" in corner of pcolor plots.\n # See: https://github.com/jklymak/contourfIssues\n # See: https://stackoverflow.com/q/15003353/4970632\n if edgefix and name in (\n 'pcolor', 'pcolormesh', 'pcolorfast', 'tripcolor', 'contourf', 'tricontourf'\n ):\n cmap = obj.get_cmap()\n if not cmap._isinit:\n cmap._init()\n if all(cmap._lut[:-1, 3] == 1): # skip for cmaps with transparency\n edgecolor = 'face'\n else:\n edgecolor = 'none'\n if name in ('pcolor', 'pcolormesh', 'pcolorfast', 'tripcolor'):\n if hasattr(obj, 'set_linewidth'): # not always true for pcolorfast\n obj.set_linewidth(0.4)\n if hasattr(obj, 'set_edgecolor'): # not always true for pcolorfast\n obj.set_edgecolor(edgecolor)\n else:\n for contour in obj.collections:\n contour.set_edgecolor(edgecolor)\n contour.set_linewidth(0.4)\n contour.set_linestyle('-')\n\n # Optionally add colorbar\n if colorbar:\n loc = self._loc_translate(colorbar, 'colorbar', allow_manual=False)\n if autoformat:\n _, label = _axis_labels_title(Z_sample) # last one is data, we assume\n if label:\n colorbar_kw.setdefault('label', label)\n if name in ('parametric',) and values is not None:\n colorbar_kw.setdefault('values', values)\n if loc != 'fill':\n colorbar_kw.setdefault('loc', loc)\n self.colorbar(obj, **colorbar_kw)\n\n return obj\n\n\ndef _iter_legend_children(children):\n \"\"\"\n Iterate recursively through `_children` attributes of various `HPacker`,\n `VPacker`, and `DrawingArea` classes.\n \"\"\"\n for obj in children:\n if hasattr(obj, '_children'):\n yield from _iter_legend_children(obj._children)\n else:\n yield obj\n\n\ndef legend_wrapper(\n self, handles=None, labels=None, *, ncol=None, ncols=None,\n center=None, order='C', loc=None, label=None, title=None,\n fontsize=None, fontweight=None, fontcolor=None,\n color=None, marker=None, lw=None, linewidth=None,\n dashes=None, linestyle=None, markersize=None, frameon=None, frame=None,\n **kwargs\n):\n \"\"\"\n Adds useful features for controlling legends, including \"centered-row\"\n legends.\n\n Note\n ----\n This function wraps `proplot.axes.Axes.legend`\n and `proplot.figure.Figure.legend`.\n\n Parameters\n ----------\n handles : list of `~matplotlib.artist.Artist`, optional\n List of artists instances, or list of lists of artist instances (see\n the `center` keyword). 
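# An illustrative sketch, in plain matplotlib, of the centered-row idea
# implemented further below: each row becomes its own single-row Legend,
# stacked with add_artist() so every row is individually centered. The
# handles, row split, and anchor positions are made up for illustration.
import matplotlib.pyplot as plt
from matplotlib.legend import Legend

fig, ax = plt.subplots()
lines = [ax.plot([0, 1], [i, i + 1], label=f'line {i}')[0] for i in range(5)]
rows = [lines[:3], lines[3:]]                      # 3 handles, then 2 handles
for i, row in enumerate(rows):
    leg = Legend(
        ax, row, [h.get_label() for h in row],
        loc='upper center', ncol=len(row), frameon=False,
        bbox_to_anchor=(0, 0.9 - 0.08 * i, 1, 0.08),
        bbox_transform=ax.transAxes,
    )
    ax.add_artist(leg)                             # keep every legend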
If ``None``, the artists are retrieved with\n `~matplotlib.axes.Axes.get_legend_handles_labels`.\n labels : list of str, optional\n Matching list of string labels, or list of lists of string labels (see\n the `center` keywod). If ``None``, the labels are retrieved by calling\n `~matplotlib.artist.Artist.get_label` on each\n `~matplotlib.artist.Artist` in `handles`.\n ncol, ncols : int, optional\n The number of columns. `ncols` is an alias, added\n for consistency with `~matplotlib.pyplot.subplots`.\n order : {'C', 'F'}, optional\n Whether legend handles are drawn in row-major (``'C'``) or column-major\n (``'F'``) order. Analagous to `numpy.array` ordering. For some reason\n ``'F'`` was the original matplotlib default. Default is ``'C'``.\n center : bool, optional\n Whether to center each legend row individually. If ``True``, we\n actually draw successive single-row legends stacked on top of each\n other.\n\n If ``None``, we infer this setting from `handles`. Default is ``True``\n if `handles` is a list of lists; each sublist is used as a *row*\n in the legend. Otherwise, default is ``False``.\n loc : int or str, optional\n The legend location. The following location keys are valid:\n\n ================== ================================================\n Location Valid keys\n ================== ================================================\n \"best\" possible ``0``, ``'best'``, ``'b'``, ``'i'``, ``'inset'``\n upper right ``1``, ``'upper right'``, ``'ur'``\n upper left ``2``, ``'upper left'``, ``'ul'``\n lower left ``3``, ``'lower left'``, ``'ll'``\n lower right ``4``, ``'lower right'``, ``'lr'``\n center left ``5``, ``'center left'``, ``'cl'``\n center right ``6``, ``'center right'``, ``'cr'``\n lower center ``7``, ``'lower center'``, ``'lc'``\n upper center ``8``, ``'upper center'``, ``'uc'``\n center ``9``, ``'center'``, ``'c'``\n ================== ================================================\n\n label, title : str, optional\n The legend title. The `label` keyword is also accepted, for consistency\n with `colorbar`.\n fontsize, fontweight, fontcolor : optional\n The font size, weight, and color for legend text.\n color, lw, linewidth, marker, linestyle, dashes, markersize : \\\nproperty-spec, optional\n Properties used to override the legend handles. For example, if you\n want a legend that describes variations in line style ignoring\n variations in color, you might want to use ``color='k'``. For now this\n does not include `facecolor`, `edgecolor`, and `alpha`, because\n `~matplotlib.axes.Axes.legend` uses these keyword args to modify the\n frame properties.\n\n Other parameters\n ----------------\n **kwargs\n Passed to `~matplotlib.axes.Axes.legend`.\n \"\"\"\n # Parse input args\n # TODO: Legend entries for colormap or scatterplot objects! Idea is we\n # pass a scatter plot or contourf or whatever, and legend is generated by\n # drawing patch rectangles or markers using data values and their\n # corresponding cmap colors! For scatterplots just test get_facecolor()\n # to see if it contains more than one color.\n # TODO: It is *also* often desirable to label a colormap object with\n # one data value. Maybe add a legend option for the *number of samples*\n # or the *sample points* when drawing legends for colormap objects.\n # Look into \"legend handlers\", might just want to add own handlers by\n # passing handler_map to legend() and get_legend_handles_labels().\n if order not in ('F', 'C'):\n raise ValueError(\n f'Invalid order {order!r}. 
Choose from '\n '\"C\" (row-major, default) and \"F\" (column-major).'\n )\n ncol = _not_none(ncols=ncols, ncol=ncol)\n title = _not_none(label=label, title=title)\n frameon = _not_none(\n frame=frame, frameon=frameon, default=rc['legend.frameon']\n )\n if handles is not None and not np.iterable(handles): # e.g. a mappable object\n handles = [handles]\n if labels is not None and (not np.iterable(labels) or isinstance(labels, str)):\n labels = [labels]\n if title is not None:\n kwargs['title'] = title\n if frameon is not None:\n kwargs['frameon'] = frameon\n if fontsize is not None:\n kwargs['fontsize'] = rc._scale_font(fontsize)\n\n # Handle and text properties that are applied after-the-fact\n # NOTE: Set solid_capstyle to 'butt' so line does not extend past error bounds\n # shading in legend entry. This change is not noticable in other situations.\n kw_text = {}\n for key, value in (\n ('color', fontcolor),\n ('weight', fontweight),\n ):\n if value is not None:\n kw_text[key] = value\n kw_handle = {'solid_capstyle': 'butt'}\n for key, value in (\n ('color', color),\n ('marker', marker),\n ('linewidth', lw),\n ('linewidth', linewidth),\n ('markersize', markersize),\n ('linestyle', linestyle),\n ('dashes', dashes),\n ):\n if value is not None:\n kw_handle[key] = value\n\n # Legend box properties\n outline = rc.fill(\n {\n 'linewidth': 'axes.linewidth',\n 'edgecolor': 'axes.edgecolor',\n 'facecolor': 'axes.facecolor',\n 'alpha': 'legend.framealpha',\n }\n )\n for key in (*outline,):\n if key != 'linewidth':\n if kwargs.get(key, None):\n outline.pop(key, None)\n\n # Get axes for legend handle detection\n # TODO: Update this when no longer use \"filled panels\" for outer legends\n axs = [self]\n if self._panel_hidden:\n if self._panel_parent: # axes panel\n axs = list(self._panel_parent._iter_axes(hidden=False, children=True))\n else:\n axs = list(self.figure._iter_axes(hidden=False, children=True))\n\n # Handle list of lists (centered row legends)\n # NOTE: Avoid very common plot() error where users draw individual lines\n # with plot() and add singleton tuples to a list of handles. If matplotlib\n # gets a list like this but gets no 'labels' argument, it raises error.\n list_of_lists = False\n if handles is not None:\n handles = [\n handle[0] if type(handle) is tuple and len(handle) == 1 else handle\n for handle in handles\n ]\n list_of_lists = any(type(handle) in (list, np.ndarray) for handle in handles)\n if handles is not None and labels is not None and len(handles) != len(labels):\n raise ValueError(\n f'Got {len(handles)} handles and {len(labels)} labels.'\n )\n if list_of_lists:\n if any(not np.iterable(_) for _ in handles):\n raise ValueError(f'Invalid handles={handles!r}.')\n if not labels:\n labels = [None] * len(handles)\n elif not all(np.iterable(_) and not isinstance(_, str) for _ in labels):\n # e.g. 
handles=[obj1, [obj2, obj3]] requires labels=[lab1, [lab2, lab3]]\n raise ValueError(\n f'Invalid labels={labels!r} for handles={handles!r}.'\n )\n\n # Parse handles and legends with native matplotlib parser\n if not list_of_lists:\n if isinstance(handles, np.ndarray):\n handles = handles.tolist()\n if isinstance(labels, np.ndarray):\n labels = labels.tolist()\n handles, labels, *_ = mlegend._parse_legend_args(\n axs, handles=handles, labels=labels,\n )\n pairs = list(zip(handles, labels))\n else:\n pairs = []\n for ihandles, ilabels in zip(handles, labels):\n if isinstance(ihandles, np.ndarray):\n ihandles = ihandles.tolist()\n if isinstance(ilabels, np.ndarray):\n ilabels = ilabels.tolist()\n ihandles, ilabels, *_ = mlegend._parse_legend_args(\n axs, handles=ihandles, labels=ilabels,\n )\n pairs.append(list(zip(handles, labels)))\n\n # Manage pairs in context of 'center' option\n center = _not_none(center, list_of_lists)\n if not center and list_of_lists: # standardize format based on input\n list_of_lists = False # no longer is list of lists\n pairs = [pair for ipairs in pairs for pair in ipairs]\n elif center and not list_of_lists:\n list_of_lists = True\n ncol = _not_none(ncol, 3)\n pairs = [\n pairs[i * ncol:(i + 1) * ncol] for i in range(len(pairs))\n ] # to list of iterables\n ncol = None\n if list_of_lists: # remove empty lists, pops up in some examples\n pairs = [ipairs for ipairs in pairs if ipairs]\n\n # Bail if no pairs\n if not pairs:\n return mlegend.Legend(self, [], [], ncol=ncol, loc=loc, **kwargs)\n\n # Individual legend\n legs = []\n width, height = self.get_size_inches()\n if not center:\n # Optionally change order\n # See: https://stackoverflow.com/q/10101141/4970632\n # Example: If 5 columns, but final row length 3, columns 0-2 have\n # N rows but 3-4 have N-1 rows.\n ncol = _not_none(ncol, 3)\n if order == 'C':\n split = [ # split into rows\n pairs[i * ncol:(i + 1) * ncol]\n for i in range(len(pairs) // ncol + 1)\n ]\n nrowsmax = len(split) # max possible row count\n nfinalrow = len(split[-1]) # columns in final row\n nrows = (\n [nrowsmax] * nfinalrow + [nrowsmax - 1] * (ncol - nfinalrow)\n )\n fpairs = []\n for col, nrow in enumerate(nrows): # iterate through cols\n fpairs.extend(split[row][col] for row in range(nrow))\n pairs = fpairs\n\n # Draw legend\n leg = mlegend.Legend(self, *zip(*pairs), ncol=ncol, loc=loc, **kwargs)\n legs = [leg]\n\n # Legend with centered rows, accomplished by drawing separate legends for\n # each row. The label spacing/border spacing will be exactly replicated.\n else:\n # Message when overriding some properties\n overridden = []\n kwargs.pop('frameon', None) # then add back later!\n for override in ('bbox_transform', 'bbox_to_anchor'):\n prop = kwargs.pop(override, None)\n if prop is not None:\n overridden.append(override)\n if ncol is not None:\n warnings._warn_proplot(\n 'Detected list of *lists* of legend handles. 
'\n 'Ignoring user input property \"ncol\".'\n )\n if overridden:\n warnings._warn_proplot(\n 'Ignoring user input properties '\n + ', '.join(map(repr, overridden))\n + ' for centered-row legend.'\n )\n\n # Determine space we want sub-legend to occupy as fraction of height\n # NOTE: Empirical testing shows spacing fudge factor necessary to\n # exactly replicate the spacing of standard aligned legends.\n fontsize = kwargs.get('fontsize', None) or rc['legend.fontsize']\n fontsize = rc._scale_font(fontsize)\n spacing = kwargs.get('labelspacing', None) or rc['legend.labelspacing']\n if pairs:\n interval = 1 / len(pairs) # split up axes\n interval = (((1 + spacing * 0.85) * fontsize) / 72) / height\n\n # Iterate and draw\n # NOTE: We confine possible bounding box in *y*-direction, but do not\n # confine it in *x*-direction. Matplotlib will automatically move\n # left-to-right if you request this.\n ymin, ymax = None, None\n if order == 'F':\n raise NotImplementedError(\n 'When center=True, ProPlot vertically stacks successive '\n 'single-row legends. Column-major (order=\"F\") ordering '\n 'is un-supported.'\n )\n loc = _not_none(loc, 'upper center')\n if not isinstance(loc, str):\n raise ValueError(\n f'Invalid location {loc!r} for legend with center=True. '\n 'Must be a location *string*.'\n )\n elif loc == 'best':\n warnings._warn_proplot(\n 'For centered-row legends, cannot use \"best\" location. '\n 'Using \"upper center\" instead.'\n )\n\n # Iterate through sublists\n for i, ipairs in enumerate(pairs):\n if i == 1:\n kwargs.pop('title', None)\n if i >= 1 and title is not None:\n i += 1 # extra space!\n\n # Legend position\n if 'upper' in loc:\n y1 = 1 - (i + 1) * interval\n y2 = 1 - i * interval\n elif 'lower' in loc:\n y1 = (len(pairs) + i - 2) * interval\n y2 = (len(pairs) + i - 1) * interval\n else: # center\n y1 = 0.5 + interval * len(pairs) / 2 - (i + 1) * interval\n y2 = 0.5 + interval * len(pairs) / 2 - i * interval\n ymin = min(y1, _not_none(ymin, y1))\n ymax = max(y2, _not_none(ymax, y2))\n\n # Draw legend\n bbox = mtransforms.Bbox([[0, y1], [1, y2]])\n leg = mlegend.Legend(\n self, *zip(*ipairs), loc=loc, ncol=len(ipairs),\n bbox_transform=self.transAxes, bbox_to_anchor=bbox,\n frameon=False, **kwargs\n )\n legs.append(leg)\n\n # Add legends manually so matplotlib does not remove old ones\n for leg in legs:\n self.add_artist(leg)\n leg.legendPatch.update(outline) # or get_frame()\n\n # Apply *overrides* to legend elements\n # WARNING: legendHandles only contains the *first* artist per legend because\n # HandlerBase.legend_artist() called in Legend._init_legend_box() only\n # returns the first artist. Instead we try to iterate through offset boxes.\n # TODO: Remove this feature? Idea was this lets users create *categorical*\n # legends in clunky way, e.g. entries denoting *colors* and entries denoting\n # *markers*. But would be better to add capacity for categorical labels in a\n # *single* legend like seaborn rather than multiple legends.\n for leg in legs:\n try:\n children = leg._legend_handle_box._children\n except AttributeError: # older versions maybe?\n children = []\n for obj in _iter_legend_children(children):\n # account for mixed legends, e.g. 
line on top of\n # error bounds shading.\n if isinstance(obj, mtext.Text):\n leg.update(kw_text)\n else:\n for key, value in kw_handle.items():\n getattr(obj, f'set_{key}', lambda value: None)(value)\n\n # Draw manual fancy bounding box for un-aligned legend\n # WARNING: The matplotlib legendPatch transform is the default transform,\n # i.e. universal coordinates in points. Means we have to transform\n # mutation scale into transAxes sizes.\n # WARNING: Tempting to use legendPatch for everything but for some reason\n # coordinates are messed up. In some tests all coordinates were just result\n # of get window extent multiplied by 2 (???). Anyway actual box is found in\n # _legend_box attribute, which is accessed by get_window_extent.\n if center and frameon:\n if len(legs) == 1:\n # Use builtin frame\n legs[0].set_frame_on(True)\n else:\n # Get coordinates\n renderer = self.figure._get_renderer()\n bboxs = [\n leg.get_window_extent(renderer).transformed(self.transAxes.inverted())\n for leg in legs\n ]\n xmin = min(bbox.xmin for bbox in bboxs)\n xmax = max(bbox.xmax for bbox in bboxs)\n ymin = min(bbox.ymin for bbox in bboxs)\n ymax = max(bbox.ymax for bbox in bboxs)\n fontsize = (fontsize / 72) / width # axes relative units\n fontsize = renderer.points_to_pixels(fontsize)\n\n # Draw and format patch\n patch = mpatches.FancyBboxPatch(\n (xmin, ymin), xmax - xmin, ymax - ymin,\n snap=True, zorder=4.5,\n mutation_scale=fontsize,\n transform=self.transAxes\n )\n if kwargs.get('fancybox', rc['legend.fancybox']):\n patch.set_boxstyle('round', pad=0, rounding_size=0.2)\n else:\n patch.set_boxstyle('square', pad=0)\n patch.set_clip_on(False)\n patch.update(outline)\n self.add_artist(patch)\n\n # Add shadow\n # TODO: This does not work, figure out\n if kwargs.get('shadow', rc['legend.shadow']):\n shadow = mpatches.Shadow(patch, 20, -20)\n self.add_artist(shadow)\n\n # Add patch to list\n legs = (patch, *legs)\n\n # Append attributes and return, and set clip property!!! This is critical\n # for tight bounding box calcs!\n for leg in legs:\n leg.set_clip_on(False)\n return legs[0] if len(legs) == 1 else tuple(legs)\n\n\ndef colorbar_wrapper(\n self, mappable, values=None,\n extend=None, extendsize=None,\n title=None, label=None,\n grid=None, tickminor=None,\n reverse=False, tickloc=None, ticklocation=None,\n locator=None, ticks=None, maxn=None, maxn_minor=None,\n minorlocator=None, minorticks=None,\n locator_kw=None, minorlocator_kw=None,\n formatter=None, ticklabels=None, formatter_kw=None, rotation=None,\n norm=None, norm_kw=None, # normalizer to use when passing colors/lines\n orientation='horizontal',\n edgecolor=None, linewidth=None,\n labelsize=None, labelweight=None, labelcolor=None,\n ticklabelsize=None, ticklabelweight=None, ticklabelcolor=None,\n **kwargs\n):\n \"\"\"\n Adds useful features for controlling colorbars.\n\n Note\n ----\n This function wraps `proplot.axes.Axes.colorbar`\n and `proplot.figure.Figure.colorbar`.\n\n Parameters\n ----------\n mappable : mappable, list of plot handles, list of color-spec, \\\nor colormap-spec\n There are four options here:\n\n 1. A mappable object. Basically, any object with a ``get_cmap`` method,\n like the objects returned by `~matplotlib.axes.Axes.contourf` and\n `~matplotlib.axes.Axes.pcolormesh`.\n 2. A list of \"plot handles\". Basically, any object with a ``get_color``\n method, like `~matplotlib.lines.Line2D` instances. A colormap will\n be generated from the colors of these objects, and colorbar levels\n will be selected using `values`. 
If `values` is ``None``, we try\n to infer them by converting the handle labels returned by\n `~matplotlib.artist.Artist.get_label` to `float`. Otherwise, it is\n set to ``np.linspace(0, 1, len(mappable))``.\n 3. A list of hex strings, color string names, or RGB tuples. A colormap\n will be generated from these colors, and colorbar levels will be\n selected using `values`. If `values` is ``None``, it is set to\n ``np.linspace(0, 1, len(mappable))``.\n 4. A `~matplotlib.colors.Colormap` instance. In this case, a colorbar\n will be drawn using this colormap and with levels determined by\n `values`. If `values` is ``None``, it is set to\n ``np.linspace(0, 1, cmap.N)``.\n\n values : list of float, optional\n Ignored if `mappable` is a mappable object. This maps each color or\n plot handle in the `mappable` list to numeric values, from which a\n colormap and normalizer are constructed.\n norm : normalizer spec, optional\n Ignored if `values` is ``None``. The normalizer for converting `values`\n to colormap colors. Passed to `~proplot.constructor.Norm`.\n norm_kw : dict-like, optional\n The normalizer settings. Passed to `~proplot.constructor.Norm`.\n extend : {None, 'neither', 'both', 'min', 'max'}, optional\n Direction for drawing colorbar \"extensions\" (i.e. references to\n out-of-bounds data with a unique color). These are triangles by\n default. If ``None``, we try to use the ``extend`` attribute on the\n mappable object. If the attribute is unavailable, we use ``'neither'``.\n extendsize : float or str, optional\n The length of the colorbar \"extensions\" in *physical units*.\n If float, units are inches. If string, units are interpreted\n by `~proplot.utils.units`. Default is :rc:`colorbar.insetextend`\n for inset colorbars and :rc:`colorbar.extend` for outer colorbars.\n reverse : bool, optional\n Whether to reverse the direction of the colorbar.\n tickloc, ticklocation : {'bottom', 'top', 'left', 'right'}, optional\n Where to draw tick marks on the colorbar.\n tickminor : bool, optional\n Whether to add minor ticks to the colorbar with\n `~matplotlib.colorbar.ColorbarBase.minorticks_on`.\n grid : bool, optional\n Whether to draw \"gridlines\" between each level of the colorbar.\n Default is :rc:`colorbar.grid`.\n label, title : str, optional\n The colorbar label. The `title` keyword is also accepted for\n consistency with `legend`.\n locator, ticks : locator spec, optional\n Used to determine the colorbar tick positions. Passed to the\n `~proplot.constructor.Locator` constructor.\n maxn : int, optional\n Used if `locator` is ``None``. Determines the maximum number of levels\n that are ticked. Default depends on the colorbar length relative\n to the font size. The keyword name \"maxn\" is meant to mimic\n the `~matplotlib.ticker.MaxNLocator` class name.\n locator_kw : dict-like, optional\n The locator settings. Passed to `~proplot.constructor.Locator`.\n minorlocator, minorticks, maxn_minor, minorlocator_kw\n As with `locator`, `maxn`, and `locator_kw`, but for the minor ticks.\n formatter, ticklabels : formatter spec, optional\n The tick label format. Passed to the `~proplot.constructor.Formatter`\n constructor.\n formatter_kw : dict-like, optional\n The formatter settings. Passed to `~proplot.constructor.Formatter`.\n rotation : float, optional\n The tick label rotation. 
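# An illustrative sketch, in plain matplotlib, of the "list of plot handles"
# case described above: collect the handle colors into a ListedColormap, build
# a BoundaryNorm around the inferred values, and pass a ScalarMappable to
# colorbar(). The sample lines and numeric labels are made up.
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
import matplotlib.cm as mcm

fig, ax = plt.subplots()
handles = [ax.plot(np.random.rand(10), label=str(v))[0] for v in (1, 2, 3)]
values = np.array([float(h.get_label()) for h in handles])

cmap = mcolors.ListedColormap([h.get_color() for h in handles])
edges = np.concatenate((
    [values[0] - 0.5], 0.5 * (values[1:] + values[:-1]), [values[-1] + 0.5],
))
norm = mcolors.BoundaryNorm(edges, cmap.N)
fig.colorbar(mcm.ScalarMappable(norm=norm, cmap=cmap), ax=ax, ticks=values)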
Default is ``0``.\n edgecolor, linewidth : optional\n The edge color and line width for the colorbar outline.\n labelsize, labelweight, labelcolor : optional\n The font size, weight, and color for colorbar label text.\n ticklabelsize, ticklabelweight, ticklabelcolor : optional\n The font size, weight, and color for colorbar tick labels.\n orientation : {{'horizontal', 'vertical'}}, optional\n The colorbar orientation. You should not have to explicitly set this.\n\n Other parameters\n ----------------\n **kwargs\n Passed to `~matplotlib.figure.Figure.colorbar`.\n \"\"\"\n # NOTE: There is a weird problem with colorbars when simultaneously\n # passing levels and norm object to a mappable; fixed by passing vmin/vmax\n # instead of levels. (see: https://stackoverflow.com/q/40116968/4970632).\n # NOTE: Often want levels instead of vmin/vmax, while simultaneously\n # using a Normalize (for example) to determine colors between the levels\n # (see: https://stackoverflow.com/q/42723538/4970632). Workaround makes\n # sure locators are in vmin/vmax range exclusively; cannot match values.\n # NOTE: In legend_wrapper() we try to add to the objects accepted by\n # legend() using handler_map. We can't really do anything similar for\n # colorbars; input must just be insnace of mixin class cm.ScalarMappable\n # Mutable args\n norm_kw = norm_kw or {}\n formatter_kw = formatter_kw or {}\n locator_kw = locator_kw or {}\n minorlocator_kw = minorlocator_kw or {}\n\n # Parse input args\n label = _not_none(title=title, label=label)\n locator = _not_none(ticks=ticks, locator=locator)\n minorlocator = _not_none(minorticks=minorticks, minorlocator=minorlocator)\n ticklocation = _not_none(tickloc=tickloc, ticklocation=ticklocation)\n formatter = _not_none(ticklabels=ticklabels, formatter=formatter)\n\n # Colorbar kwargs\n # WARNING: PathCollection scatter objects have an extend method!\n # WARNING: Matplotlib 3.3 deprecated 'extend' parameter passed to colorbar()\n # but *also* fails to read 'extend' parameter when added to a pcolor mappable!\n # Need to figure out workaround!\n grid = _not_none(grid, rc['colorbar.grid'])\n if extend is None:\n if isinstance(getattr(mappable, 'extend', None), str):\n extend = mappable.extend or 'neither'\n else:\n extend = 'neither'\n kwargs.update({\n 'cax': self,\n 'use_gridspec': True,\n 'orientation': orientation,\n 'spacing': 'uniform',\n 'extend': extend,\n })\n kwargs.setdefault('drawedges', grid)\n\n # Text property keyword args\n kw_label = {}\n for key, value in (\n ('size', labelsize),\n ('weight', labelweight),\n ('color', labelcolor),\n ):\n if value is not None:\n kw_label[key] = value\n kw_ticklabels = {}\n for key, value in (\n ('size', ticklabelsize),\n ('weight', ticklabelweight),\n ('color', ticklabelcolor),\n ('rotation', rotation),\n ):\n if value is not None:\n kw_ticklabels[key] = value\n\n # Special case where auto colorbar is generated from 1d methods, a list is\n # always passed, but some 1d methods (scatter) do have colormaps.\n if (\n np.iterable(mappable)\n and len(mappable) == 1\n and hasattr(mappable[0], 'get_cmap')\n ):\n mappable = mappable[0]\n\n # For container objects, we just assume color is the same for every item.\n # Works for ErrorbarContainer, StemContainer, BarContainer.\n if (\n np.iterable(mappable)\n and len(mappable) > 0\n and all(isinstance(obj, mcontainer.Container) for obj in mappable)\n ):\n mappable = [obj[0] for obj in mappable]\n\n # Test if we were given a mappable, or iterable of stuff; note Container\n # and PolyCollection 
matplotlib classes are iterable.\n cmap = None\n if not isinstance(mappable, (martist.Artist, mcontour.ContourSet)):\n # A colormap instance\n # TODO: Pass remaining arguments through Colormap()? This is really\n # niche usage so maybe not necessary.\n if isinstance(mappable, mcolors.Colormap):\n # NOTE: 'Values' makes no sense if this is just a colormap. Just\n # use unique color for every segmentdata / colors color.\n cmap = mappable\n values = np.linspace(0, 1, cmap.N)\n\n # List of colors\n elif np.iterable(mappable) and all(\n isinstance(obj, str) or (np.iterable(obj) and len(obj) in (3, 4))\n for obj in mappable\n ):\n colors = list(mappable)\n cmap = mcolors.ListedColormap(colors, '_no_name')\n if values is None:\n values = np.arange(len(colors))\n locator = _not_none(locator, values) # tick *all* values by default\n\n # List of artists\n # NOTE: Do not check for isinstance(Artist) in case it is an mpl collection\n elif np.iterable(mappable) and all(\n hasattr(obj, 'get_color') or hasattr(obj, 'get_facecolor')\n for obj in mappable\n ):\n # Generate colormap from colors and infer tick labels\n colors = []\n for obj in mappable:\n if hasattr(obj, 'get_color'):\n color = obj.get_color()\n else:\n color = obj.get_facecolor()\n if isinstance(color, np.ndarray):\n color = color.squeeze() # e.g. scatter plot\n if color.ndim != 1:\n raise ValueError(\n 'Cannot make colorbar from list of artists '\n f'with more than one color: {color!r}.'\n )\n colors.append(to_rgb(color))\n cmap = mcolors.ListedColormap(colors, '_no_name')\n\n # Try to infer tick values and tick labels from Artist labels\n if values is None:\n # Get object labels and values (avoid overwriting colorbar 'label')\n labs = []\n values = []\n for obj in mappable:\n lab = value = None\n if hasattr(obj, 'get_label'):\n lab = obj.get_label() or None\n if lab and lab[:1] == '_': # intended to be ignored by legend\n lab = None\n if lab:\n try:\n value = float(lab)\n except (TypeError, ValueError):\n pass\n labs.append(lab)\n values.append(value)\n # Use default values if labels are non-numeric (numeric labels are\n # common when making on-the-fly colorbars). Try to use object labels\n # for ticks with default vertical rotation, like datetime axes.\n if any(value is None for value in values):\n values = np.arange(len(mappable))\n if formatter is None and any(lab is not None for lab in labs):\n formatter = labs # use these fixed values for ticks\n if orientation == 'horizontal':\n kw_ticklabels.setdefault('rotation', 90)\n locator = _not_none(locator, values) # tick *all* values by default\n\n else:\n raise ValueError(\n 'Input mappable must be a matplotlib artist, '\n 'list of objects, list of colors, or colormap. '\n f'Got {mappable!r}.'\n )\n\n # Build ad hoc ScalarMappable object from colors\n if cmap is not None:\n if np.iterable(mappable) and len(values) != len(mappable):\n raise ValueError(\n f'Passed {len(values)} values, but only {len(mappable)} '\n f'objects or colors.'\n )\n norm, *_ = _build_discrete_norm(\n values=values, extend='neither',\n cmap=cmap, norm=norm, norm_kw=norm_kw,\n )\n mappable = mcm.ScalarMappable(norm, cmap)\n\n # Try to get tick locations from *levels* or from *values* rather than\n # random points along the axis.\n # NOTE: Do not necessarily want e.g. minor tick locations at logminor\n # for LogNorm! 
In _build_discrete_norm we sometimes select evenly spaced\n # levels in log-space *between* powers of 10, so logminor ticks would be\n # misaligned with levels.\n if locator is None:\n locator = getattr(mappable, 'ticks', None)\n if locator is None:\n # This should only happen if user calls plotting method on native\n # matplotlib axes.\n if isinstance(norm, mcolors.LogNorm):\n locator = 'log'\n elif isinstance(norm, mcolors.SymLogNorm):\n locator = 'symlog'\n locator_kw.setdefault('linthresh', norm.linthresh)\n else:\n locator = 'auto'\n\n elif not isinstance(locator, mticker.Locator):\n # Get default maxn, try to allot 2em squares per label maybe?\n # NOTE: Cannot use Axes.get_size_inches because this is a\n # native matplotlib axes\n width, height = self.figure.get_size_inches()\n if orientation == 'horizontal':\n scale = 3 # em squares alotted for labels\n length = width * abs(self.get_position().width)\n fontsize = kw_ticklabels.get('size', rc['xtick.labelsize'])\n else:\n scale = 1\n length = height * abs(self.get_position().height)\n fontsize = kw_ticklabels.get('size', rc['ytick.labelsize'])\n fontsize = rc._scale_font(fontsize)\n maxn = _not_none(maxn, int(length / (scale * fontsize / 72)))\n maxn_minor = _not_none(\n maxn_minor, int(length / (0.5 * fontsize / 72))\n )\n\n # Get locator\n if tickminor and minorlocator is None:\n step = 1 + len(locator) // max(1, maxn_minor)\n minorlocator = locator[::step]\n step = 1 + len(locator) // max(1, maxn)\n locator = locator[::step]\n\n # Get extend triangles in physical units\n width, height = self.figure.get_size_inches()\n if orientation == 'horizontal':\n scale = width * abs(self.get_position().width)\n else:\n scale = height * abs(self.get_position().height)\n extendsize = units(_not_none(extendsize, rc['colorbar.extend']))\n extendsize = extendsize / (scale - 2 * extendsize)\n\n # Draw the colorbar\n # NOTE: Set default formatter here because we optionally apply a FixedFormatter\n # using *labels* from handle input.\n locator = constructor.Locator(locator, **locator_kw)\n formatter = constructor.Formatter(_not_none(formatter, 'auto'), **formatter_kw)\n kwargs.update({\n 'ticks': locator,\n 'format': formatter,\n 'ticklocation': ticklocation,\n 'extendfrac': extendsize\n })\n mappable.extend = extend # matplotlib >=3.3\n cb = self.figure.colorbar(mappable, **kwargs)\n axis = self.xaxis if orientation == 'horizontal' else self.yaxis\n\n # The minor locator\n # TODO: Document the improved minor locator functionality!\n # NOTE: Colorbar._use_auto_colorbar_locator() is never True because we use\n # the custom DiscreteNorm normalizer. Colorbar._ticks() always called.\n if minorlocator is None:\n if tickminor:\n cb.minorticks_on()\n else:\n cb.minorticks_off()\n elif not hasattr(cb, '_ticker'):\n warnings._warn_proplot(\n 'Matplotlib colorbar API has changed. '\n f'Cannot use custom minor tick locator {minorlocator!r}.'\n )\n cb.minorticks_on() # at least turn them on\n else:\n # Set the minor ticks just like matplotlib internally sets the\n # major ticks. 
Private API is the only way!\n minorlocator = constructor.Locator(minorlocator, **minorlocator_kw)\n ticks, *_ = cb._ticker(minorlocator, mticker.NullFormatter())\n axis.set_ticks(ticks, minor=True)\n axis.set_ticklabels([], minor=True)\n\n # Label and tick label settings\n # WARNING: Must use colorbar set_label to set text, calling set_text on\n # the axis will do nothing!\n if label is not None:\n cb.set_label(label)\n axis.label.update(kw_label)\n for obj in axis.get_ticklabels():\n obj.update(kw_ticklabels)\n\n # Ticks\n xy = axis.axis_name\n for which in ('minor', 'major'):\n kw = rc.category(xy + 'tick.' + which)\n kw.pop('visible', None)\n if edgecolor:\n kw['color'] = edgecolor\n if linewidth:\n kw['width'] = linewidth\n axis.set_tick_params(which=which, **kw)\n axis.set_ticks_position(ticklocation)\n\n # Fix alpha-blending issues.\n # Cannot set edgecolor to 'face' if alpha non-zero because blending will\n # occur, will get colored lines instead of white ones. Need manual blending\n # NOTE: For some reason cb solids uses listed colormap with always 1.0\n # alpha, then alpha is applied after.\n # See: https://stackoverflow.com/a/35672224/4970632\n cmap = cb.cmap\n if not cmap._isinit:\n cmap._init()\n if any(cmap._lut[:-1, 3] < 1):\n warnings._warn_proplot(\n f'Using manual alpha-blending for {cmap.name!r} colorbar solids.'\n )\n # Generate \"secret\" copy of the colormap!\n lut = cmap._lut.copy()\n cmap = mcolors.Colormap('_cbar_fix', N=cmap.N)\n cmap._isinit = True\n cmap._init = lambda: None\n # Manually fill lookup table with alpha-blended RGB colors!\n for i in range(lut.shape[0] - 1):\n alpha = lut[i, 3]\n lut[i, :3] = (1 - alpha) * 1 + alpha * lut[i, :3] # blend *white*\n lut[i, 3] = 1\n cmap._lut = lut\n # Update colorbar\n cb.cmap = cmap\n cb.draw_all()\n\n # Fix colorbar outline\n kw_outline = {\n 'edgecolor': _not_none(edgecolor, rc['axes.edgecolor']),\n 'linewidth': _not_none(linewidth, rc['axes.linewidth']),\n }\n if cb.outline is not None:\n cb.outline.update(kw_outline)\n if cb.dividers is not None:\n cb.dividers.update(kw_outline)\n\n # *Never* rasterize because it causes misalignment with border lines\n if cb.solids:\n cb.solids.set_rasterized(False)\n cb.solids.set_linewidth(0.4)\n cb.solids.set_edgecolor('face')\n\n # Invert the axis if descending DiscreteNorm\n norm = mappable.norm\n if getattr(norm, '_descending', None):\n axis.set_inverted(True)\n if reverse: # potentially double reverse, although that would be weird...\n axis.set_inverted(True)\n return cb\n\n\ndef _basemap_redirect(func):\n \"\"\"\n Docorator that calls the basemap version of the function of the\n same name. 
This must be applied as the innermost decorator.\n \"\"\"\n name = func.__name__\n\n @functools.wraps(func)\n def wrapper(self, *args, **kwargs):\n if getattr(self, 'name', '') == 'basemap':\n return getattr(self.projection, name)(*args, ax=self, **kwargs)\n else:\n return func(self, *args, **kwargs)\n wrapper.__doc__ = None\n return wrapper\n\n\ndef _basemap_norecurse(func):\n \"\"\"\n Decorator to prevent recursion in basemap method overrides.\n See `this post https://stackoverflow.com/a/37675810/4970632`__.\n \"\"\"\n name = func.__name__\n func._called_from_basemap = False\n\n @functools.wraps(func)\n def wrapper(self, *args, **kwargs):\n if func._called_from_basemap:\n result = getattr(maxes.Axes, name)(self, *args, **kwargs)\n else:\n with _state_context(func, _called_from_basemap=True):\n result = func(self, *args, **kwargs)\n return result\n return wrapper\n\n\ndef _generate_decorator(driver):\n \"\"\"\n Generate generic wrapper decorator and dynamically modify the docstring\n to list methods wrapped by this function. Also set `__doc__` to ``None`` so\n that ProPlot fork of automodapi doesn't add these methods to the website\n documentation. Users can still call help(ax.method) because python looks\n for superclass method docstrings if a docstring is empty.\n \"\"\"\n driver._docstring_orig = driver.__doc__ or ''\n driver._methods_wrapped = []\n proplot_methods = ('parametric', 'heatmap', 'area', 'areax')\n\n def decorator(func):\n # Define wrapper and suppress documentation\n # We only document wrapper functions, not the methods they wrap\n @functools.wraps(func)\n def wrapper(self, *args, **kwargs):\n return driver(self, func, *args, **kwargs)\n name = func.__name__\n if name not in proplot_methods:\n wrapper.__doc__ = None\n\n # List wrapped methods in the driver function docstring\n # Prevents us from having to both explicitly apply decorators in\n # axes.py and explicitly list functions *again* in this file\n docstring = driver._docstring_orig\n if '{methods}' in docstring:\n if name in proplot_methods:\n link = f'`~proplot.axes.Axes.{name}`'\n else:\n link = f'`~matplotlib.axes.Axes.{name}`'\n methods = driver._methods_wrapped\n if link not in methods:\n methods.append(link)\n string = (\n ', '.join(methods[:-1])\n + ',' * int(len(methods) > 2) # Oxford comma bitches\n + ' and ' * int(len(methods) > 1)\n + methods[-1]\n )\n driver.__doc__ = docstring.format(methods=string)\n return wrapper\n return decorator\n\n\n# Auto generated decorators. Each wrapper internally calls func(self, ...) 
somewhere.\n_bar_wrapper = _generate_decorator(bar_wrapper)\n_barh_wrapper = _generate_decorator(barh_wrapper)\n_default_latlon = _generate_decorator(default_latlon)\n_boxplot_wrapper = _generate_decorator(boxplot_wrapper)\n_default_transform = _generate_decorator(default_transform)\n_cmap_changer = _generate_decorator(cmap_changer)\n_cycle_changer = _generate_decorator(cycle_changer)\n_fill_between_wrapper = _generate_decorator(fill_between_wrapper)\n_fill_betweenx_wrapper = _generate_decorator(fill_betweenx_wrapper)\n_hist_wrapper = _generate_decorator(hist_wrapper)\n_hlines_wrapper = _generate_decorator(hlines_wrapper)\n_indicate_error = _generate_decorator(indicate_error)\n_parametric_wrapper = _generate_decorator(parametric_wrapper)\n_plot_wrapper = _generate_decorator(plot_wrapper)\n_scatter_wrapper = _generate_decorator(scatter_wrapper)\n_standardize_1d = _generate_decorator(standardize_1d)\n_standardize_2d = _generate_decorator(standardize_2d)\n_stem_wrapper = _generate_decorator(stem_wrapper)\n_text_wrapper = _generate_decorator(text_wrapper)\n_violinplot_wrapper = _generate_decorator(violinplot_wrapper)\n_vlines_wrapper = _generate_decorator(vlines_wrapper)\n"
] |
[
[
"matplotlib.cm.ScalarMappable",
"numpy.meshgrid",
"matplotlib.patches.FancyBboxPatch",
"matplotlib.ticker.IndexFormatter",
"numpy.min",
"numpy.mean",
"numpy.where",
"numpy.sign",
"numpy.ma.concatenate",
"numpy.issubdtype",
"matplotlib.colors.Colormap",
"numpy.max",
"numpy.concatenate",
"matplotlib.patheffects.Normal",
"numpy.empty",
"matplotlib.ticker.MaxNLocator",
"matplotlib.ticker.LogLocator",
"numpy.asscalar",
"numpy.arange",
"numpy.isfinite",
"numpy.ma.append",
"numpy.array",
"numpy.percentile",
"numpy.roll",
"numpy.ma.masked_invalid",
"matplotlib.colors.ListedColormap",
"numpy.diff",
"numpy.ma.is_masked",
"numpy.std",
"numpy.timedelta64",
"matplotlib.ticker.NullFormatter",
"matplotlib.transforms.Bbox",
"matplotlib.legend.Legend",
"numpy.iterable",
"numpy.asarray",
"numpy.errstate",
"matplotlib.legend._parse_legend_args",
"matplotlib.ticker.SymmetricalLogLocator",
"matplotlib.patches.Shadow",
"numpy.atleast_1d",
"numpy.ma.array",
"matplotlib.colors.Normalize",
"numpy.ravel",
"numpy.repeat",
"matplotlib.ticker.NullLocator",
"numpy.linspace",
"matplotlib.ticker.FixedLocator",
"matplotlib.patheffects.Stroke"
]
] |
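Illustrative aside, not part of the dataset row above: the colorbar_wrapper cell describes building a colorbar from a plain list of colors by wrapping them in a ListedColormap and an ad hoc ScalarMappable. A minimal sketch of that pattern in plain matplotlib, with made-up figure, axes, and colors, might look like this:

import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as mcm
import matplotlib.colors as mcolors

# Stand-in colors and tick values; in colorbar_wrapper these come from plot handles.
colors = ['tomato', 'gold', 'seagreen', 'steelblue']
values = np.arange(len(colors))

# One colormap entry per color, with level boundaries centred on the tick values.
cmap = mcolors.ListedColormap(colors, '_no_name')
norm = mcolors.BoundaryNorm(np.arange(len(colors) + 1) - 0.5, cmap.N)
mappable = mcm.ScalarMappable(norm=norm, cmap=cmap)

fig, ax = plt.subplots()
cb = fig.colorbar(mappable, ax=ax, orientation='horizontal', ticks=values)
cb.set_label('category')
plt.show()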
hotosm/ml-enabler-cli
|
[
"287d929fb0cf5be41100bdff41261ef99ded0ceb"
] |
[
"ml_enabler/utils/osm.py"
] |
[
"from area import area\nfrom geopy.distance import great_circle\nimport json\nimport numpy as np\nimport os\nimport subprocess\nimport tempfile\n\n\ndef get_osm(aoi):\n # convert AOI to bounding box\n pass\n\n\nclass OSMData(object):\n\n def __init__(self, aoi, overpass_url='https://lz4.overpass-api.de/api/interpreter'):\n self.aoi = aoi\n self.url = overpass_url\n self.geojson = None\n\n @property\n def bbox(self):\n \"\"\" Get bounding box of AOI \"\"\"\n coords = np.array(self.aoi['geometry']['coordinates']).squeeze()\n lons = [c[0] for c in coords]\n lats = [c[1] for c in coords]\n return [min(lons), min(lats), max(lons), max(lats)]\n\n @classmethod\n def _to_geojson(cls, data):\n geojson = {\n \"type\": \"FeatureCollection\",\n \"features\": [\n {\n \"type\": \"Feature\",\n \"geometry\": {\n \"type\": \"Point\",\n \"coordinates\": [d[\"lon\"], d[\"lat\"]],\n },\n \"properties\": d,\n } for d in data]\n }\n return geojson\n\n @classmethod\n def run_command(cls, cmd):\n \"\"\" Run cmd as a system command \"\"\"\n try:\n out = subprocess.check_output(cmd.split(' '), stderr=subprocess.STDOUT)\n return out\n except Exception:\n raise RuntimeError('Error running %s' % cmd)\n\n @classmethod\n def to_geojson(cls, osmdata):\n \"\"\" Convert OSM data to GeoJSON using osmtogeojson \"\"\"\n with tempfile.TemporaryDirectory() as outdir:\n outfile = os.path.join(outdir, 'osm.osm')\n # logger.debug('Writing OSM data to temp file %s' % outfile)\n with open(outfile, 'w') as f:\n f.write(osmdata)\n geojson = json.loads(cls.run_command('osmtogeojson %s' % outfile))\n os.remove(outfile)\n return geojson\n\n async def building_area(self, session):\n geojson = await self.fetch(session)\n total = 0\n for f in geojson['features']:\n if 'building' in f['properties']:\n total += area(f['geometry'])\n return total\n\n async def road_length_km(self, session):\n geojson = await self.fetch(session)\n total = 0\n for f in geojson['features']:\n if 'highway' not in f['properties']:\n continue\n len = 0\n last_coord = None\n for c in f['geometry']['coordinates']:\n if last_coord is None:\n last_coord = c\n continue\n len += great_circle((last_coord[1], last_coord[0]), (c[1], c[0])).km\n total += len\n return total\n\n async def fetch(self, session):\n \"\"\" Fetch OSM data within an AOI as GeoJSON \"\"\"\n # osm bounding box: (south, west, north, east)\n if self.geojson is not None:\n return self.geojson\n q = self.build_query()\n resp = await session.get(self.url, data=q)\n if resp.status == 200:\n self.geojson = self.to_geojson(await resp.text())\n return self.geojson\n else:\n raise Exception('Error fetching OSM data from Overpass')\n\n def build_query(self, bbox=False, format='xml'):\n # poly\n if bbox:\n geoq = '%s, %s, %s, %s' % (self.bbox[1], self.bbox[0], self.bbox[3], self.bbox[2])\n else:\n coords = np.array(self.aoi['geometry']['coordinates']).squeeze()\n _coords = ['%s %s' % (c[1], c[0]) for c in coords]\n geoq = 'poly:\"%s\"' % ' '.join(_coords)\n\n # types = ('node[\"building\"=\"yes\"]', 'way[\"building\"=\"yes\"]', 'relation[\"building\"=\"yes\"]')\n types = ('way[\"building\"]','way[\"highway\"]')\n # types = ('way[\"building\"=\"yes\"]')\n q = '[out:%s];(%s);out geom;' % (format, ''.join(['%s(%s);' % (t, geoq) for t in types]))\n # q = '[out:xml];way[\"building\"=\"yes\"](%s);out geom;' % geoq\n # logger.debug(q)\n return q\n"
] |
[
[
"numpy.array"
]
] |
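Illustrative aside, not part of the dataset row above: the OSMData class in the ml_enabler/utils/osm.py cell derives a bounding box and an Overpass poly filter from a GeoJSON AOI. A self-contained sketch of those two steps, using a made-up AOI polygon, might look like this:

import numpy as np

# Hypothetical AOI; OSMData receives the equivalent structure as self.aoi.
aoi = {"geometry": {"coordinates": [[[13.37, 52.51], [13.40, 52.51],
                                     [13.40, 52.53], [13.37, 52.53],
                                     [13.37, 52.51]]]}}

coords = np.array(aoi["geometry"]["coordinates"]).squeeze()
lons, lats = coords[:, 0], coords[:, 1]
bbox = [lons.min(), lats.min(), lons.max(), lats.max()]  # [west, south, east, north]

# Overpass expects "lat lon" pairs inside a poly:"..." filter.
poly = 'poly:"%s"' % " ".join("%s %s" % (lat, lon) for lon, lat in coords)
query = '[out:xml];(way["building"](%s);way["highway"](%s););out geom;' % (poly, poly)
print(bbox)
print(query)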
nalkhish/MachineLearning
|
[
"591c9e351f9f02fbcf6921dd84b679a45de2c66c"
] |
[
"Coursera/costs.py"
] |
[
"import numpy as np\n\n\ndef regularization_cost(kwargs):\n thetas = kwargs.get('thetas', np.array([0]))\n return kwargs.get('reg_p', 0) * thetas.T.dot(thetas)\n\ndef regularization_cost_2(thetas, kwargs):\n return kwargs.get('reg_p', 0) * thetas.T.dot(thetas)\n\ndef calc_cost_linear(m, **kwargs):\n return kwargs['ers'].T.dot(kwargs['ers'])/(2*m) + regularization_cost(kwargs)\n\n\ndef calc_cost_logistic(m, **kwargs):\n targets = kwargs['ts']\n predictions = kwargs['ps']\n return (-targets.T.dot(np.log(predictions)) - (1-targets).T.dot(np.log(1-predictions))) / m + regularization_cost(kwargs)\n\n\ndef calc_cost_multiclass_logistic(m, **kwargs):\n targets = kwargs['ts']\n predictions = kwargs['ps']\n costs = []\n for i in range(len(targets)):\n thetas = kwargs.get('thetas', np.array([0] * len(targets))).T[i]\n cost = (-targets[i].T.dot(np.log(predictions[i])) - (1-targets[i]).T.dot(np.log(1-predictions[i]))) / m + regularization_cost_2(thetas, kwargs)\n costs.append(cost)\n return costs\n"
] |
[
[
"numpy.array",
"numpy.log"
]
] |
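Illustrative aside, not part of the dataset row above: the costs.py cell computes an L2-regularized logistic (cross-entropy) cost. A tiny worked example on made-up arrays, following the formula in calc_cost_logistic, might look like this:

import numpy as np

targets = np.array([1.0, 0.0, 1.0])
predictions = np.array([0.9, 0.2, 0.7])
thetas = np.array([0.5, -0.3])
reg_p = 0.1
m = len(targets)

# Mean negative log-likelihood plus the reg_p * theta.T.dot(theta) penalty.
cross_entropy = (-targets.dot(np.log(predictions))
                 - (1 - targets).dot(np.log(1 - predictions))) / m
cost = cross_entropy + reg_p * thetas.dot(thetas)
print(cost)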
asongtoruin/album_plots
|
[
"639ca31379f10364dc0a628889419e9a4fcaa9a2"
] |
[
"draw.py"
] |
[
"import argparse\nfrom pathlib import Path\nfrom textwrap import wrap\n\nimport matplotlib.patheffects as pe\nimport matplotlib.pyplot as plt\nimport palbums\nimport pandas as pd\nfrom PIL import Image\nimport seaborn as sns\nimport spotipy\nfrom spotipy.oauth2 import SpotifyClientCredentials\nimport yaml\n\nimport tools\n\n\nGRAPH_DIR = Path('Graphs')\nGRAPH_DIR.mkdir(exist_ok=True)\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-config', help='yaml config file')\nconfig_path = parser.parse_args().config\n\nargs = yaml.load(Path(config_path).read_text(), Loader=yaml.SafeLoader)\n\nif 'rcParams' in args.keys():\n for k, v in args['rcParams'].items():\n plt.rcParams[k] = v\n\nif 'style' in args.keys():\n plt.style.use(args['style'])\n\n\nauth = spotipy.SpotifyOAuth(\n redirect_uri='http://localhost:8888/callback', username=args['username']\n)\n\nsp = spotipy.Spotify(auth_manager=auth)\n\nalbum = tools.get_album(sp, args['album_id'])\n\nall_data = pd.concat(\n tools.Track(t).loudness(sp) for t in album['tracks']['items']\n)\nall_data['Centred Time'] = (\n all_data['Time'] \n - (all_data.groupby('TrackNo')['Time'].transform('max') / 2)\n)\n\ng = sns.FacetGrid(\n data=all_data, sharex=True, sharey=True, row='Name', aspect=8, height=.8\n)\ng.map_dataframe(tools.plot_waves)\n\ng.set_titles('{row_name}', c='C1', weight='bold', pad=2) \n\nfor ax in g.axes.flatten():\n ax.set_axis_off()\n ax_min, ax_max = ax.get_xlim()\n \n ax.margins(x=0.003)\n\nplt.tight_layout()\n \nplt.savefig('body.png')\n\nwidth = g.fig.get_size_inches()[0]\n\nfig, ax = plt.subplots()\nfig.set_size_inches(width, 3)\nimg = plt.imread(album['images'][0]['url'], format='jpg')\nax.imshow(img)\nax.set_axis_off()\n\nname = album['name']\n\nif len(name) > 40:\n title = '\\n'.join(wrap(name, 40))\n size = 20\nelse:\n title = name\n size = 30\n \naname = ax.annotate(\n text=title, xy=(0.5, 0.28), xycoords='figure fraction', \n size=size, weight='bold', ha='center', va='top', c='C1'\n)\n\nartists = ','.join(a['name'] for a in album['artists'])\nbbox = aname.get_window_extent(fig.canvas.get_renderer())\n\nart_text = ax.annotate(\n text=artists, xy=[(bbox.x0+bbox.x1)/2, bbox.y0-10], \n xycoords='figure pixels', ha='center', va='top', size=size-5, c='C1'\n)\n\nfor text in (aname, art_text):\n text.set_path_effects(\n [pe.withSimplePatchShadow(shadow_rgbFace='C0', alpha=.3), pe.Normal()]\n )\n\nplt.tight_layout()\nfig.subplots_adjust(left=.3, right=.7, bottom=.3, top=.95)\n\nplt.savefig('header.png')\n\nheader = Image.open('header.png')\nbody = Image.open('body.png')\n\nboth = Image.new('RGBA', size=(header.width, header.height+body.height))\nboth.paste(header, (0, 0))\nboth.paste(body, (0, header.height))\nboth.save(GRAPH_DIR / f'{name}.png')\n"
] |
[
[
"matplotlib.patheffects.withSimplePatchShadow",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.style.use",
"matplotlib.patheffects.Normal",
"matplotlib.pyplot.imread"
]
] |
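Illustrative aside, not part of the dataset row above: the draw.py cell styles its title and artist annotations with a drop shadow via matplotlib path effects. A minimal sketch of that styling on a throwaway figure, with placeholder text, might look like this:

import matplotlib.pyplot as plt
import matplotlib.patheffects as pe

fig, ax = plt.subplots()
ax.set_axis_off()
txt = ax.annotate("Album title", xy=(0.5, 0.5), xycoords="figure fraction",
                  ha="center", va="center", size=24, weight="bold")
# Shadow first, then the normal rendering on top, as in draw.py.
txt.set_path_effects([pe.withSimplePatchShadow(alpha=0.3), pe.Normal()])
plt.show()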
seyfullah/stockprediction
|
[
"aab0547cc1316a116ad032137722b73a36e67a51"
] |
[
"experiments/2.py"
] |
[
"import torch\nimport matplotlib.pyplot as plt\nfrom bindsnet.network import Network\nfrom bindsnet.network.nodes import Input, LIFNodes\nfrom bindsnet.network.topology import Connection\nfrom bindsnet.network.monitors import Monitor\nfrom bindsnet.analysis.plotting import plot_spikes, plot_voltages\n\n# Simulation time.\ntime = 500\n# Create the network.\nnetwork = Network()\n# Create and add input, output layers.\nsource_layer = Input(n=100)\ntarget_layer = LIFNodes(n=1000)\nnetwork.add_layer(layer=source_layer, name=\"A\")\nnetwork.add_layer(layer=target_layer, name=\"B\")\n# Create connection between input and output layers.\nforward_connection = Connection(source=source_layer, target=target_layer,\n w=0.05 + 0.1 * torch.randn(source_layer.n, target_layer.n),\n # Normal(0.05, 0.01)˓→weights.\n )\nnetwork.add_connection(connection=forward_connection, source=\"A\", target=\"B\")\n\n# Create recurrent connection in output layer.\nrecurrent_connection = Connection(source=target_layer, target=target_layer, w=0.025 * (torch.eye(target_layer.n) - 1),\n # Small, inhibitory \"competitive\"˓→weights.\n )\nnetwork.add_connection(connection=recurrent_connection, source=\"B\", target=\"B\")\n# Create and add input and output layer monitors.\nsource_monitor = Monitor(obj=source_layer, state_vars=(\"s\",),\n # Record spikes and voltages.\n time=time,# Length of simulation (if known ahead of time).\n )\ntarget_monitor = Monitor(obj=target_layer, state_vars=(\"s\", \"v\"),\n # Record spikes and voltages.\n time=time,# Length of simulation (if known ahead of time).\n )\nnetwork.add_monitor(monitor=source_monitor, name=\"A\")\nnetwork.add_monitor(monitor=target_monitor, name=\"B\")\n# Create input spike data, where each spike is distributed according to Bernoulli(0.˓→1).\ninput_data = torch.bernoulli(0.1 * torch.ones(time, source_layer.n)).byte()\ninputs = {\"A\": input_data}\n# Simulate network on input data.\nnetwork.run(inputs=inputs, time=time)\n# Retrieve and plot simulation spike, voltage data from monitors.\nspikes = {\"A\": source_monitor.get(\"s\"), \"B\": target_monitor.get(\"s\")}\nvoltages = {\"B\": target_monitor.get(\"v\")}\nplt.ioff()\nplot_spikes(spikes)\nplot_voltages(voltages, plot_type=\"line\")\nplt.show()\n"
] |
[
[
"torch.ones",
"matplotlib.pyplot.show",
"torch.eye",
"matplotlib.pyplot.ioff",
"torch.randn"
]
] |
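Illustrative aside, not part of the dataset row above: the experiments/2.py cell wires up a BindsNET network, but the tensors it feeds in are plain PyTorch. A sketch of just those constructions (Bernoulli spike trains plus feedforward and inhibitory recurrent weights), with the same sizes as the script, might look like this:

import torch

time_steps, n_in, n_out = 500, 100, 1000

# Bernoulli(0.1) spikes: one row per timestep, one column per input neuron.
spikes = torch.bernoulli(0.1 * torch.ones(time_steps, n_in)).byte()

# Feedforward weights 0.05 + 0.1 * N(0, 1); recurrent weights are 0 on the
# diagonal and -0.025 elsewhere (small inhibitory "competition").
w_forward = 0.05 + 0.1 * torch.randn(n_in, n_out)
w_recurrent = 0.025 * (torch.eye(n_out) - 1)

print(spikes.shape, w_forward.shape, w_recurrent.shape)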
karolciba/playground
|
[
"bfba14eaacfb6e7f820b85f95d9a1a72e251489e"
] |
[
"ecg/ecg_hmm.py"
] |
[
"#!/usr/bin/env python\n#\n# changes to hmmlearn:\n# 0. negative delta doesnt end \n# 1. prevent overfitting by not allowing for setting transition prob to 0\n\n# ideas:\n# 0. negative delta in logprobability is not wrong - it means model is rebuilding\n# lower train signal probability is exchange for better distribution (?)\n# validate model change not logprob\n# after minus often returns to big pluses\n# 1. init means with real signal (not much gain :(\n# 2. start with small model and double after each convergence (something positive)\n# 3. find unprobable states and randomize them - to escape local minima ?\n# 3.1 change least unprobable states (not used in pred) and change for best matched\n# - in naive way (replace row, replace mean, max covar from existing)\n# doesnt work.\n# 3.2 replace not used states to random state from used-ones (weighted?) and\n# fix transitions to those to states - setting them to 1/2 of original\n# slightly moving away means (by 10%?)\n# 3.3 do not fix unused for small nets or/and when states have similar transitions\n# for small nets whole model seems like Gaussian Mixture in disguise\n# for similar transitions change seems not doing anything good\n# 3.4 fix issues with convergence (consider negative delta?)\n\n# 4. gradient descent/meadow path etc\n# 5. reinforment? - find most probable for subsequence and strenghten it ?\n# which is attempt to minimize -> argmin Var( P(state | model, data ) in function of model\n# trying to maximize information density carried by model, in attemp to\n# achieve each state utilization equal\n\n\nimport wfdb\n# import hmm\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pickle\n\nfrom hmmlearn import hmm\nfrom hmmlearn import utils\nfrom sklearn.externals import joblib\n\n\ndef preprocess(decimation = 8):\n print(\"Preprocessing\")\n sig, fields = wfdb.srdsamp('data/mitdb/100')\n\n # ecg = sig[:500000,0]\n ecg = sig[:,0]\n\n from scipy import signal\n # eorig = signal.resample(eorig, len(eorig))\n if decimation != 0:\n ecg = signal.decimate(ecg, decimation, ftype='fir')\n\n diff = np.diff(ecg)\n\n cum = 0\n filtered = np.empty_like(ecg)\n for i in range(len(diff)):\n cum *= 0.9\n cum += diff[i]\n filtered[i] = cum\n\n return ecg[:-1],diff,filtered[:-1]\n\ndef latest_backup():\n import os\n import re\n\n files = os.listdir('.')\n pkl = [ f for f in files if re.match('model.*.pkl$',f) ]\n\n if not pkl:\n return None\n\n srt = sorted(pkl)\n f_name = srt[-1]\n\n print(\"Loading model\", f_name)\n model = joblib.load(f_name)\n return model\n\ndef plot(model, div = 8):\n ecg, diff, filt = preprocess(div)\n\n # e = np.atleast_2d(eorig).T\n # sube = np.atleast_2d(eorig[0:3000]).T\n e = diff[:10000].reshape(-1,1)\n # e = np.column_stack((diff,filt))\n sube = e[:3000]\n\n plt.clf()\n plt.subplot(411)\n plt.imshow(model.transmat_,interpolation='nearest', shape=model.transmat_.shape)\n ax = plt.subplot(412)\n plt.plot(e[0:3000])\n plt.plot(ecg[:3000])\n # plt.imshow(model.emissions,interpolation='nearest', shape=model.emissions.shape)\n plt.subplot(413, sharex = ax)\n model.algorithm = 'viterbi'\n plt.plot(model.predict(sube))\n model.algorithm = 'map'\n plt.plot(model.predict(sube))\n plt.subplot(414, sharex = ax)\n samp = model.sample(3000)[0]\n plt.plot(samp)\n plt.plot(np.cumsum(samp[:,0]))\n plt.show()\n plt.pause(1)\n\ndef model_plot(model):\n plt.clf()\n plt.subplot(121)\n plt.imshow(model.transmat_)\n plt.subplot(122)\n plt.plot(model.means_.flatten())\n plt.plot(model.covars_.flatten())\n plt.show()\n 
plt.pause(1)\n\ndef diff_plot(model,previous):\n plt.clf()\n ax = plt.subplot(221)\n plt.imshow(model.transmat_)\n plt.subplot(222, sharex = ax, sharey = ax)\n plt.imshow(model.transmat_ - previous.transmat_)\n ax = plt.subplot(223)\n plt.plot(model.means_)\n plt.plot(model.means_ - previous.means_)\n plt.subplot(224, sharex = ax)\n plt.plot(model._covars_)\n plt.plot(model._covars_ - previous._covars_)\n\ndef states_plot(model,div=8):\n ecg, diff, filt = preprocess(div)\n\n # e = np.atleast_2d(eorig).T\n # sube = np.atleast_2d(eorig[0:3000]).T\n e = diff[:3000].reshape(-1,1)\n\n logprob, posterior = model.score_samples(e)\n\n plt.clf()\n ax = plt.subplot(211)\n plt.plot(e)\n plt.subplot(212,sharex=ax)\n plt.imshow(posterior.T, aspect='auto')\n\n\ndef usage_plot(model,div=8):\n ecg, diff, filt = preprocess(div)\n\n # e = np.atleast_2d(eorig).T\n # sube = np.atleast_2d(eorig[0:3000]).T\n e = diff[:10000].reshape(-1,1)\n\n logprob, posterior = model.score_samples(e)\n usage = np.sum(posterior.T,axis=1)\n\n # plt.clf()\n plt.plot(np.sort(usage)/float(sum(usage)))\n\ndef clone_model(model):\n from sklearn.externals import joblib\n joblib.dump(model,\"/tmp/foobarmodel.pkl\")\n return joblib.load(\"/tmp/foobarmodel.pkl\")\n\ndef double_model(model):\n\n symbols = model.n_components\n n_symbols = 2 * symbols\n\n n_model = hmm.GaussianHMM(n_components=n_symbols, verbose=True, min_covar=0.01, init_params='', n_iter = model.n_iter, covariance_type=\"diag\", tol=model.tol)\n\n transmat_ = np.random.random((n_symbols,n_symbols))/1000\n transmat_[0:symbols,0:symbols] = model.transmat_\n transmat_[symbols:n_symbols,symbols:n_symbols] = model.transmat_\n # unbalance it slightly\n transmat_ += np.random.random((n_symbols,n_symbols))/1000\n n_model.transmat_ = transmat_\n utils.normalize(n_model.transmat_, 1)\n\n n_model.startprob_ = np.concatenate((model.startprob_, model.startprob_))\n utils.normalize(n_model.startprob_)\n n_model.means_ = np.concatenate((model.means_, model.means_))\n n_model._covars_ = np.concatenate((model._covars_, model._covars_))\n\n return n_model\n\ndef train(model = None):\n # backup: symbols = 128, div = 1\n # symbols = 128\n symbols = 1024\n div = 8\n ecg, diff, filt = preprocess(div)\n\n # e = np.atleast_2d(eorig).T\n # sube = np.atleast_2d(eorig[0:3000]).T\n # e = np.column_stack((diff,filt))\n e = diff[:10000].reshape(-1,1)\n sube = e[:3000]\n\n # eps = np.finfo(np.float64).eps\n import sys\n eps = sys.float_info.min * symbols\n eps = 2e-290\n\n plt.ion()\n plt.clf()\n plt.plot(e[0:3000])\n # plt.subplot(311)\n # plt.imshow(model.transmat_,interpolation='nearest', shape=model.transitions.shape)\n # plt.subplot(312)\n # plt.imshow(model.,interpolation='nearest', shape=model.emissions.shape)\n # plt.subplot(313)\n # plt.plot(sampl)\n plt.show()\n plt.pause(1)\n\n if not model:\n model = hmm.GaussianHMM(n_components=symbols, verbose=True, min_covar=0.01, init_params='cmts', n_iter = 100, tol = 1, covariance_type=\"diag\")\n # left to right model, not staying in state but can jump to start\n\n # transmat_ = np.triu(np.random.random((symbols,symbols)),1)\n # transmat_[0,0] = 0\n # transmat_[:,0] = 1.0/symbols\n # model.transmat_ = transmat_\n # utils.normalize(model.transmat_, 1)\n\n # transmat_ = np.random.random((symbols,symbols))/10\n # # transmat_ += np.roll(np.eye(symbols),1,1)\n # model.transmat_ = transmat_\n # utils.normalize(model.transmat_, 1)\n\n # model.means_ = np.random.random((symbols,1))\n # model.means_ = e[0:symbols].reshape(-1,1)\n # model.covars_ = 
np.random.random((symbols,1))\n # model = hmm.GMMHMM(n_components=symbols, verbose=True, n_iter = 10, covariance_type=\"full\")\n else:\n plot(model, div)\n\n import os\n import re\n files = os.listdir('.')\n pkl = [ f for f in files if re.match('model.*.pkl$',f) ]\n srt = sorted(pkl)\n\n i = len(srt)\n # plt.savefig(\"out{}.png\".format(i))\n\n # try:\n\n\n best_model = clone_model(model)\n best_score = -999999999999.0\n print(\"\\nIteration {}\".format(i))\n while True:\n i += 1\n\n model.fit(e)\n model.init_params = ''\n joblib.dump(model, \"model{:06d}.pkl\".format(i))\n # print(model.transmat_)\n\n plt.clf()\n plt.subplot(411)\n plt.imshow(model.transmat_,interpolation='nearest', shape=model.transmat_.shape)\n ax = plt.subplot(412)\n plt.plot(e[0:3000])\n # plt.imshow(model.emissions,interpolation='nearest', shape=model.emissions.shape)\n plt.subplot(413, sharex = ax)\n plt.plot(model.predict(sube))\n plt.subplot(414, sharex = ax)\n samp = model.sample(3000)[0]\n plt.plot(samp)\n plt.plot(np.cumsum(samp[:,0]))\n plt.show()\n plt.pause(0.001)\n plt.savefig(\"out{:06d}.png\".format(i))\n\n # score = model.monitor_.history[1]\n # if score > best_score:\n # print(\"Found better {} than {}, switching\".format(score,best_score))\n # best_score = score\n # best_model = clone_model(model)\n # else:\n # model = best_model\n\n # hist = model.monitor_.history\n # if abs(hist[0] - hist[1]) < 0.01:\n # break\n\n fix_unused(model,e)\n\n # model.transmat_[model.transmat_ <= eps] = eps\n # utils.normalize(model.transmat_, 1)\n # except:\n # pass\n\n\n return model\n\ndef recursive_train(model = None):\n while True:\n model = train(model)\n print(\"doubling model\",model.n_components)\n model = double_model(model)\n\ndef reorder_usage(model, div = 8):\n ecg, diff, filt = preprocess(div)\n\n e = diff[:10000].reshape(-1,1)\n\n logprob, posterior = model.score_samples(e)\n usage = np.sum(posterior.T,axis=1)\n keys = np.flip(np.argsort(usage),axis=0)\n\n model.means_ = model.means_[keys]\n model._covars_ = model._covars_[keys]\n model.startprob_ = model.startprob_[keys]\n\n model.transmat_ = model.transmat_[keys]\n model.transmat_[:,:] = model.transmat_[:,keys]\n\ndef reorder_model(model, div = 8):\n ecg, diff, filt = preprocess(div)\n\n # e = np.atleast_2d(eorig).T\n # sube = np.atleast_2d(eorig[0:3000]).T\n e = diff[:10000].reshape(-1,1)\n # e = np.column_stack((diff,filt))\n # sube = e[:3000]\n\n pred = model.predict(e)\n bc = np.bincount(pred,minlength=model.n_components)\n\n keys = np.flip(np.argsort(bc),axis=0)\n\n model.means_ = model.means_[keys]\n model._covars_ = model._covars_[keys]\n model.startprob_ = model.startprob_[keys]\n\n\n model.transmat_ = model.transmat_[keys]\n model.transmat_[:,:] = model.transmat_[:,keys]\n\n\ndef fix_unused(model, signal):\n # \"\"\"Unused states decided MAP or viterbi usage\"\"\"\n # model.algorithm = 'map'\n # pred = model.predict(signal)\n # usage = np.bincount(pred,minlength=model.n_components)\n # treshold = np.sort(usage)[model.n_components//10]\n #\n # ids = np.argwhere(usage <= treshold).flatten()\n # used = np.argwhere(usage > treshold).flatten()\n # probs = usage/float(sum(usage))\n\n # \"\"\"Unused states decided on average state probability\"\"\"\n # logprob, posterior = model.score_samples(signal)\n # usage = np.sum(posterior.T,axis=1)\n # treshold = np.sort(usage)[model.n_components//10]\n # ids = np.argwhere(usage <= treshold).flatten()\n # used = np.argwhere(usage > treshold).flatten()\n #\n # probs = usage/float(sum(usage))\n\n\n \"\"\"Unused 
states decided on average state probability\"\"\"\n logprob, posterior = model.score_samples(signal)\n usage = np.sum(posterior.T,axis=1)\n # treshold = np.sort(usage)[model.n_components//10]\n # ids = np.argwhere(usage <= treshold).flatten()\n # used = np.argwhere(usage > treshold).flatten()\n probs = usage/float(sum(usage))\n ids = np.argwhere(probs <= 0.001).flatten()\n used = np.argwhere(probs > 0.001).flatten()\n\n mapped = {}\n # model.algorithm = 'map'\n\n import random\n import sklearn.mixture\n\n print(\"There are {} used and {} unsued\".format(len(used),len(ids)))\n\n ids = ids[0:len(used)]\n print(\"After clipping there are {} used and {} unused\".format(len(used),len(ids)))\n\n for id in ids:\n # replace_id = np.random.choice(used)\n # randomly select node to clone according to its \"information weight\"\n # replace_id = np.random.choice(model.n_components,p=probs)\n replace_id = random.choices(range(model.n_components),weights=probs)[0]\n\n mapped[id] = [replace_id, int(probs[id]*1000)/1000, int(probs[replace_id]*1000)/1000, int(model.transmat_[replace_id,replace_id]*1000)/1000]\n\n\n\n # if (np.sum(model.transmat_[:,replace_id])) > 3):\n # unroll thight self loop\n if model.transmat_[replace_id,replace_id] > 0.1:\n # can clone this state any more\n probs[replace_id] = 0\n probs[id] = probs[replace_id]\n\n mapped[id].append('s')\n in_trans = model.transmat_[:,id].copy()\n model.transmat_[id,:] = model.transmat_[replace_id,:]\n model.transmat_[replace_id,id] += model.transmat_[replace_id,replace_id]\n model.transmat_[id,id] += model.transmat_[replace_id,replace_id]\n model.transmat_[replace_id,replace_id] = 2e-290\n\n # staing in giver state is forbidden\n # in place of that transit to cloned state\n # model.transmat_[replace_id,id] += model.transmat_[replace_id,replace_id]\n # model.transmat_[replace_id,replace_id] = 0.0001\n utils.normalize(model.transmat_, 1)\n\n model.startprob_[replace_id] /= 2.\n model.startprob_[id] += model.startprob_[replace_id]\n\n model.means_[id] = model.means_[replace_id]\n # diverge them slighly to cover more ground\n # model.means_[replace_id] *= 1.001\n model._covars_[id] = model._covars_[replace_id]\n #TODO: unroll longer loops\n\n #refit to general node\n # to many ins, to many out, to large emission - coverage\n elif random.random() > 0.5:\n # lower prob of used node\n # allow cloning of both\n probs[replace_id] /= 2\n probs[id] = probs[replace_id]\n\n size = model.n_components\n ord = np.random.binomial(1,0.5,model.n_components)\n nord = 1 - ord\n\n mapped[id].append('i')\n in_trans = model.transmat_[:,id].copy()\n # clone the not used node\n # out transitions (row) like in original\n model.transmat_[id,:] = model.transmat_[replace_id,:]\n\n # in trasitions (column) half for each of two (original and clone)\n model.transmat_[:,id][ord == 1] = model.transmat_[:,replace_id][ord == 1]\n model.transmat_[:,id][ord == 0] = 2e-290\n model.transmat_[:,replace_id][ord == 1] = 2e-290\n\n # original trans should be small, add to them to keep row normalization to 1\n utils.normalize(model.transmat_, 1)\n\n model.startprob_[replace_id] /= 2.\n model.startprob_[id] += model.startprob_[replace_id]\n\n model.means_[id] = model.means_[replace_id]\n model._covars_[id] = model._covars_[replace_id]\n else:\n # lower prob of used node\n # allow cloning of both\n probs[replace_id] /= 2\n probs[id] = probs[replace_id]\n\n size = model.n_components\n ord = np.random.binomial(1,0.5,model.n_components)\n nord = 1 - ord\n\n mapped[id].append('o')\n in_trans = 
model.transmat_[:,id].copy()\n # clone the not used node\n # out transitions (row) like in original\n model.transmat_[id,:][ord == 1] = model.transmat_[replace_id,:][ord == 1]\n model.transmat_[id,:][ord == 0] = 2e-290\n model.transmat_[replace_id,:][ord == 1] = 2e-290\n\n # in trasitions (column) half for each of two (original and clone)\n model.transmat_[:,replace_id] /= 2.\n model.transmat_[:,id] = in_trans/2. + model.transmat_[:,replace_id]\n # model.transmat_[:,replace_id] += in_trans/2.\n\n # original trans should be small, add to them to keep row normalization to 1\n utils.normalize(model.transmat_, 1)\n\n model.startprob_[replace_id] /= 2.\n model.startprob_[id] += model.startprob_[replace_id]\n\n model.means_[id] = model.means_[replace_id]\n model._covars_[id] = model._covars_[replace_id]\n\n print(\"fixed no nodes\",len(ids), mapped)\n\ndef fix_unused_to_big_covar(model, signal):\n pred = model.predict(signal)\n bc = np.bincount(pred,minlength=model.n_components)\n max_id = np.argmax(bc)\n max_covar_id = np.argmax(model.covars_)\n ids = np.argwhere(model._covars_.flatten() > 100).flatten()\n\n used = np.argwhere(bc != 0).flatten()\n probs = bc/float(sum(bc))\n\n mapped = {}\n # model.algorithm = 'map'\n\n import random\n import sklearn.mixture\n\n ids = ids[0:len(used)]\n\n for id in ids:\n # replace_id = np.random.choice(used)\n # randomly select node to clone according to its \"information weight\"\n # replace_id = np.random.choice(model.n_components,p=probs)\n replace_id = random.choices(range(model.n_components),weights=bc)[0]\n\n mapped[id] = [replace_id, 2*bc[replace_id], int(model.transmat_[replace_id,replace_id]*1000)/1000]\n\n\n # lower prob of used node\n # allow cloning of both\n bc[replace_id] //= 2\n bc[id] = bc[replace_id]\n\n size = model.n_components\n ord = np.random.binomial(1,0.5,model.n_components)\n nord = 1 - ord\n\n mapped[id].append('g')\n in_trans = model.transmat_[:,id].copy()\n # clone the not used node\n # out transitions (row) like in original\n model.transmat_[id,ord] = model.transmat_[replace_id,ord]\n model.transmat_[id,nord] = 2e-290\n model.transmat_[replace_id,ord] = 2e-290\n\n # in trasitions (column) half for each of two (original and clone)\n model.transmat_[:,replace_id] /= 2.\n model.transmat_[:,id] = in_trans/2. 
+ model.transmat_[:,replace_id]\n # model.transmat_[:,replace_id] += in_trans/2.\n\n # original trans should be small, add to them to keep row normalization to 1\n utils.normalize(model.transmat_, 1)\n\n model.startprob_[replace_id] /= 2.\n model.startprob_[id] += model.startprob_[replace_id]\n\n # try:\n # gmm = sklearn.mixture.GMM(n_components=2, verbose=False)\n # gmm.fit(signal[pred == replace_id])\n # model.means_[id] = gmm.means_[0]\n # model.means_[replace_id] = gmm.means_[1]\n # model._covars_[id] = gmm.covars_[0]\n # model._covars_[replace_id] = gmm.covars_[1]\n # except:\n model.means_[id] = model.means_[replace_id]\n # diverge them slighly to cover more ground\n # model.means_[replace_id] *= 1.001\n model._covars_[id] = model._covars_[replace_id]\n\n\n print(\"fixed no nodes\",len(ids), mapped)\n\ndef fix_unused_best(model, signal):\n pred = model.predict(signal)\n bc = np.bincount(pred,minlength=model.n_components)\n max_id = np.argmax(bc)\n max_covar_id = np.argmax(model.covars_)\n ids = np.argwhere(bc == 0).flatten()\n used = np.argwhere(bc != 0).flatten()\n probs = bc/float(sum(bc))\n\n mapped = {}\n # model.algorithm = 'map'\n\n import random\n import sklearn.mixture\n\n ids = ids[0:len(used)]\n\n for id in ids:\n # replace_id = np.random.choice(used)\n # randomly select node to clone according to its \"information weight\"\n # replace_id = np.random.choice(model.n_components,p=probs)\n replace_id = random.choices(range(model.n_components),weights=bc)[0]\n\n mapped[id] = [replace_id, 2*bc[replace_id], int(model.transmat_[replace_id,replace_id]*1000)/1000]\n\n\n\n # if (np.sum(model.transmat_[:,replace_id])) > 3):\n # unroll thight self loop\n if model.transmat_[replace_id,replace_id] > 0.1:\n # can clone this state any more\n bc[replace_id] = 0\n bc[id] = bc[replace_id]\n\n mapped[id].append('s')\n in_trans = model.transmat_[:,id].copy()\n model.transmat_[id,:] = model.transmat_[replace_id,:]\n model.transmat_[replace_id,id] += model.transmat_[replace_id,replace_id]\n model.transmat_[id,id] += model.transmat_[replace_id,replace_id]\n model.transmat_[replace_id,replace_id] = 2e-290\n\n # staing in giver state is forbidden\n # in place of that transit to cloned state\n # model.transmat_[replace_id,id] += model.transmat_[replace_id,replace_id]\n # model.transmat_[replace_id,replace_id] = 0.0001\n utils.normalize(model.transmat_, 1)\n\n model.startprob_[replace_id] /= 2.\n model.startprob_[id] += model.startprob_[replace_id]\n\n model.means_[id] = model.means_[replace_id]\n # diverge them slighly to cover more ground\n # model.means_[replace_id] *= 1.001\n model._covars_[id] = model._covars_[replace_id]\n #TODO: unroll longer loops\n\n #refit to general node\n # to many ins, to many out, to large emission - coverage\n else:\n # lower prob of used node\n # allow cloning of both\n bc[replace_id] //= 2\n bc[id] = bc[replace_id]\n\n size = model.n_components\n ord = np.random.binomial(1,0.5,model.n_components)\n nord = 1 - ord\n\n mapped[id].append('g')\n in_trans = model.transmat_[:,id].copy()\n # clone the not used node\n # out transitions (row) like in original\n model.transmat_[id,ord] = model.transmat_[replace_id,ord]\n model.transmat_[id,nord] = 2e-290\n model.transmat_[replace_id,ord] = 2e-290\n\n # in trasitions (column) half for each of two (original and clone)\n model.transmat_[:,replace_id] /= 2.\n model.transmat_[:,id] = in_trans/2. 
+ model.transmat_[:,replace_id]\n # model.transmat_[:,replace_id] += in_trans/2.\n\n # original trans should be small, add to them to keep row normalization to 1\n utils.normalize(model.transmat_, 1)\n\n model.startprob_[replace_id] /= 2.\n model.startprob_[id] += model.startprob_[replace_id]\n\n # try:\n # gmm = sklearn.mixture.GMM(n_components=2, verbose=False)\n # gmm.fit(signal[pred == replace_id])\n # model.means_[id] = gmm.means_[0]\n # model.means_[replace_id] = gmm.means_[1]\n # model._covars_[id] = gmm.covars_[0]\n # model._covars_[replace_id] = gmm.covars_[1]\n # except:\n model.means_[id] = model.means_[replace_id]\n # diverge them slighly to cover more ground\n # model.means_[replace_id] *= 1.001\n model._covars_[id] = model._covars_[replace_id]\n\n\n print(\"fixed no nodes\",len(ids), mapped)\n\ndef fix_unused_unroll(model, signal):\n pred = model.predict(signal)\n bc = np.bincount(pred,minlength=model.n_components)\n max_id = np.argmax(bc)\n max_covar_id = np.argmax(model.covars_)\n ids = np.argwhere(bc == 0).flatten()\n used = np.argwhere(bc != 0).flatten()\n probs = bc/float(sum(bc))\n\n mapped = {}\n\n import random\n import sklearn.mixture\n\n ids = ids[0:len(used)]\n\n for id in ids:\n # replace_id = np.random.choice(used)\n # randomly select node to clone according to its \"information weight\"\n # replace_id = np.random.choice(model.n_components,p=probs)\n replace_id = random.choices(range(model.n_components),weights=bc)[0]\n\n mapped[id] = (replace_id, 2*bc[replace_id])\n\n # lower prob of used node\n bc[replace_id] = 0\n # this will make:\n # cloned states for clone fail in GMixture, and make a identical copy\n # cloned states from origin to have same GMixture, and idendical copy as well\n # TODO: if thats okay - store relation and avoid refitting GMixture\n bc[id] = bc[replace_id]\n\n\n in_trans = model.transmat_[:,id].copy()\n\n model.transmat_[id,:] = model.transmat_[replace_id,:]\n model.transmat_[replace_id,id] += model.transmat_[replace_id,replace_id]\n model.transmat_[id,id] += model.transmat_[replace_id,replace_id]\n model.transmat_[replace_id,replace_id] = 2e-290\n\n # staing in giver state is forbidden\n # in place of that transit to cloned state\n # model.transmat_[replace_id,id] += model.transmat_[replace_id,replace_id]\n # model.transmat_[replace_id,replace_id] = 0.0001\n utils.normalize(model.transmat_, 1)\n\n model.startprob_[replace_id] /= 2.\n model.startprob_[id] += model.startprob_[replace_id]\n\n model.means_[id] = model.means_[replace_id]\n # diverge them slighly to cover more ground\n # model.means_[replace_id] *= 1.001\n model._covars_[id] = model._covars_[replace_id]\n\n\n print(\"fixed no nodes\",len(ids), mapped)\n\ndef fix_unused_fair(model, signal):\n pred = model.predict(signal)\n bc = np.bincount(pred,minlength=model.n_components)\n max_id = np.argmax(bc)\n max_covar_id = np.argmax(model.covars_)\n ids = np.argwhere(bc == 0).flatten()\n used = np.argwhere(bc != 0).flatten()\n probs = bc/float(sum(bc))\n\n mapped = {}\n\n import random\n import sklearn.mixture\n\n for id in ids:\n # replace_id = np.random.choice(used)\n # randomly select node to clone according to its \"information weight\"\n # replace_id = np.random.choice(model.n_components,p=probs)\n replace_id = random.choices(range(model.n_components),weights=bc)[0]\n # lower prob of used node\n bc[replace_id] //= 2\n # this will make:\n # cloned states for clone fail in GMixture, and make a identical copy\n # cloned states from origin to have same GMixture, and idendical copy as 
well\n # TODO: if thats okay - store relation and avoid refitting GMixture\n bc[id] = bc[replace_id]\n\n mapped[id] = (replace_id, 2*bc[replace_id])\n\n in_trans = model.transmat_[:,id].copy()\n # clone the not used node\n # out transitions (row) like in original\n model.transmat_[id,:] = model.transmat_[replace_id,:]\n # model.transmat_[id,replace_id] = node_trans\n # model.means_[replace_id] *= 0.99\n # in trasitions (column) half for each of two (original and clone)\n model.transmat_[:,replace_id] /= 2.\n # original trans should be small, add to them to keep row normalization to 1\n model.transmat_[:,id] = in_trans + model.transmat_[:,replace_id]\n # staing in giver state is forbidden\n # in place of that transit to cloned state\n # model.transmat_[replace_id,id] += model.transmat_[replace_id,replace_id]\n # model.transmat_[replace_id,replace_id] = 0.0001\n utils.normalize(model.transmat_, 1)\n model.startprob_[replace_id] /= 2.\n model.startprob_[id] += model.startprob_[replace_id]\n\n try:\n gmm = sklearn.mixture.GMM(n_components=2, verbose=False)\n gmm.fit(signal[pred == replace_id])\n model.means_[id] = gmm.means_[0]\n model.means_[replace_id] = gmm.means_[1]\n model._covars_[id] = gmm.covars_[0]\n model._covars_[replace_id] = gmm.covars_[1]\n except:\n model.means_[id] = model.means_[replace_id]\n # diverge them slighly to cover more ground\n # model.means_[replace_id] *= 1.001\n model._covars_[id] = model._covars_[replace_id]\n\n\n print(\"fixed no nodes\",len(ids), mapped)\n"
] |
[
[
"sklearn.externals.joblib.dump",
"numpy.sort",
"numpy.cumsum",
"scipy.signal.decimate",
"numpy.random.random",
"numpy.concatenate",
"numpy.bincount",
"numpy.random.binomial",
"numpy.argmax",
"numpy.empty_like",
"matplotlib.pyplot.subplot",
"numpy.diff",
"numpy.argsort",
"numpy.argwhere",
"matplotlib.pyplot.show",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.ion",
"numpy.sum",
"matplotlib.pyplot.plot",
"sklearn.externals.joblib.load",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.imshow"
]
] |
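Illustrative aside, not part of the dataset row above: the ecg/ecg_hmm.py cell experiments with many ways of re-seeding a GaussianHMM, but its core loop is fit / predict / sample from hmmlearn. A minimal sketch of that loop on synthetic data (standing in for the decimated, differenced ECG) might look like this:

import numpy as np
from hmmlearn import hmm

rng = np.random.default_rng(0)
signal = rng.normal(size=2000).reshape(-1, 1)  # stand-in for np.diff(ecg)

model = hmm.GaussianHMM(n_components=8, covariance_type="diag", n_iter=20)
model.fit(signal)

states = model.predict(signal)       # Viterbi state sequence
logprob = model.score(signal)        # log-likelihood of the training signal
samples, _ = model.sample(300)       # generate a short synthetic sequence
print(logprob, np.bincount(states, minlength=model.n_components))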
prajakta0111/pytorch-lightning
|
[
"3df02b880a6d145ff0aca24ea429c12c0d8f1181",
"3df02b880a6d145ff0aca24ea429c12c0d8f1181",
"3df02b880a6d145ff0aca24ea429c12c0d8f1181",
"3df02b880a6d145ff0aca24ea429c12c0d8f1181"
] |
[
"tests/metrics/regression/test_ssim.py",
"pytorch_lightning/metrics/functional/roc.py",
"tests/metrics/functional/test_reduction.py",
"tests/accelerators/test_tpu_backend.py"
] |
[
"from collections import namedtuple\nfrom functools import partial\n\nimport pytest\nimport torch\nfrom skimage.metrics import structural_similarity\n\nfrom pytorch_lightning.metrics.functional import ssim\nfrom pytorch_lightning.metrics.regression import SSIM\nfrom tests.metrics.utils import BATCH_SIZE, MetricTester, NUM_BATCHES\n\ntorch.manual_seed(42)\n\nInput = namedtuple('Input', [\"preds\", \"target\", \"multichannel\"])\n\n_inputs = []\nfor size, channel, coef, multichannel, dtype in [\n (12, 3, 0.9, True, torch.float),\n (13, 1, 0.8, False, torch.float32),\n (14, 1, 0.7, False, torch.double),\n (15, 3, 0.6, True, torch.float64),\n]:\n preds = torch.rand(NUM_BATCHES, BATCH_SIZE, channel, size, size, dtype=dtype)\n _inputs.append(Input(\n preds=preds,\n target=preds * coef,\n multichannel=multichannel,\n ))\n\n\ndef _sk_metric(preds, target, data_range, multichannel):\n c, h, w = preds.shape[-3:]\n sk_preds = preds.view(-1, c, h, w).permute(0, 2, 3, 1).numpy()\n sk_target = target.view(-1, c, h, w).permute(0, 2, 3, 1).numpy()\n if not multichannel:\n sk_preds = sk_preds[:, :, :, 0]\n sk_target = sk_target[:, :, :, 0]\n\n return structural_similarity(\n sk_target,\n sk_preds,\n data_range=data_range,\n multichannel=multichannel,\n gaussian_weights=True,\n win_size=11,\n sigma=1.5,\n use_sample_covariance=False\n )\n\n\n@pytest.mark.parametrize(\n \"preds, target, multichannel\",\n [(i.preds, i.target, i.multichannel) for i in _inputs],\n)\nclass TestSSIM(MetricTester):\n atol = 6e-5\n\n @pytest.mark.parametrize(\"ddp\", [True, False])\n @pytest.mark.parametrize(\"dist_sync_on_step\", [True, False])\n def test_ssim(self, preds, target, multichannel, ddp, dist_sync_on_step):\n self.run_class_metric_test(\n ddp,\n preds,\n target,\n SSIM,\n partial(_sk_metric, data_range=1.0, multichannel=multichannel),\n metric_args={\"data_range\": 1.0},\n dist_sync_on_step=dist_sync_on_step,\n )\n\n def test_ssim_functional(self, preds, target, multichannel):\n self.run_functional_metric_test(\n preds,\n target,\n ssim,\n partial(_sk_metric, data_range=1.0, multichannel=multichannel),\n metric_args={\"data_range\": 1.0},\n )\n\n\n@pytest.mark.parametrize(\n ['pred', 'target', 'kernel', 'sigma'],\n [\n pytest.param([1, 16, 16], [1, 16, 16], [11, 11], [1.5, 1.5]), # len(shape)\n pytest.param([1, 1, 16, 16], [1, 1, 16, 16], [11, 11], [1.5]), # len(kernel), len(sigma)\n pytest.param([1, 1, 16, 16], [1, 1, 16, 16], [11], [1.5, 1.5]), # len(kernel), len(sigma)\n pytest.param([1, 1, 16, 16], [1, 1, 16, 16], [11], [1.5]), # len(kernel), len(sigma)\n pytest.param([1, 1, 16, 16], [1, 1, 16, 16], [11, 0], [1.5, 1.5]), # invalid kernel input\n pytest.param([1, 1, 16, 16], [1, 1, 16, 16], [11, 10], [1.5, 1.5]), # invalid kernel input\n pytest.param([1, 1, 16, 16], [1, 1, 16, 16], [11, -11], [1.5, 1.5]), # invalid kernel input\n pytest.param([1, 1, 16, 16], [1, 1, 16, 16], [11, 11], [1.5, 0]), # invalid sigma input\n pytest.param([1, 1, 16, 16], [1, 1, 16, 16], [11, 0], [1.5, -1.5]), # invalid sigma input\n ],\n)\ndef test_ssim_invalid_inputs(pred, target, kernel, sigma):\n pred_t = torch.rand(pred)\n target_t = torch.rand(target, dtype=torch.float64)\n with pytest.raises(TypeError):\n ssim(pred_t, target_t)\n\n pred = torch.rand(pred)\n target = torch.rand(target)\n with pytest.raises(ValueError):\n ssim(pred, target, kernel, sigma)\n",
"# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import List, Optional, Sequence, Tuple, Union\n\nimport torch\n\nfrom pytorch_lightning.metrics.functional.precision_recall_curve import (\n _binary_clf_curve,\n _precision_recall_curve_update,\n)\n\n\ndef _roc_update(\n preds: torch.Tensor,\n target: torch.Tensor,\n num_classes: Optional[int] = None,\n pos_label: Optional[int] = None,\n) -> Tuple[torch.Tensor, torch.Tensor, int, int]:\n return _precision_recall_curve_update(preds, target, num_classes, pos_label)\n\n\ndef _roc_compute(\n preds: torch.Tensor,\n target: torch.Tensor,\n num_classes: int,\n pos_label: int,\n sample_weights: Optional[Sequence] = None,\n) -> Union[Tuple[torch.Tensor, torch.Tensor, torch.Tensor], Tuple[List[torch.Tensor], List[torch.Tensor],\n List[torch.Tensor]]]:\n\n if num_classes == 1:\n fps, tps, thresholds = _binary_clf_curve(\n preds=preds, target=target, sample_weights=sample_weights, pos_label=pos_label\n )\n # Add an extra threshold position\n # to make sure that the curve starts at (0, 0)\n tps = torch.cat([torch.zeros(1, dtype=tps.dtype, device=tps.device), tps])\n fps = torch.cat([torch.zeros(1, dtype=fps.dtype, device=fps.device), fps])\n thresholds = torch.cat([thresholds[0][None] + 1, thresholds])\n\n if fps[-1] <= 0:\n raise ValueError(\"No negative samples in targets, false positive value should be meaningless\")\n fpr = fps / fps[-1]\n\n if tps[-1] <= 0:\n raise ValueError(\"No positive samples in targets, true positive value should be meaningless\")\n tpr = tps / tps[-1]\n\n return fpr, tpr, thresholds\n\n # Recursively call per class\n fpr, tpr, thresholds = [], [], []\n for c in range(num_classes):\n preds_c = preds[:, c]\n res = roc(\n preds=preds_c,\n target=target,\n num_classes=1,\n pos_label=c,\n sample_weights=sample_weights,\n )\n fpr.append(res[0])\n tpr.append(res[1])\n thresholds.append(res[2])\n\n return fpr, tpr, thresholds\n\n\ndef roc(\n preds: torch.Tensor,\n target: torch.Tensor,\n num_classes: Optional[int] = None,\n pos_label: Optional[int] = None,\n sample_weights: Optional[Sequence] = None,\n) -> Union[Tuple[torch.Tensor, torch.Tensor, torch.Tensor], Tuple[List[torch.Tensor], List[torch.Tensor],\n List[torch.Tensor]]]:\n \"\"\"\n Computes the Receiver Operating Characteristic (ROC).\n\n Args:\n preds: predictions from model (logits or probabilities)\n target: ground truth values\n num_classes: integer with number of classes. Not nessesary to provide\n for binary problems.\n pos_label: integer determining the positive class. Default is ``None``\n which for binary problem is translate to 1. 
For multiclass problems\n this argument should not be set as we iteratively change it in the\n range [0,num_classes-1]\n sample_weights: sample weights for each data point\n\n Returns:\n 3-element tuple containing\n\n fpr:\n tensor with false positive rates.\n If multiclass, this is a list of such tensors, one for each class.\n tpr:\n tensor with true positive rates.\n If multiclass, this is a list of such tensors, one for each class.\n thresholds:\n thresholds used for computing false- and true postive rates\n\n Example (binary case):\n\n >>> from pytorch_lightning.metrics.functional import roc\n >>> pred = torch.tensor([0, 1, 2, 3])\n >>> target = torch.tensor([0, 1, 1, 1])\n >>> fpr, tpr, thresholds = roc(pred, target, pos_label=1)\n >>> fpr\n tensor([0., 0., 0., 0., 1.])\n >>> tpr\n tensor([0.0000, 0.3333, 0.6667, 1.0000, 1.0000])\n >>> thresholds\n tensor([4, 3, 2, 1, 0])\n\n Example (multiclass case):\n\n >>> from pytorch_lightning.metrics.functional import roc\n >>> pred = torch.tensor([[0.75, 0.05, 0.05, 0.05],\n ... [0.05, 0.75, 0.05, 0.05],\n ... [0.05, 0.05, 0.75, 0.05],\n ... [0.05, 0.05, 0.05, 0.75]])\n >>> target = torch.tensor([0, 1, 3, 2])\n >>> fpr, tpr, thresholds = roc(pred, target, num_classes=4)\n >>> fpr\n [tensor([0., 0., 1.]), tensor([0., 0., 1.]), tensor([0.0000, 0.3333, 1.0000]), tensor([0.0000, 0.3333, 1.0000])]\n >>> tpr\n [tensor([0., 1., 1.]), tensor([0., 1., 1.]), tensor([0., 0., 1.]), tensor([0., 0., 1.])]\n >>> thresholds # doctest: +NORMALIZE_WHITESPACE\n [tensor([1.7500, 0.7500, 0.0500]),\n tensor([1.7500, 0.7500, 0.0500]),\n tensor([1.7500, 0.7500, 0.0500]),\n tensor([1.7500, 0.7500, 0.0500])]\n \"\"\"\n preds, target, num_classes, pos_label = _roc_update(preds, target, num_classes, pos_label)\n return _roc_compute(preds, target, num_classes, pos_label, sample_weights)\n",
"import pytest\nimport torch\n\nfrom pytorch_lightning.metrics.utils import class_reduce, reduce\n\n\ndef test_reduce():\n start_tensor = torch.rand(50, 40, 30)\n\n assert torch.allclose(reduce(start_tensor, 'elementwise_mean'), torch.mean(start_tensor))\n assert torch.allclose(reduce(start_tensor, 'sum'), torch.sum(start_tensor))\n assert torch.allclose(reduce(start_tensor, 'none'), start_tensor)\n\n with pytest.raises(ValueError):\n reduce(start_tensor, 'error_reduction')\n\n\ndef test_class_reduce():\n num = torch.randint(1, 10, (100, )).float()\n denom = torch.randint(10, 20, (100, )).float()\n weights = torch.randint(1, 100, (100, )).float()\n\n assert torch.allclose(class_reduce(num, denom, weights, 'micro'), torch.sum(num) / torch.sum(denom))\n assert torch.allclose(class_reduce(num, denom, weights, 'macro'), torch.mean(num / denom))\n assert torch.allclose(\n class_reduce(num, denom, weights, 'weighted'), torch.sum(num / denom * (weights / torch.sum(weights)))\n )\n assert torch.allclose(class_reduce(num, denom, weights, 'none'), num / denom)\n",
"# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License\n\nimport pytest\nimport torch\nfrom torch import nn\n\nfrom pytorch_lightning import Trainer\nfrom pytorch_lightning.trainer.states import TrainerState\nfrom pytorch_lightning.utilities import _TPU_AVAILABLE\nfrom tests.helpers.boring_model import BoringModel\nfrom tests.helpers.utils import pl_multi_process_test\n\n\nclass WeightSharingModule(BoringModel):\n\n def __init__(self):\n super().__init__()\n self.layer_1 = nn.Linear(32, 10, bias=False)\n self.layer_2 = nn.Linear(10, 32, bias=False)\n self.layer_3 = nn.Linear(32, 10, bias=False)\n self.layer_3.weight = self.layer_1.weight\n\n def forward(self, x):\n x = self.layer_1(x)\n x = self.layer_2(x)\n x = self.layer_3(x)\n return x\n\n\n@pytest.mark.skipif(not _TPU_AVAILABLE, reason=\"test requires TPU machine\")\n@pl_multi_process_test\ndef test_resume_training_on_cpu(tmpdir):\n \"\"\" Checks if training can be resumed from a saved checkpoint on CPU\"\"\"\n # Train a model on TPU\n model = BoringModel()\n trainer = Trainer(\n checkpoint_callback=True,\n max_epochs=1,\n tpu_cores=8,\n )\n trainer.fit(model)\n\n model_path = trainer.checkpoint_callback.best_model_path\n\n # Verify saved Tensors are on CPU\n ckpt = torch.load(model_path)\n weight_tensor = list(ckpt[\"state_dict\"].values())[0]\n assert weight_tensor.device == torch.device(\"cpu\")\n\n # Verify that training is resumed on CPU\n trainer = Trainer(\n resume_from_checkpoint=model_path,\n checkpoint_callback=True,\n max_epochs=1,\n default_root_dir=tmpdir,\n )\n trainer.fit(model)\n assert trainer.state == TrainerState.FINISHED, f\"Training failed with {trainer.state}\"\n\n\n@pytest.mark.skipif(not _TPU_AVAILABLE, reason=\"test requires TPU machine\")\n@pl_multi_process_test\ndef test_if_test_works_after_train(tmpdir):\n \"\"\" Ensure that .test() works after .fit() \"\"\"\n\n # Train a model on TPU\n model = BoringModel()\n trainer = Trainer(max_epochs=1, tpu_cores=8, default_root_dir=tmpdir, fast_dev_run=True)\n trainer.fit(model)\n assert len(trainer.test(model)) == 1\n\n\n@pytest.mark.skipif(not _TPU_AVAILABLE, reason=\"test requires TPU machine\")\n@pl_multi_process_test\ndef test_weight_tying_warning(tmpdir, capsys=None):\n \"\"\"\n Ensure a warning is thrown if model parameter lengths do not match\n post moving to device.\n \"\"\"\n\n model = WeightSharingModule()\n trainer = Trainer(checkpoint_callback=True, max_epochs=1, tpu_cores=1)\n\n with pytest.warns(UserWarning, match=r'The model layers do not match after moving to the target device.'):\n result = trainer.fit(model)\n assert result\n\n\n@pytest.mark.skipif(not _TPU_AVAILABLE, reason=\"test requires TPU machine\")\n@pl_multi_process_test\ndef test_if_weights_tied(tmpdir, capsys=None):\n \"\"\"\n Test if weights are properly tied on `on_post_move_to_device`.\n Ensure no warning for parameter mismatch is thrown.\n \"\"\"\n\n class Model(WeightSharingModule):\n\n def on_post_move_to_device(self):\n self.layer_3.weight = 
self.layer_1.weight\n\n model = Model()\n trainer = Trainer(checkpoint_callback=True, max_epochs=1, tpu_cores=1)\n\n with pytest.warns(UserWarning) as warnings:\n result = trainer.fit(model)\n assert result\n\n assert not list(filter(lambda x: 'The model layers do not match' in str(x), warnings.list))\n assert len(trainer.test(model)) == 1\n"
] |
[
[
"torch.manual_seed",
"torch.rand"
],
[
"torch.zeros",
"torch.cat"
],
[
"torch.rand",
"torch.randint",
"torch.mean",
"torch.sum"
],
[
"torch.nn.Linear",
"torch.device",
"torch.load"
]
] |
rehohoho/models
|
[
"3577bc5959d7e2a3513ff1c5ec8b42c4f5bedca8"
] |
[
"official/vision/beta/ops/yolo_ops.py"
] |
[
"\"\"\"Utility ops for yolo data.\nReferenced from https://github.com/hunglc007/tensorflow-yolov4-tflite.\n\"\"\"\n\nfrom typing import List, Tuple, Mapping\n\nimport tensorflow as tf\nimport tensorflow_addons as tfa\n\n\ndef resize_image_and_bboxes(image: tf.Tensor, \n bboxes: tf.Tensor, \n target_size: Tuple[int, int],\n preserve_aspect_ratio: bool = False,\n image_height: int = None,\n image_width: int = None,\n image_normalized: bool = True):\n \"\"\"\n Args:\n image: `tf.Tensor` of shape (None, 5), denoting (x, y, w, h, class), non-normalized\n bboxes: `tf.Tensor` of shape (None, 4), denoting (ymin, xmin, ymax, xmax), non-normalized\n target: `Tuple[int,int]`, denoting height and width of resulting image/bbox\n preserve_aspect_ratio: `bool`, true to preserve image aspect ratio\n image_height: `int`, height of image\n image_width: `int`, width of image\n \n !! assumes image is normalized to 0-1\n \"\"\"\n target_height, target_width = target_size\n if image_height is None or image_width is None:\n image_height, image_width, _ = image.shape\n scale_height, scale_width = target_height / image_height, target_width / image_width\n\n if preserve_aspect_ratio:\n clip_size = max(image_height, image_width)\n pad_height = (clip_size - image_height)//2\n pad_width = (clip_size - image_width)//2\n\n if image_normalized:\n image = tf.pad(image, \n tf.constant([[pad_height, pad_height], [pad_width, pad_width], [0, 0]]), \n constant_values=0.5)\n else:\n image = tf.image.pad_to_bounding_box(\n image, pad_height, pad_width, clip_size, clip_size)\n\n scale = min(scale_height, scale_width)\n bboxes *= scale\n offset = tf.stack([pad_height, pad_width, pad_height, pad_width], axis=-1)\n bboxes += tf.cast(offset, tf.float32)\n \n else:\n scale = tf.stack([scale_height, scale_width, scale_height, scale_width], axis=-1)\n bboxes *= tf.cast(scale, tf.float32)\n\n image = tf.image.resize(image, target_size)\n\n return image, bboxes\n\n\ndef horizontal_flip_boxes(boxes, image_size):\n \"\"\"Flips normalized boxes horizontally.\"\"\"\n ymin, xmin, ymax, xmax = tf.split(\n value=boxes, num_or_size_splits=4, axis=1)\n flipped_xmin = tf.subtract(image_size, xmax)\n flipped_xmax = tf.subtract(image_size, xmin)\n flipped_boxes = tf.concat([ymin, flipped_xmin, ymax, flipped_xmax], 1)\n return flipped_boxes\n\n\ndef random_horizontal_flip(image, box, seed=None):\n \"\"\"Randomly flips input image and bounding boxes.\"\"\"\n with tf.name_scope('random_horizontal_flip'):\n do_flip = tf.greater(tf.random.uniform([], seed=seed), 0.5)\n\n image = tf.cond(\n do_flip,\n lambda: tf.image.flip_left_right(image),\n lambda: image)\n \n image_size = tf.cast(tf.shape(image)[1], tf.float32)\n box = tf.cond(\n do_flip,\n lambda: horizontal_flip_boxes(box, image_size),\n lambda: box)\n\n return image, box\n\n\ndef random_translate(image, box, t, seed=None):\n \"\"\"Randomly translate the image and boxes.\n\n Args:\n image: a `Tensor` representing the image.\n box: a `Tensor` represeting the boxes.\n t: an `int` representing the translation factor\n seed: an optional seed for tf.random operations\n Returns:\n image: a `Tensor` representing the augmented image.\n box: a `Tensor` representing the augmented boxes.\n \"\"\"\n t_x = tf.random.uniform(minval=-t,\n maxval=t,\n shape=(),\n dtype=tf.float32,\n seed=seed)\n t_y = tf.random.uniform(minval=-t,\n maxval=t,\n shape=(),\n dtype=tf.float32,\n seed=seed)\n image_size = tf.cast(tf.shape(image)[1], tf.float32)\n with tf.name_scope('translate_boxes'):\n offset = tf.stack([t_y, t_x, t_y, 
t_x], axis=-1)\n box += offset * image_size\n with tf.name_scope('translate_image'):\n if (t_x != 0 and t_y != 0):\n image_jitter = tf.convert_to_tensor([t_x, t_y])\n image_jitter.set_shape([2])\n image = tfa.image.translate(image, image_jitter * image_size)\n return image, box\n\n\ndef preprocess_true_boxes(bboxes: tf.Tensor,\n train_output_sizes: List[int],\n anchor_per_scale: int,\n num_classes: int,\n max_bbox_per_scale: int,\n strides: List[int],\n anchors: tf.Tensor):\n \"\"\"\n Args:\n bboxes: `tf.Tensor` of shape (None, 5), denoting (x, y, w, h, class), non-normalized\n train_output_sizes: `List[int]`, dimension of each scaled feature map\n anchor_per_scale: `int`, number of anchors per scale\n num_classes: `int`, number of classes.\n max_bbox_per_Scale: `int`, maximum number of bounding boxes per scale.\n strides: `List[int]` of output strides, ratio of input to output resolution.\n scaling of target feature depends on output sizes predicted by output strides\n anchors: `tf.Tensor` of shape (None, anchor_per_scale, 2) denothing positions\n of anchors\n\n !!! Assumes the images and boxes are preprocessed to fit image size.\n \"\"\"\n\n max_output_size = tf.reduce_max(train_output_sizes)\n label = tf.zeros((len(strides), max_output_size, max_output_size, anchor_per_scale, 5+num_classes))\n\n bboxes_xywh = tf.zeros((len(strides), max_bbox_per_scale, 4))\n bbox_count = tf.zeros((3,))\n const = tf.constant([1.0], dtype=tf.float32)\n\n for bbox in bboxes:\n bbox_xywh = tf.cast(bbox[:4], tf.float32)\n bbox_class_ind = tf.cast(bbox[4], tf.int64)\n\n smooth_onehot = tf.one_hot(\n bbox_class_ind, num_classes, \n on_value=(num_classes-1)/num_classes, \n off_value=1.0/num_classes)\n\n bbox_xywh_scaled = tf.repeat(\n bbox_xywh[tf.newaxis, :], repeats=len(strides), axis=0)\n bbox_xywh_scaled /= [[i] for i in strides]\n\n bbox_label = tf.concat([bbox_xywh, const, smooth_onehot], axis=-1)\n\n iou = []\n exist_positive = False\n \n # register for each stride and corresponding anchor setting\n for i in range(3):\n # get anchor bbox in xywh format\n anchors_xywh = tf.add(tf.floor(bbox_xywh_scaled[i, 0:2]), 0.5)\n anchors_xywh = tf.repeat(anchors_xywh[tf.newaxis, :], 3, axis=0) \n anchors_xywh = tf.concat([anchors_xywh, tf.cast(anchors[i], tf.float32)], axis=-1)\n\n # calculate iou for each anchor in this stride\n iou_scale = bbox_iou(bbox_xywh_scaled[i][tf.newaxis, :], anchors_xywh)\n iou.append(iou_scale)\n iou_mask = iou_scale > 0.3\n\n # update label at corresponding coordinate and boxes\n if tf.reduce_any(iou_mask):\n xind = tf.cast(tf.floor(bbox_xywh_scaled[i, 0]), tf.int32)\n yind = tf.cast(tf.floor(bbox_xywh_scaled[i, 1]), tf.int32)\n\n update = tf.gather([tf.zeros_like(bbox_label), bbox_label], tf.cast(iou_mask, tf.int64))\n label = tf.tensor_scatter_nd_update(label, indices=[[i, yind, xind]], updates=[update])\n\n bbox_ind = tf.cast(bbox_count[i] % max_bbox_per_scale, tf.int32)\n bboxes_xywh = tf.tensor_scatter_nd_update(bboxes_xywh, indices=[[i, bbox_ind]], updates=[bbox_xywh])\n bbox_count = tf.tensor_scatter_nd_add(bbox_count, indices=[[i]], updates=[1])\n\n exist_positive = True\n\n # registers for best anchor if bbox not registered\n if not exist_positive:\n best_anchor_ind = tf.argmax(tf.concat(iou, axis=-1), axis=-1) # anchor with highest iou\n best_detect = tf.cast(best_anchor_ind / anchor_per_scale, tf.int32) # corresponding stride level\n best_anchor = tf.cast(best_anchor_ind % anchor_per_scale, tf.int32) # anchor idx within stride\n \n xind = 
tf.cast(tf.floor(bbox_xywh_scaled[best_detect, 0]), tf.int32)\n yind = tf.cast(tf.floor(bbox_xywh_scaled[best_detect, 1]), tf.int32)\n\n label = tf.tensor_scatter_nd_update(\n label, indices=[[best_detect, yind, xind, best_anchor]], updates=[bbox_label])\n\n bbox_ind = tf.cast(bbox_count[best_detect] % max_bbox_per_scale, tf.int32)\n bboxes_xywh = tf.tensor_scatter_nd_update(bboxes_xywh, indices=[[best_detect, bbox_ind]], updates=[bbox_xywh])\n bbox_count = tf.tensor_scatter_nd_add(bbox_count, indices=[[best_detect]], updates=[1])\n\n # retrieve actual sizes of each label feature an box\n target_labels = {}\n target_bboxes = {}\n for i in range(len(strides)):\n target_labels[i] = label[i, :train_output_sizes[i], :train_output_sizes[i]]\n for i in range(len(strides)):\n target_bboxes[i] = bboxes_xywh[i]\n\n return target_labels, target_bboxes\n\n\ndef bbox_iou(bboxes1, bboxes2):\n \"\"\"\n @param bboxes1: (a, b, ..., 4)\n @param bboxes2: (A, B, ..., 4)\n x:X is 1:n or n:n or n:1\n @return (max(a,A), max(b,B), ...)\n ex) (4,):(3,4) -> (3,)\n (2,1,4):(2,3,4) -> (2,3)\n \"\"\"\n bboxes1_area = bboxes1[..., 2] * bboxes1[..., 3]\n bboxes2_area = bboxes2[..., 2] * bboxes2[..., 3]\n\n bboxes1_coor = tf.concat(\n [\n bboxes1[..., :2] - bboxes1[..., 2:] * 0.5,\n bboxes1[..., :2] + bboxes1[..., 2:] * 0.5,\n ],\n axis=-1,\n )\n bboxes2_coor = tf.concat(\n [\n bboxes2[..., :2] - bboxes2[..., 2:] * 0.5,\n bboxes2[..., :2] + bboxes2[..., 2:] * 0.5,\n ],\n axis=-1,\n )\n\n left_up = tf.maximum(bboxes1_coor[..., :2], bboxes2_coor[..., :2])\n right_down = tf.minimum(bboxes1_coor[..., 2:], bboxes2_coor[..., 2:])\n\n inter_section = tf.maximum(right_down - left_up, 0.0)\n inter_area = inter_section[..., 0] * inter_section[..., 1]\n\n union_area = bboxes1_area + bboxes2_area - inter_area + 1e-7\n\n iou = tf.math.divide_no_nan(inter_area, union_area)\n\n return iou\n\n\ndef concat_tensor_dict(tensor_dict: Mapping[str, tf.Tensor],\n num_classes: int):\n \"\"\"Collate bbox and corresponding class tensors, from dictionary of tensors\n \n Args:\n tensor: `dict` with `tf.Tensor` values, of shape [batch, output_size, \n output_size, anchors_per_scale, 5 + classes]\n num_classes: `int`, number of classes\n \n Returns:\n `bbox`: `tf.Tensor` of shape [batch, None, 4]\n `classes`: `tf.Tensor` of shape [batch, None, 1]\n \"\"\"\n bbox_tensors = []\n conf_tensors = []\n prob_tensors = []\n\n for _, prediction in tensor_dict.items():\n pred_xywh, pred_conf, pred_prob = tf.split(prediction, (4, 1, num_classes), axis=-1)\n tensor_shape = pred_prob.shape\n num_instance = tensor_shape[1] * tensor_shape[2] * tensor_shape[3]\n\n pred_prob = pred_conf * pred_prob\n pred_prob = tf.reshape(pred_prob, (-1, num_instance, num_classes))\n pred_conf = tf.reshape(pred_conf, (-1, num_instance))\n pred_xywh = tf.reshape(pred_xywh, (-1, num_instance, 4))\n\n bbox_tensors.append(pred_xywh)\n conf_tensors.append(pred_conf)\n prob_tensors.append(pred_prob)\n\n bbox_tensors = tf.concat(bbox_tensors, axis=1)\n conf_tensors = tf.concat(conf_tensors, axis=1)\n prob_tensors = tf.concat(prob_tensors, axis=1)\n \n return bbox_tensors, conf_tensors, prob_tensors\n\n\ndef filter_boxes(box_xywh: tf.Tensor, \n scores: tf.Tensor, \n score_threshold: float, \n input_shape: tf.Tensor):\n \"\"\"Filter out boxes according to score threshold\n\n Args:\n box_xywh: `tf.Tensor`, of shape (batch size, None, 4), each entry being\n (centre x, centre y, width, height) of bbox\n scores: `tf.Tensor`, of shape (batch size, None, 6), denoting probabilities\n of being each 
class\n score_threshold: `float`, threshold to filter with\n input_shape: `tf.Tensor` denoting (height width) of image\n \n Returns:\n `boxes`: valid bounding boxes after filtered, shape (1, None, 4)\n `conf`: confidence score corresponding to bounding boxes, shape (1, None, classes)\n \"\"\"\n scores_max = tf.math.reduce_max(scores, axis=-1)\n\n mask = scores_max >= score_threshold\n class_boxes = tf.boolean_mask(box_xywh, mask)\n pred_conf = tf.boolean_mask(scores, mask)\n class_boxes = tf.reshape(class_boxes, [tf.shape(scores)[0], -1, tf.shape(class_boxes)[-1]])\n pred_conf = tf.reshape(pred_conf, [tf.shape(scores)[0], -1, tf.shape(pred_conf)[-1]])\n\n box_xy, box_wh = tf.split(class_boxes, (2, 2), axis=-1)\n\n input_shape = tf.cast(input_shape, dtype=tf.float32)\n\n box_yx = box_xy[..., ::-1]\n box_hw = box_wh[..., ::-1]\n\n box_mins = (box_yx - (box_hw / 2.)) / input_shape\n box_maxes = (box_yx + (box_hw / 2.)) / input_shape\n boxes = tf.concat([\n box_mins[..., 0:1], # y_min\n box_mins[..., 1:2], # x_min\n box_maxes[..., 0:1], # y_max\n box_maxes[..., 1:2] # x_max\n ], axis=-1)\n\n return boxes, pred_conf\n"
] |
[
[
"tensorflow.reshape",
"tensorflow.zeros_like",
"tensorflow.stack",
"tensorflow.one_hot",
"tensorflow.cast",
"tensorflow.shape",
"tensorflow.concat",
"tensorflow.subtract",
"tensorflow.constant",
"tensorflow.image.pad_to_bounding_box",
"tensorflow.split",
"tensorflow.tensor_scatter_nd_add",
"tensorflow.floor",
"tensorflow.zeros",
"tensorflow.minimum",
"tensorflow.random.uniform",
"tensorflow.name_scope",
"tensorflow.math.divide_no_nan",
"tensorflow.repeat",
"tensorflow.boolean_mask",
"tensorflow.image.flip_left_right",
"tensorflow.convert_to_tensor",
"tensorflow.reduce_any",
"tensorflow.reduce_max",
"tensorflow.math.reduce_max",
"tensorflow.tensor_scatter_nd_update",
"tensorflow.maximum",
"tensorflow.image.resize"
]
] |
pranjalg96/Stylized-Image-captioning
|
[
"e95111f36e3eed83478c990fdd70c5b9604bf57a"
] |
[
"FLICKR/model_flickr.py"
] |
[
"import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom itertools import chain\n\n\nclass RNN_VAE(nn.Module):\n \"\"\"\n 1. Hu, Zhiting, et al. \"Toward controlled generation of text.\" ICML. 2017.\n 2. Bowman, Samuel R., et al. \"Generating sentences from a continuous space.\" arXiv preprint arXiv:1511.06349 (2015).\n 3. Kim, Yoon. \"Convolutional neural networks for sentence classification.\" arXiv preprint arXiv:1408.5882 (2014).\n \"\"\"\n\n def __init__(self, n_vocab, h_dim, z_dim, c_dim, p_word_dropout=0.3, unk_idx=0, pad_idx=1, start_idx=2, eos_idx=3, max_sent_len=35, pretrained_embeddings=None, freeze_embeddings=False, gpu=False):\n super(RNN_VAE, self).__init__()\n\n self.UNK_IDX = unk_idx\n self.PAD_IDX = pad_idx\n self.START_IDX = start_idx\n self.EOS_IDX = eos_idx\n self.MAX_SENT_LEN = max_sent_len\n\n self.n_vocab = n_vocab\n self.h_dim = h_dim\n self.z_dim = z_dim\n self.c_dim = c_dim\n self.p_word_dropout = p_word_dropout\n\n self.gpu = gpu\n\n \"\"\"\n Word embeddings layer\n \"\"\"\n if pretrained_embeddings is None:\n self.emb_dim = h_dim\n self.word_emb = nn.Embedding(n_vocab, h_dim, self.PAD_IDX)\n else:\n self.emb_dim = pretrained_embeddings.size(1)\n self.word_emb = nn.Embedding(n_vocab, self.emb_dim, self.PAD_IDX)\n\n # Set pretrained embeddings\n self.word_emb.weight.data.copy_(pretrained_embeddings)\n\n if freeze_embeddings:\n self.word_emb.weight.requires_grad = False\n\n \"\"\"\n Encoder is GRU with FC layers connected to last hidden unit\n \"\"\"\n # self.encoder = nn.GRU(self.emb_dim, h_dim)\n self.encoder = nn.LSTM(self.emb_dim, h_dim)\n self.q_mu = nn.Linear(h_dim, z_dim)\n self.q_logvar = nn.Linear(h_dim, z_dim)\n\n \"\"\"\n Decoder is GRU with `z` and `c` appended at its inputs\n \"\"\"\n # self.decoder = nn.GRU(self.emb_dim+z_dim+c_dim, z_dim+c_dim, dropout=0.3)\n self.decoder = nn.LSTM(self.emb_dim+z_dim+c_dim, z_dim+c_dim, dropout=0.3)\n self.decoder_fc = nn.Linear(z_dim+c_dim, n_vocab)\n\n \"\"\"\n Discriminator is CNN as in Kim, 2014\n \"\"\"\n self.conv3 = nn.Conv2d(1, 100, (3, self.emb_dim))\n self.conv4 = nn.Conv2d(1, 100, (4, self.emb_dim))\n self.conv5 = nn.Conv2d(1, 100, (5, self.emb_dim))\n\n self.disc_fc = nn.Sequential(\n nn.Dropout(0.5),\n nn.Linear(300, 2)\n )\n\n self.discriminator = nn.ModuleList([\n self.conv3, self.conv4, self.conv5, self.disc_fc\n ])\n\n \"\"\"\n Grouping the model's parameters: separating encoder, decoder, and discriminator\n \"\"\"\n self.encoder_params = chain(\n self.encoder.parameters(), self.q_mu.parameters(),\n self.q_logvar.parameters()\n )\n\n self.decoder_params = chain(\n self.decoder.parameters(), self.decoder_fc.parameters()\n )\n\n self.vae_params = chain(\n self.word_emb.parameters(), self.encoder_params, self.decoder_params\n )\n self.vae_params = filter(lambda p: p.requires_grad, self.vae_params)\n\n self.discriminator_params = filter(lambda p: p.requires_grad, self.discriminator.parameters())\n\n \"\"\"\n Use GPU if set\n \"\"\"\n if self.gpu:\n self.cuda()\n\n def forward_encoder(self, inputs):\n \"\"\"\n Inputs is batch of sentences: seq_len x mbsize\n \"\"\"\n inputs = self.word_emb(inputs)\n return self.forward_encoder_embed(inputs)\n\n def forward_encoder_embed(self, inputs):\n \"\"\"\n Inputs is embeddings of: seq_len x mbsize x emb_dim\n \"\"\"\n _, (h, c) = self.encoder(inputs, None)\n\n # Forward to latent\n h = h.view(-1, self.h_dim)\n mu = self.q_mu(h)\n logvar = self.q_logvar(h)\n\n return mu, logvar\n\n def 
sample_z(self, mu, logvar):\n \"\"\"\n Reparameterization trick: z = mu + std*eps; eps ~ N(0, I)\n \"\"\"\n eps = Variable(torch.randn(mu.size(0), self.z_dim)) # Why sample from N(0,I) only once? mu.size(0),\n eps = eps.cuda() if self.gpu else eps\n return mu + torch.exp(logvar) * eps # Why divide by 2?\n\n def sample_z_val(self, mu, logvar):\n \"\"\"\n Reparameterization trick: z = mu + std*eps; eps ~ N(0, I)\n \"\"\"\n eps = Variable(torch.randn(mu.size(0), self.z_dim)) # Why sample from N(0,I) only once? mu.size(0),\n eps = eps.cuda() if self.gpu else eps\n return mu + torch.exp(logvar) * eps # Why divide by 2?\n\n def sample_z_prior(self, mbsize):\n \"\"\"\n Sample z ~ p(z) = N(0, I)\n \"\"\"\n z = Variable(torch.randn(mbsize, self.z_dim))\n z = z.cuda() if self.gpu else z\n return z\n\n def sample_c_prior(self, mbsize):\n \"\"\"\n Sample c ~ p(c) = Cat([0.5, 0.5])\n \"\"\"\n c = Variable(\n torch.from_numpy(np.random.multinomial(1, [0.5, 0.5], mbsize).astype('float32'))\n )\n c = c.cuda() if self.gpu else c\n return c\n\n def forward_decoder(self, inputs, z, c):\n \"\"\"\n Inputs must be embeddings: seq_len x mbsize\n \"\"\"\n dec_inputs = self.word_dropout(inputs)\n # dec_inputs = inputs\n # Forward\n seq_len = dec_inputs.size(0)\n b_size = dec_inputs.size(1)\n\n # 1 x mbsize x (z_dim+c_dim)\n init_h = torch.cat([z.unsqueeze(0), c.unsqueeze(0)], dim=2)\n init_c = torch.zeros(1, b_size, self.z_dim+self.c_dim)\n\n init_c = init_c.cuda() if self.gpu else init_c\n\n inputs_emb = self.word_emb(dec_inputs) # seq_len x mbsize x emb_dim\n inputs_emb = torch.cat([inputs_emb, init_h.repeat(seq_len, 1, 1)], 2)\n\n outputs, _ = self.decoder(inputs_emb, (init_h, init_c))\n seq_len, mbsize, _ = outputs.size()\n\n outputs = outputs.view(seq_len*mbsize, -1)\n y = self.decoder_fc(outputs)\n y = y.view(seq_len, mbsize, self.n_vocab)\n\n return y\n\n def forward_discriminator(self, inputs):\n \"\"\"\n Inputs is batch of sentences: mbsize x seq_len\n \"\"\"\n inputs = self.word_emb(inputs)\n return self.forward_discriminator_embed(inputs)\n\n def forward_discriminator_embed(self, inputs):\n \"\"\"\n Inputs must be embeddings: mbsize x seq_len x emb_dim\n \"\"\"\n inputs = inputs.unsqueeze(1) # mbsize x 1 x seq_len x emb_dim\n\n x3 = F.relu(self.conv3(inputs)).squeeze()\n x4 = F.relu(self.conv4(inputs)).squeeze()\n x5 = F.relu(self.conv5(inputs)).squeeze()\n\n # Max-over-time-pool\n x3 = F.max_pool1d(x3, x3.size(2)).squeeze()\n x4 = F.max_pool1d(x4, x4.size(2)).squeeze()\n x5 = F.max_pool1d(x5, x5.size(2)).squeeze()\n\n x = torch.cat([x3, x4, x5], dim=1)\n\n y = self.disc_fc(x)\n\n return y\n\n def forward(self, sentence, use_c_prior=True):\n \"\"\"\n Params:\n -------\n sentence: sequence of word indices.\n use_c_prior: whether to sample `c` from prior or from `discriminator`.\n\n Returns:\n --------\n recon_loss: reconstruction loss of VAE.\n kl_loss: KL-div loss of VAE.\n \"\"\"\n self.train()\n\n mbsize = sentence.size(1)\n\n # sentence: '<start> I want to fly <eos>'\n # enc_inputs: '<start> I want to fly <eos>'\n # dec_inputs: '<start> I want to fly <eos>'\n # dec_targets: 'I want to fly <eos> <pad>'\n pad_words = Variable(torch.LongTensor([self.PAD_IDX])).repeat(1, mbsize)\n pad_words = pad_words.cuda() if self.gpu else pad_words\n\n enc_inputs = sentence\n dec_inputs = sentence\n dec_targets = torch.cat([sentence[1:], pad_words], dim=0)\n\n # Encoder: sentence -> z\n mu, logvar = self.forward_encoder(enc_inputs)\n z = self.sample_z(mu, logvar)\n\n if use_c_prior:\n c = 
self.sample_c_prior(mbsize)\n else:\n c = self.forward_discriminator(sentence.transpose(0, 1))\n\n # Decoder: sentence -> y\n y = self.forward_decoder(dec_inputs, z, c)\n\n recon_loss = F.cross_entropy(\n y.view(-1, self.n_vocab), dec_targets.view(-1), size_average=True\n )\n\n kl_loss = torch.mean(0.5 * torch.sum(torch.exp(logvar) + mu**2 - 1 - logvar, 1))\n\n return recon_loss, kl_loss\n\n def generate_sentences(self, batch_size):\n \"\"\"\n Generate sentences and corresponding z of (batch_size x max_sent_len)\n \"\"\"\n samples = []\n cs = []\n\n for _ in range(batch_size):\n z = self.sample_z_prior(1)\n c = self.sample_c_prior(1)\n samples.append(self.sample_sentence(z, c, raw=True))\n cs.append(c.long())\n\n X_gen = torch.cat(samples, dim=0)\n c_gen = torch.cat(cs, dim=0)\n\n return X_gen, c_gen\n\n def sample_sentence(self, z, c, raw=False, temp=1): # Basically a language model sampling\n \"\"\"\n Sample single sentence from p(x|z,c) according to given temperature.\n `raw = True` means this returns sentence as in dataset which is useful\n to train discriminator. `False` means that this will return list of\n `word_idx` which is useful for evaluation.\n \"\"\"\n self.eval()\n\n word = torch.LongTensor([self.START_IDX])\n word = word.cuda() if self.gpu else word\n word = Variable(word) # '<start>'\n\n z, c = z.view(1, 1, -1), c.view(1, 1, -1)\n\n h = torch.cat([z, c], dim=2)\n init_c = torch.zeros((1, 1, self.z_dim+self.c_dim))\n init_c = init_c.cuda() if self.gpu else init_c\n\n if not isinstance(h, Variable):\n h = Variable(h)\n init_c = Variable(init_c)\n\n outputs = []\n\n if raw:\n outputs.append(self.START_IDX)\n\n for i in range(self.MAX_SENT_LEN):\n emb = self.word_emb(word).view(1, 1, -1)\n emb = torch.cat([emb, z, c], 2)\n\n output, (h, init_c) = self.decoder(emb, (h, init_c))\n y = self.decoder_fc(output).view(-1)\n y = F.softmax(y/temp, dim=0)\n\n idx = torch.multinomial(y, 1)\n\n word = Variable(torch.LongTensor([int(idx)]))\n word = word.cuda() if self.gpu else word\n\n idx = int(idx)\n\n if not raw and idx == self.EOS_IDX:\n break\n\n outputs.append(idx)\n\n # Back to default state: train\n self.train()\n\n if raw:\n outputs = Variable(torch.LongTensor(outputs)).unsqueeze(0)\n return outputs.cuda() if self.gpu else outputs\n else:\n return outputs\n\n def generate_soft_embed(self, mbsize, temp=1):\n \"\"\"\n Generate soft embeddings of (mbsize x emb_dim) along with target z\n and c for each row (mbsize x {z_dim, c_dim})\n \"\"\"\n samples = []\n targets_c = []\n targets_z = []\n\n for _ in range(mbsize):\n z = self.sample_z_prior(1)\n c = self.sample_c_prior(1)\n\n samples.append(self.sample_soft_embed(z, c, temp=1))\n targets_z.append(z)\n targets_c.append(c)\n\n X_gen = torch.cat(samples, dim=0)\n targets_z = torch.cat(targets_z, dim=0)\n _, targets_c = torch.cat(targets_c, dim=0).max(dim=1)\n\n return X_gen, targets_z, targets_c\n\n def sample_soft_embed(self, z, c, temp=1):\n \"\"\"\n Sample single soft embedded sentence from p(x|z,c) and temperature.\n Soft embeddings are calculated as weighted average of word_emb\n according to p(x|z,c).\n \"\"\"\n self.eval()\n\n z, c = z.view(1, 1, -1), c.view(1, 1, -1)\n\n word = torch.LongTensor([self.START_IDX])\n word = word.cuda() if self.gpu else word\n word = Variable(word) # '<start>'\n emb = self.word_emb(word).view(1, 1, -1)\n emb = torch.cat([emb, z, c], 2)\n\n h = torch.cat([z, c], dim=2)\n init_c = torch.zeros((1, 1, self.z_dim + self.c_dim))\n init_c = init_c.cuda() if self.gpu else init_c\n\n if not isinstance(h, 
Variable):\n h = Variable(h)\n init_c = Variable(init_c)\n\n outputs = [self.word_emb(word).view(1, -1)]\n\n for i in range(self.MAX_SENT_LEN):\n output, (h, init_c) = self.decoder(emb, (h, init_c))\n o = self.decoder_fc(output).view(-1)\n\n # Sample softmax with temperature\n y = F.softmax(o / temp, dim=0)\n\n # Take expectation of embedding given output prob -> soft embedding\n # <y, w> = 1 x n_vocab * n_vocab x emb_dim\n emb = y.unsqueeze(0) @ self.word_emb.weight\n emb = emb.view(1, 1, -1)\n\n # Save resulting soft embedding\n outputs.append(emb.view(1, -1))\n\n # Append with z and c for the next input\n emb = torch.cat([emb, z, c], 2)\n\n\n # 1 x 16 x emb_dim\n outputs = torch.cat(outputs, dim=0).unsqueeze(0)\n\n # Back to default state: train\n self.train()\n\n return outputs.cuda() if self.gpu else outputs\n\n def word_dropout(self, inputs): # WHY?\n \"\"\"\n Do word dropout: with prob `p_word_dropout`, set the word to '<unk>'.\n \"\"\"\n if isinstance(inputs, Variable):\n data = inputs.data.clone()\n else:\n data = inputs.clone()\n\n # Sample masks: elems with val 1 will be set to <unk>\n mask = torch.from_numpy(\n np.random.binomial(1, p=self.p_word_dropout, size=tuple(data.size()))\n .astype('uint8')\n )\n\n if self.gpu:\n mask = mask.cuda()\n\n # Set to <unk>\n data[mask] = self.UNK_IDX\n\n return Variable(data)\n"
] |
[
[
"torch.nn.Linear",
"torch.zeros",
"torch.cat",
"torch.nn.Dropout",
"torch.nn.LSTM",
"torch.nn.ModuleList",
"torch.autograd.Variable",
"torch.randn",
"torch.nn.Conv2d",
"torch.multinomial",
"torch.LongTensor",
"torch.nn.functional.softmax",
"torch.exp",
"torch.nn.Embedding",
"numpy.random.multinomial"
]
] |
igorss77/diabetes_project
|
[
"0353ec812989c90c43a61b6aee4e02f943c51944"
] |
[
"data_preparation.py"
] |
[
"import pandas as pd\nimport logging\nfrom sklearn.feature_selection import SelectKBest, chi2\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import GridSearchCV\nimport pickle\nfrom sklearn.metrics import classification_report\n\n\nlogger = logging.getLogger(__name__)\n\ndef columns_to_dummies(df):\n \"\"\"\n Essa função altera os labels das colunas de string para valores binários.\n O campo \"Gender\" é transformado com a função \"get_dummies\"\n\n Parameters\n ----------\n df : pandas.dataframe\n dataframe com todas as colunas utilizadas do projeto\n Returns\n -------\n df: pandas.dataframe\n dataframe com colunas com \"yes\" e \"no\" para 1 e 0, além do target \"class\" com valores transformados\n \"Positive\" para 1 e \"Negative\" para 0.\n \"\"\"\n\n df['class']=df['class'].replace(['Positive'],1)\n df['class']=df['class'].replace(['Negative'],0)\n df=df.replace(['Yes'], 1)\n df=df.replace(['No'],0)\n df = pd.get_dummies(df, columns=['Gender'])\n return df\n\ndef transform_age(df):\n \"\"\"\n Função que retorna a média de idade dos pacientes com casos positivos\n no dataset que está sendo avaliado.\n\n Parameters\n ----------\n df : pandas.dataframe\n dataframe com todas as colunas utilizadas do projeto\n Returns\n -------\n df: pandas.dataframe\n dataframe com a coluna \"Age\" na forma boolean\n \"\"\"\n mean_age_positives = int(df.groupby(['class'])['Age'].mean()[1])\n logger.info(f'A média de idade dos pacientes positivos é de {mean_age_positives} anos')\n df['Age_mean'] = [1 if x >= int(df.groupby(['class'])['Age'].mean()[1]) else 0 for x in df.Age.values]\n return df\n\ndef featuring_select(df):\n \"\"\"\n Seleciona variáveis importantes utilizando o método \"KBest\"\n\n Parameters\n ----------\n df : pandas.dataframe\n Dataframe pré processado\n\n Returns\n -------\n df: pandas.dataframe\n Dataframe com variáveis a serem utilizadas no modelo\n chi_features: list\n Lista com variáveis selecionadas pelo KBest\n \"\"\"\n # Será considerado apenas o Gênero Feminino. 
Se 1 feminino, se 0 masculino\n df = df.drop(['Age', 'Gender_Male'], axis=1)\n X = df.drop('class', axis=1)\n y = df['class']\n chi_values = SelectKBest(chi2, k=11).fit(X, y)\n selected_features = pd.concat([pd.DataFrame(X.columns), pd.DataFrame(chi_values.scores_)], axis=1)\n selected_features.columns = [\"features\", \"values\"]\n selected_features = selected_features.sort_values(by=\"values\", ascending=False).reset_index(drop=False)\n logger.info(f'No teste com o \"chi-quadrado\", as variáveis selecionadas foram {selected_features[\"features\"][0:-5].to_list()}')\n chi_features = selected_features[\"features\"][0:-5].to_list()\n\n return df, chi_features\n\ndef train_model(X_train, X_test, y_train, y_test):\n \"\"\"\n Parameters\n ----------\n X_train : list\n Lista contendo dados explicativos de treino\n X_test : list\n Lista contendo dados explicativos de treino\n y_train : list\n Lista contendo dados do target para treino\n y_test : list\n Lista contendo dados do target para teste\n\n Returns\n -------\n\n \"\"\"\n params = {'n_estimators': [100, 300], 'max_depth': [2, 3, 4, 5], 'max_features': ['auto', 'sqrt', 'log2']}\n logger.info('Iniciando GridSearch')\n grid_search_cv = GridSearchCV(RandomForestClassifier(random_state=42), params, verbose=1, cv=5)\n grid_search_cv.fit(X_train, y_train)\n logger.info('GridSearch e treino do modelo finalizado')\n rf_model = grid_search_cv.best_estimator_\n y_pred = rf_model.predict(X_test)\n target_names = ['negative', 'positive']\n logger.info(f'{classification_report(y_test, y_pred, target_names=target_names)}')\n feature_scores = pd.Series(rf_model.feature_importances_, index=X_train.columns).sort_values(\n ascending=False).to_frame()\n logger.info('Salvando modelo treinado')\n with open(\"./models/model.pkl\",\"wb\") as f:\n pickle.dump(rf_model,f)\n return logger.info(f'Variáveis mais importantes no modelo {feature_scores}')"
] |
[
[
"sklearn.ensemble.RandomForestClassifier",
"pandas.DataFrame",
"sklearn.metrics.classification_report",
"sklearn.feature_selection.SelectKBest",
"pandas.Series",
"pandas.get_dummies"
]
] |
aneeshnaik/spam
|
[
"f66212bf77d72c8528c1a0d6cbe814cd360794c7"
] |
[
"fit/likelihood.py"
] |
[
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated: 2018\nAuthor: A. P. Naik\nDescription: File containing likelihood function 'lnlike', to be fed to\nemcee sampler.\n\"\"\"\nimport numpy as np\nfrom .rotcurve import v_model\n\n\ndef lnlike(theta, theta_dict, galaxy, **kwargs):\n \"\"\"\n For given fit parameters (contained in 'theta') and data (contained in\n 'galaxy'), returns a Gaussian log-likelihood. This function is fed to emcee\n sampler. If 'infer_errors' is switched on, then additional error term is\n included.\n\n Parameters\n ----------\n theta : numpy.ndarray, shape (ndim,)\n Parameter values for which to calculate likelihood.\n theta_dict : dict\n Keys are names of free parameters, and values are indices. Indices are\n used, for example, in the stored Markov chains.\n galaxy : spam.data.SPARCGalaxy\n Instance of class spam.data.SPARCGalaxy, containing galaxy to be fit.\n **kwargs :\n Same as kwargs for spam.fit.GalaxyFit constructor. See documentation\n therein.\n\n Returns\n -------\n lnlike : float\n log-likelihood lnL(data | theta).\n \"\"\"\n\n # calculate rotation curve model\n model = v_model(theta, theta_dict, galaxy, **kwargs)\n\n # calculate Gaussian likelihood with or without additional error component\n if kwargs['infer_errors']:\n sigma = np.sqrt(galaxy.v_err**2 + theta[theta_dict['sigma_gal']]**2)\n lnlike = -np.sum(0.5*((galaxy.v - model)/sigma)**2 + np.log(sigma))\n else:\n lnlike = -0.5*np.sum(((galaxy.v - model)/galaxy.v_err)**2)\n\n return lnlike\n"
] |
[
[
"numpy.log",
"numpy.sum",
"numpy.sqrt"
]
] |
3vivekb/hail
|
[
"82c9e0f3ec2154335f91f2219b84c0fb5dbac526"
] |
[
"hail/python/hail/expr/types.py"
] |
[
"import abc\nimport json\nimport math\nfrom collections.abc import Mapping, Sequence\n\nimport numpy as np\n\nimport hail as hl\nfrom hail import genetics\nfrom hail.expr.nat import NatBase, NatLiteral\nfrom hail.expr.type_parsing import type_grammar, type_node_visitor\nfrom hail.genetics.reference_genome import reference_genome_type\nfrom hail.typecheck import *\nfrom hail.utils.java import scala_object, jset, Env, escape_parsable\n\n__all__ = [\n 'dtype',\n 'HailType',\n 'hail_type',\n 'is_container',\n 'is_compound',\n 'is_numeric',\n 'is_primitive',\n 'types_match',\n 'tint',\n 'tint32',\n 'tint64',\n 'tfloat',\n 'tfloat32',\n 'tfloat64',\n 'tstr',\n 'tbool',\n 'tarray',\n 'tndarray',\n 'tset',\n 'tdict',\n 'tstruct',\n 'tunion',\n 'ttuple',\n 'tinterval',\n 'tlocus',\n 'tcall',\n 'tvoid',\n 'tvariable',\n 'hts_entry_schema',\n]\n\ndef summary_type(t):\n if isinstance(t, hl.tdict):\n return f'dict<{summary_type(t.key_type)}, {summary_type(t.value_type)}>'\n elif isinstance(t, hl.tset):\n return f'set<{summary_type(t.element_type)}>'\n elif isinstance(t, hl.tarray):\n return f'array<{summary_type(t.element_type)}>'\n elif isinstance(t, hl.tstruct):\n return f'struct with {len(t)} fields'\n elif isinstance(t, hl.ttuple):\n return f'tuple with {len(t)} fields'\n elif isinstance(t, hl.tinterval):\n return f'interval<{summary_type(t.point_type)}>'\n else:\n return str(t)\n\ndef dtype(type_str):\n r\"\"\"Parse a type from its string representation.\n\n Examples\n --------\n\n >>> hl.dtype('int')\n dtype('int32')\n\n >>> hl.dtype('float')\n dtype('float64')\n\n >>> hl.dtype('array<int32>')\n dtype('array<int32>')\n\n >>> hl.dtype('dict<str, bool>')\n dtype('dict<str, bool>')\n\n >>> hl.dtype('struct{a: int32, `field with spaces`: int64}')\n dtype('struct{a: int32, `field with spaces`: int64}')\n\n Notes\n -----\n This function is able to reverse ``str(t)`` on a :class:`.HailType`.\n\n The grammar is defined as follows:\n\n .. 
code-block:: text\n\n type = _ (array / set / dict / struct / union / tuple / interval / int64 / int32 / float32 / float64 / bool / str / call / str / locus) _\n int64 = \"int64\" / \"tint64\"\n int32 = \"int32\" / \"tint32\" / \"int\" / \"tint\"\n float32 = \"float32\" / \"tfloat32\"\n float64 = \"float64\" / \"tfloat64\" / \"tfloat\" / \"float\"\n bool = \"tbool\" / \"bool\"\n call = \"tcall\" / \"call\"\n str = \"tstr\" / \"str\"\n locus = (\"tlocus\" / \"locus\") _ \"[\" identifier \"]\"\n array = (\"tarray\" / \"array\") _ \"<\" type \">\"\n ndarray = (\"tndarray\" / \"ndarray\") _ \"<\" type, identifier \">\"\n set = (\"tset\" / \"set\") _ \"<\" type \">\"\n dict = (\"tdict\" / \"dict\") _ \"<\" type \",\" type \">\"\n struct = (\"tstruct\" / \"struct\") _ \"{\" (fields / _) \"}\"\n union = (\"tunion\" / \"union\") _ \"{\" (fields / _) \"}\"\n tuple = (\"ttuple\" / \"tuple\") _ \"(\" ((type (\",\" type)*) / _) \")\"\n fields = field (\",\" field)*\n field = identifier \":\" type\n interval = (\"tinterval\" / \"interval\") _ \"<\" type \">\"\n identifier = _ (simple_identifier / escaped_identifier) _\n simple_identifier = ~\"\\w+\"\n escaped_identifier = ~\"`([^`\\\\\\\\]|\\\\\\\\.)*`\"\n _ = ~\"\\s*\"\n\n Parameters\n ----------\n type_str : :obj:`str`\n String representation of type.\n\n Returns\n -------\n :class:`.HailType`\n \"\"\"\n tree = type_grammar.parse(type_str)\n return type_node_visitor.visit(tree)\n\n\nclass HailTypeContext(object):\n def __init__(self, references=set()):\n self.references = references\n\n @property\n def is_empty(self):\n return len(self.references) == 0\n\n def _to_json_context(self):\n if self._json is None:\n self._json = {\n 'reference_genomes':\n {r: hl.get_reference(r)._config for r in self.references}\n }\n return self._json\n\n @classmethod\n def union(cls, *types):\n ctxs = [t.get_context() for t in types if not t.get_context().is_empty]\n if len(ctxs) == 0:\n return _empty_context\n if len(ctxs) == 1:\n return ctxs[0]\n refs = ctxs[0].references.union(*[ctx.references for ctx in ctxs[1:]])\n return HailTypeContext(refs)\n\n\n_empty_context = HailTypeContext()\n\n\nclass HailType(object):\n \"\"\"\n Hail type superclass.\n \"\"\"\n\n def __init__(self):\n super(HailType, self).__init__()\n self._context = None\n\n def __repr__(self):\n s = str(self).replace(\"'\", \"\\\\'\")\n return \"dtype('{}')\".format(s)\n\n @abc.abstractmethod\n def _eq(self, other):\n return\n\n def __eq__(self, other):\n return isinstance(other, HailType) and self._eq(other)\n\n @abc.abstractmethod\n def __str__(self):\n return\n\n def __hash__(self):\n # FIXME this is a bit weird\n return 43 + hash(str(self))\n\n def pretty(self, indent=0, increment=4):\n \"\"\"Returns a prettily formatted string representation of the type.\n\n Parameters\n ----------\n indent : :obj:`int`\n Spaces to indent.\n\n Returns\n -------\n :obj:`str`\n \"\"\"\n l = []\n l.append(' ' * indent)\n self._pretty(l, indent, increment)\n return ''.join(l)\n\n def _pretty(self, l, indent, increment):\n l.append(str(self))\n\n @abc.abstractmethod\n def _parsable_string(self):\n pass\n\n def typecheck(self, value):\n \"\"\"Check that `value` matches a type.\n\n Parameters\n ----------\n value\n Value to check.\n\n Raises\n ------\n :obj:`TypeError`\n \"\"\"\n def check(t, obj):\n t._typecheck_one_level(obj)\n return True\n self._traverse(value, check)\n\n @abc.abstractmethod\n def _typecheck_one_level(self, annotation):\n pass\n\n def _to_json(self, x):\n converted = self._convert_to_json_na(x)\n 
return json.dumps(converted)\n\n def _convert_to_json_na(self, x):\n if x is None:\n return x\n else:\n return self._convert_to_json(x)\n\n def _convert_to_json(self, x):\n return x\n\n def _from_json(self, s):\n x = json.loads(s)\n return self._convert_from_json_na(x)\n\n def _convert_from_json_na(self, x):\n if x is None:\n return x\n else:\n return self._convert_from_json(x)\n\n def _convert_from_json(self, x):\n return x\n\n\n def _traverse(self, obj, f):\n \"\"\"Traverse a nested type and object.\n\n Parameters\n ----------\n obj : Any\n f : Callable[[HailType, Any], bool]\n Function to evaluate on the type and object. Traverse children if\n the function returns ``True``.\n \"\"\"\n f(self, obj)\n\n @abc.abstractmethod\n def unify(self, t):\n raise NotImplementedError\n\n @abc.abstractmethod\n def subst(self):\n raise NotImplementedError\n\n @abc.abstractmethod\n def clear(self):\n raise NotImplementedError\n\n def _get_context(self):\n return _empty_context\n\n def get_context(self):\n if self._context is None:\n self._context = self._get_context()\n return self._context\n\n\nhail_type = oneof(HailType, transformed((str, dtype)))\n\n\nclass _tvoid(HailType):\n def __init__(self):\n super(_tvoid, self).__init__()\n\n def __str__(self):\n return \"void\"\n\n def _eq(self, other):\n return isinstance(other, _tvoid)\n\n def _parsable_string(self):\n return \"Void\"\n\n def unify(self, t):\n return t == tvoid\n\n def subst(self):\n return self\n\n def clear(self):\n pass\n\nclass _tint32(HailType):\n \"\"\"Hail type for signed 32-bit integers.\n\n Their values can range from :math:`-2^{31}` to :math:`2^{31} - 1`\n (approximately 2.15 billion).\n\n In Python, these are represented as :obj:`int`.\n \"\"\"\n\n def __init__(self):\n super(_tint32, self).__init__()\n\n def _typecheck_one_level(self, annotation):\n if annotation is not None:\n if not isinstance(annotation, int):\n raise TypeError(\"type 'tint32' expected Python 'int', but found type '%s'\" % type(annotation))\n elif not self.min_value <= annotation <= self.max_value:\n raise TypeError(f\"Value out of range for 32-bit integer: \"\n f\"expected [{self.min_value}, {self.max_value}], found {annotation}\")\n\n def __str__(self):\n return \"int32\"\n\n def _eq(self, other):\n return isinstance(other, _tint32)\n\n def _parsable_string(self):\n return \"Int32\"\n\n @property\n def min_value(self):\n return -(1 << 31)\n\n @property\n def max_value(self):\n return (1 << 31) - 1\n\n def unify(self, t):\n return t == tint32\n\n def subst(self):\n return self\n\n def clear(self):\n pass\n\n def to_numpy(self):\n return np.int32\n\n\nclass _tint64(HailType):\n \"\"\"Hail type for signed 64-bit integers.\n\n Their values can range from :math:`-2^{63}` to :math:`2^{63} - 1`.\n\n In Python, these are represented as :obj:`int`.\n \"\"\"\n\n def __init__(self):\n super(_tint64, self).__init__()\n\n def _typecheck_one_level(self, annotation):\n if annotation is not None:\n if not isinstance(annotation, int):\n raise TypeError(\"type 'int64' expected Python 'int', but found type '%s'\" % type(annotation))\n if not self.min_value <= annotation <= self.max_value:\n raise TypeError(f\"Value out of range for 64-bit integer: \"\n f\"expected [{self.min_value}, {self.max_value}], found {annotation}\")\n\n def __str__(self):\n return \"int64\"\n\n def _eq(self, other):\n return isinstance(other, _tint64)\n\n def _parsable_string(self):\n return \"Int64\"\n\n @property\n def min_value(self):\n return -(1 << 63)\n\n @property\n def max_value(self):\n return 
(1 << 63) - 1\n\n def unify(self, t):\n return t == tint64\n\n def subst(self):\n return self\n\n def clear(self):\n pass\n\n def to_numpy(self):\n return np.int64\n\n\nclass _tfloat32(HailType):\n \"\"\"Hail type for 32-bit floating point numbers.\n\n In Python, these are represented as :obj:`float`.\n \"\"\"\n\n def __init__(self):\n super(_tfloat32, self).__init__()\n\n def _typecheck_one_level(self, annotation):\n if annotation is not None and not isinstance(annotation, (float, int)):\n raise TypeError(\"type 'float32' expected Python 'float', but found type '%s'\" % type(annotation))\n\n def __str__(self):\n return \"float32\"\n\n def _eq(self, other):\n return isinstance(other, _tfloat32)\n\n def _parsable_string(self):\n return \"Float32\"\n\n def _convert_from_json(self, x):\n return float(x)\n\n def _convert_to_json(self, x):\n if math.isfinite(x):\n return x\n else:\n return str(x)\n\n def unify(self, t):\n return t == tfloat32\n\n def subst(self):\n return self\n\n def clear(self):\n pass\n\n def to_numpy(self):\n return np.float32\n\n\nclass _tfloat64(HailType):\n \"\"\"Hail type for 64-bit floating point numbers.\n\n In Python, these are represented as :obj:`float`.\n \"\"\"\n\n def __init__(self):\n super(_tfloat64, self).__init__()\n\n def _typecheck_one_level(self, annotation):\n if annotation is not None and not isinstance(annotation, (float, int)):\n raise TypeError(\"type 'float64' expected Python 'float', but found type '%s'\" % type(annotation))\n def __str__(self):\n return \"float64\"\n\n def _eq(self, other):\n return isinstance(other, _tfloat64)\n\n def _parsable_string(self):\n return \"Float64\"\n\n def _convert_from_json(self, x):\n return float(x)\n\n def _convert_to_json(self, x):\n if math.isfinite(x):\n return x\n else:\n return str(x)\n\n def unify(self, t):\n return t == tfloat64\n\n def subst(self):\n return self\n\n def clear(self):\n pass\n\n def to_numpy(self):\n return np.float64\n\n\nclass _tstr(HailType):\n \"\"\"Hail type for text strings.\n\n In Python, these are represented as strings.\n \"\"\"\n\n def __init__(self):\n super(_tstr, self).__init__()\n\n def _typecheck_one_level(self, annotation):\n if annotation and not isinstance(annotation, str):\n raise TypeError(\"type 'str' expected Python 'str', but found type '%s'\" % type(annotation))\n\n def __str__(self):\n return \"str\"\n\n def _eq(self, other):\n return isinstance(other, _tstr)\n\n def _parsable_string(self):\n return \"String\"\n\n def unify(self, t):\n return t == tstr\n\n def subst(self):\n return self\n\n def clear(self):\n pass\n\n\nclass _tbool(HailType):\n \"\"\"Hail type for Boolean (``True`` or ``False``) values.\n\n In Python, these are represented as :obj:`bool`.\n \"\"\"\n\n def __init__(self):\n super(_tbool, self).__init__()\n\n def _typecheck_one_level(self, annotation):\n if annotation is not None and not isinstance(annotation, bool):\n raise TypeError(\"type 'bool' expected Python 'bool', but found type '%s'\" % type(annotation))\n\n def __str__(self):\n return \"bool\"\n\n def _eq(self, other):\n return isinstance(other, _tbool)\n\n def _parsable_string(self):\n return \"Boolean\"\n\n def unify(self, t):\n return t == tbool\n\n def subst(self):\n return self\n\n def clear(self):\n pass\n\n def to_numpy(self):\n return np.bool\n\n\nclass tndarray(HailType):\n \"\"\"Hail type for n-dimensional arrays.\n\n .. 
include:: _templates/experimental.rst\n\n In Python, these are represented as NumPy :obj:`ndarray`.\n\n Notes\n -----\n\n NDArrays contain elements of only one type, which is parameterized by\n `element_type`.\n\n Parameters\n ----------\n element_type : :class:`.HailType`\n Element type of array.\n ndim : int32\n Number of dimensions.\n\n See Also\n --------\n :class:`.NDArrayExpression`, :func:`.ndarray`\n \"\"\"\n\n @typecheck_method(element_type=hail_type, ndim=oneof(NatBase, int))\n def __init__(self, element_type, ndim):\n self._element_type = element_type\n self._ndim = NatLiteral(ndim) if isinstance(ndim, int) else ndim\n super(tndarray, self).__init__()\n\n @property\n def element_type(self):\n \"\"\"NDArray element type.\n\n Returns\n -------\n :class:`.HailType`\n Element type.\n \"\"\"\n return self._element_type\n\n @property\n def ndim(self):\n \"\"\"NDArray number of dimensions.\n\n Returns\n -------\n :obj:`int`\n Number of dimensions.\n \"\"\"\n assert isinstance(self._ndim, NatLiteral), \"tndarray must be realized with a concrete number of dimensions\"\n return self._ndim.n\n\n def _traverse(self, obj, f):\n if f(self, obj):\n for elt in np.nditer(obj):\n self.element_type._traverse(elt.item(), f)\n\n def _typecheck_one_level(self, annotation):\n if annotation is not None and not isinstance(annotation, np.ndarray):\n raise TypeError(\"type 'ndarray' expected Python 'numpy.ndarray', but found type '%s'\" % type(annotation))\n\n def __str__(self):\n return \"ndarray<{}, {}>\".format(self.element_type, self.ndim)\n\n def _eq(self, other):\n return isinstance(other, tndarray) and self.element_type == other.element_type\n\n def _pretty(self, l, indent, increment):\n l.append('ndarray<')\n self._element_type._pretty(l, indent, increment)\n l.append(', ')\n l.append(str(self.ndim))\n l.append('>')\n\n def _parsable_string(self):\n return f'NDArray[{self._element_type._parsable_string()},{self.ndim}]'\n\n def _convert_from_json(self, x):\n np_type = self.element_type.to_numpy()\n return np.ndarray(shape=x['shape'], buffer=np.array(x['data'], dtype=np_type), strides=x['strides'], dtype=np_type)\n\n def _convert_to_json(self, x):\n data = x.reshape(x.size).tolist()\n json_dict = {\n \"shape\": x.shape,\n \"strides\": x.strides,\n \"flags\": 0,\n \"data\": data,\n \"offset\": 0\n }\n return json_dict\n\n def clear(self):\n self._element_type.clear()\n self._ndim.clear()\n\n def unify(self, t):\n return isinstance(t, tndarray) and \\\n self._element_type.unify(t._element_type) and \\\n self._ndim.unify(t._ndim)\n\n def subst(self):\n return tndarray(self._element_type.subst(), self._ndim.subst())\n\n def _get_context(self):\n return self.element_type.get_context()\n\n\nclass tarray(HailType):\n \"\"\"Hail type for variable-length arrays of elements.\n\n In Python, these are represented as :obj:`list`.\n\n Notes\n -----\n Arrays contain elements of only one type, which is parameterized by\n `element_type`.\n\n Parameters\n ----------\n element_type : :class:`.HailType`\n Element type of array.\n\n See Also\n --------\n :class:`.ArrayExpression`, :class:`.CollectionExpression`,\n :func:`.array`, :ref:`sec-collection-functions`\n \"\"\"\n\n @typecheck_method(element_type=hail_type)\n def __init__(self, element_type):\n self._element_type = element_type\n super(tarray, self).__init__()\n\n @property\n def element_type(self):\n \"\"\"Array element type.\n\n Returns\n -------\n :class:`.HailType`\n Element type.\n \"\"\"\n return self._element_type\n\n def _traverse(self, obj, f):\n if 
f(self, obj):\n for elt in obj:\n self.element_type._traverse(elt, f)\n\n def _typecheck_one_level(self, annotation):\n if annotation is not None:\n if not isinstance(annotation, Sequence):\n raise TypeError(\"type 'array' expected Python 'list', but found type '%s'\" % type(annotation))\n\n def __str__(self):\n return \"array<{}>\".format(self.element_type)\n\n def _eq(self, other):\n return isinstance(other, tarray) and self.element_type == other.element_type\n\n def _pretty(self, l, indent, increment):\n l.append('array<')\n self.element_type._pretty(l, indent, increment)\n l.append('>')\n\n def _parsable_string(self):\n return \"Array[\" + self.element_type._parsable_string() + \"]\"\n\n def _convert_from_json(self, x):\n return [self.element_type._convert_from_json_na(elt) for elt in x]\n\n def _convert_to_json(self, x):\n return [self.element_type._convert_to_json_na(elt) for elt in x]\n\n def _propagate_jtypes(self, jtype):\n self._element_type._add_jtype(jtype.elementType())\n\n def unify(self, t):\n return isinstance(t, tarray) and self.element_type.unify(t.element_type)\n\n def subst(self):\n return tarray(self.element_type.subst())\n\n def clear(self):\n self.element_type.clear()\n\n def _get_context(self):\n return self.element_type.get_context()\n\n\nclass tset(HailType):\n \"\"\"Hail type for collections of distinct elements.\n\n In Python, these are represented as :obj:`set`.\n\n Notes\n -----\n Sets contain elements of only one type, which is parameterized by\n `element_type`.\n\n Parameters\n ----------\n element_type : :class:`.HailType`\n Element type of set.\n\n See Also\n --------\n :class:`.SetExpression`, :class:`.CollectionExpression`,\n :func:`.set`, :ref:`sec-collection-functions`\n \"\"\"\n\n @typecheck_method(element_type=hail_type)\n def __init__(self, element_type):\n self._element_type = element_type\n super(tset, self).__init__()\n\n @property\n def element_type(self):\n \"\"\"Set element type.\n\n Returns\n -------\n :class:`.HailType`\n Element type.\n \"\"\"\n return self._element_type\n\n def _traverse(self, obj, f):\n if f(self, obj):\n for elt in obj:\n self.element_type._traverse(elt, f)\n\n def _typecheck_one_level(self, annotation):\n if annotation is not None:\n if not isinstance(annotation, set):\n raise TypeError(\"type 'set' expected Python 'set', but found type '%s'\" % type(annotation))\n\n def __str__(self):\n return \"set<{}>\".format(self.element_type)\n\n def _eq(self, other):\n return isinstance(other, tset) and self.element_type == other.element_type\n\n def _pretty(self, l, indent, increment):\n l.append('set<')\n self.element_type._pretty(l, indent, increment)\n l.append('>')\n\n def _parsable_string(self):\n return \"Set[\" + self.element_type._parsable_string() + \"]\"\n\n def _convert_from_json(self, x):\n return {self.element_type._convert_from_json_na(elt) for elt in x}\n\n def _convert_to_json(self, x):\n return [self.element_type._convert_to_json_na(elt) for elt in x]\n\n def _propagate_jtypes(self, jtype):\n self._element_type._add_jtype(jtype.elementType())\n\n def unify(self, t):\n return isinstance(t, tset) and self.element_type.unify(t.element_type)\n\n def subst(self):\n return tset(self.element_type.subst())\n\n def clear(self):\n self.element_type.clear()\n\n def _get_context(self):\n return self.element_type.get_context()\n\n\nclass tdict(HailType):\n \"\"\"Hail type for key-value maps.\n\n In Python, these are represented as :obj:`dict`.\n\n Notes\n -----\n Dicts parameterize the type of both their keys and values 
with\n `key_type` and `value_type`.\n\n Parameters\n ----------\n key_type: :class:`.HailType`\n Key type.\n value_type: :class:`.HailType`\n Value type.\n\n See Also\n --------\n :class:`.DictExpression`, :func:`.dict`, :ref:`sec-collection-functions`\n \"\"\"\n\n @typecheck_method(key_type=hail_type, value_type=hail_type)\n def __init__(self, key_type, value_type):\n self._key_type = key_type\n self._value_type = value_type\n super(tdict, self).__init__()\n\n @property\n def key_type(self):\n \"\"\"Dict key type.\n\n Returns\n -------\n :class:`.HailType`\n Key type.\n \"\"\"\n return self._key_type\n\n @property\n def value_type(self):\n \"\"\"Dict value type.\n\n Returns\n -------\n :class:`.HailType`\n Value type.\n \"\"\"\n return self._value_type\n\n @property\n def element_type(self):\n return tstruct(key = self._key_type, value = self._value_type)\n\n def _traverse(self, obj, f):\n if f(self, obj):\n for k, v in obj.items():\n self.key_type._traverse(k, f)\n self.value_type._traverse(v, f)\n\n def _typecheck_one_level(self, annotation):\n if annotation is not None:\n if not isinstance(annotation, dict):\n raise TypeError(\"type 'dict' expected Python 'dict', but found type '%s'\" % type(annotation))\n\n def __str__(self):\n return \"dict<{}, {}>\".format(self.key_type, self.value_type)\n\n def _eq(self, other):\n return isinstance(other, tdict) and self.key_type == other.key_type and self.value_type == other.value_type\n\n def _pretty(self, l, indent, increment):\n l.append('dict<')\n self.key_type._pretty(l, indent, increment)\n l.append(', ')\n self.value_type._pretty(l, indent, increment)\n l.append('>')\n\n def _parsable_string(self):\n return \"Dict[{},{}]\".format(self.key_type._parsable_string(), self.value_type._parsable_string())\n\n def _convert_from_json(self, x):\n return {self.key_type._convert_from_json_na(elt['key']): self.value_type._convert_from_json_na(elt['value']) for\n elt in x}\n\n def _convert_to_json(self, x):\n return [{'key': self.key_type._convert_to_json(k),\n 'value':self.value_type._convert_to_json(v)} for k, v in x.items()]\n\n def _propagate_jtypes(self, jtype):\n self._key_type._add_jtype(jtype.keyType())\n self._value_type._add_jtype(jtype.valueType())\n\n def unify(self, t):\n return (isinstance(t, tdict)\n and self.key_type.unify(t.key_type)\n and self.value_type.unify(t.value_type))\n\n def subst(self):\n return tdict(self._key_type.subst(), self._value_type.subst())\n\n def clear(self):\n self.key_type.clear()\n self.value_type.clear()\n\n def _get_context(self):\n return HailTypeContext.union(self.key_type, self.value_type)\n\n\nclass tstruct(HailType, Mapping):\n \"\"\"Hail type for structured groups of heterogeneous fields.\n\n In Python, these are represented as :class:`.Struct`.\n\n Parameters\n ----------\n field_types : keyword args of :class:`.HailType`\n Fields.\n\n See Also\n --------\n :class:`.StructExpression`, :class:`.Struct`\n \"\"\"\n\n @typecheck_method(field_types=hail_type)\n def __init__(self, **field_types):\n self._field_types = field_types\n self._fields = tuple(field_types)\n super(tstruct, self).__init__()\n\n @property\n def types(self):\n \"\"\"Struct field types.\n\n Returns\n -------\n :obj:`tuple` of :class:`.HailType`\n \"\"\"\n return tuple(self._field_types.values())\n\n @property\n def fields(self):\n \"\"\"Struct field names.\n\n Returns\n -------\n :obj:`tuple` of :obj:`str`\n Tuple of struct field names.\n \"\"\"\n return self._fields\n\n def _traverse(self, obj, f):\n if f(self, obj):\n for k, v in 
obj.items():\n t = self[k]\n t._traverse(v, f)\n\n def _typecheck_one_level(self, annotation):\n if annotation:\n if isinstance(annotation, Mapping):\n s = set(self)\n for f in annotation:\n if f not in s:\n raise TypeError(\"type '%s' expected fields '%s', but found fields '%s'\" %\n (self, list(self), list(annotation)))\n else:\n raise TypeError(\"type 'struct' expected type Mapping (e.g. dict or hail.utils.Struct), but found '%s'\" %\n type(annotation))\n\n @typecheck_method(item=oneof(int, str))\n def __getitem__(self, item):\n if not isinstance(item, str):\n item = self._fields[item]\n return self._field_types[item]\n\n def __iter__(self):\n return iter(self._field_types)\n\n def __len__(self):\n return len(self._fields)\n\n def __str__(self):\n return \"struct{{{}}}\".format(\n ', '.join('{}: {}'.format(escape_parsable(f), str(t)) for f, t in self.items()))\n\n def _eq(self, other):\n return (isinstance(other, tstruct)\n and self._fields == other._fields\n and all(self[f] == other[f] for f in self._fields))\n\n def _pretty(self, l, indent, increment):\n if not self._fields:\n l.append('struct {}')\n return\n\n pre_indent = indent\n indent += increment\n l.append('struct {')\n for i, (f, t) in enumerate(self.items()):\n if i > 0:\n l.append(', ')\n l.append('\\n')\n l.append(' ' * indent)\n l.append('{}: '.format(escape_parsable(f)))\n t._pretty(l, indent, increment)\n l.append('\\n')\n l.append(' ' * pre_indent)\n l.append('}')\n\n def _parsable_string(self):\n return \"Struct{{{}}}\".format(\n ','.join('{}:{}'.format(escape_parsable(f), t._parsable_string()) for f, t in self.items()))\n\n def _convert_from_json(self, x):\n from hail.utils import Struct\n return Struct(**{f: t._convert_from_json_na(x.get(f)) for f, t in self.items()})\n\n def _convert_to_json(self, x):\n return {f: t._convert_to_json_na(x[f]) for f, t in self.items()}\n\n def _is_prefix_of(self, other):\n return (isinstance(other, tstruct) and\n len(self._fields) <= len(other._fields) and\n all(x == y for x, y in zip(self._field_types.values(), other._field_types.values())))\n\n def _concat(self, other):\n new_field_types = {}\n new_field_types.update(self._field_types)\n new_field_types.update(other._field_types)\n return tstruct(**new_field_types)\n\n def _insert(self, path, t):\n if not path:\n return t\n\n key = path[0]\n keyt = self.get(key)\n if not (keyt and isinstance(keyt, tstruct)):\n keyt = tstruct()\n return self._insert_fields(**{key: keyt._insert(path[1:], t)})\n\n def _insert_field(self, field, typ):\n return self._insert_fields(**{field: typ})\n\n def _insert_fields(self, **new_fields):\n new_field_types = {}\n new_field_types.update(self._field_types)\n new_field_types.update(new_fields)\n return tstruct(**new_field_types)\n\n def _drop_fields(self, fields):\n return tstruct(**{f: t for f, t in self.items() if f not in fields})\n\n def _select_fields(self, fields):\n return tstruct(**{f: self[f] for f in fields})\n\n def _index_path(self, path):\n t = self\n for p in path:\n t = t[p]\n return t\n\n def _rename(self, map):\n seen = {}\n new_field_types = {}\n\n for f0, t in self.items():\n f = map.get(f0, f0)\n if f in seen:\n raise ValueError(\n \"Cannot rename two fields to the same name: attempted to rename {} and {} both to {}\".format(\n repr(seen[f]), repr(f0), repr(f)))\n else:\n seen[f] = f0\n new_field_types[f] = t\n\n return tstruct(**new_field_types)\n\n def unify(self, t):\n if not (isinstance(t, tstruct) and len(self) == len(t)):\n return False\n for (f1, t1), (f2, t2) in 
zip(self.items(), t.items()):\n if not (f1 == f2 and t1.unify(t2)):\n return False\n return True\n\n def subst(self):\n return tstruct(**{f: t.subst() for f, t in self.items()})\n\n def clear(self):\n for f, t in self.items():\n t.clear()\n\n def _get_context(self):\n return HailTypeContext.union(*self.values())\n\nclass tunion(HailType, Mapping):\n @typecheck_method(case_types=hail_type)\n def __init__(self, **case_types):\n \"\"\"Tagged union type. Values of type union represent one of several\n heterogenous, named cases.\n\n Parameters\n ----------\n cases : keyword args of :class:`.HailType`\n The union cases.\n\n \"\"\"\n\n super(tunion, self).__init__()\n self._case_types = case_types\n self._cases = tuple(case_types)\n\n @property\n def cases(self):\n\n \"\"\"Return union case names.\n\n Returns\n -------\n :obj:`tuple` of :obj:`str`\n Tuple of union case names\n \"\"\"\n return self._cases\n\n @typecheck_method(item=oneof(int, str))\n def __getitem__(self, item):\n if isinstance(item, int):\n item = self._cases[item]\n return self._case_types[item]\n\n def __iter__(self):\n return iter(self._case_types)\n\n def __len__(self):\n return len(self._cases)\n\n def __str__(self):\n return \"union{{{}}}\".format(\n ', '.join('{}: {}'.format(escape_parsable(f), str(t)) for f, t in self.items()))\n\n def _eq(self, other):\n return (isinstance(other, tunion)\n and self._cases == other._cases\n and all(self[c] == other[c] for c in self._cases))\n\n def _pretty(self, l, indent, increment):\n if not self._cases:\n l.append('union {}')\n return\n\n pre_indent = indent\n indent += increment\n l.append('union {')\n for i, (f, t) in enumerate(self.items()):\n if i > 0:\n l.append(', ')\n l.append('\\n')\n l.append(' ' * indent)\n l.append('{}: '.format(escape_parsable(f)))\n t._pretty(l, indent, increment)\n l.append('\\n')\n l.append(' ' * pre_indent)\n l.append('}')\n\n def _parsable_string(self):\n return \"Union{{{}}}\".format(\n ','.join('{}:{}'.format(escape_parsable(f), t._parsable_string()) for f, t in self.items()))\n\n def unify(self, t):\n if not (isinstance(t, union) and len(self) == len(t)):\n return False\n for (f1, t1), (f2, t2) in zip(self.items(), t.items()):\n if not (f1 == f2 and t1.unify(t2)):\n return False\n return True\n\n def subst(self):\n return tunion(**{f: t.subst() for f, t in self.items()})\n\n def clear(self):\n for f, t in self.items():\n t.clear()\n\n def _get_context(self):\n return HailTypeContext.union(*self.values())\n\n\nclass ttuple(HailType, Sequence):\n \"\"\"Hail type for tuples.\n\n In Python, these are represented as :obj:`tuple`.\n\n Parameters\n ----------\n types: varargs of :class:`.HailType`\n Element types.\n\n See Also\n --------\n :class:`.TupleExpression`\n \"\"\"\n\n @typecheck_method(types=hail_type)\n def __init__(self, *types):\n self._types = types\n super(ttuple, self).__init__()\n\n @property\n def types(self):\n \"\"\"Tuple element types.\n\n Returns\n -------\n :obj:`tuple` of :class:`.HailType`\n \"\"\"\n return self._types\n\n def _traverse(self, obj, f):\n if f(self, obj):\n for t, elt in zip(self.types, obj):\n t._traverse(elt, f)\n\n def _typecheck_one_level(self, annotation):\n if annotation:\n if not isinstance(annotation, tuple):\n raise TypeError(\"type 'tuple' expected Python tuple, but found '%s'\" %\n type(annotation))\n if len(annotation) != len(self.types):\n raise TypeError(\"%s expected tuple of size '%i', but found '%s'\" %\n (self, len(self.types), annotation))\n\n @typecheck_method(item=int)\n def __getitem__(self, 
item):\n return self._types[item]\n\n def __iter__(self):\n for i in range(len(self)):\n yield self[i]\n\n def __len__(self):\n return len(self._types)\n\n def __str__(self):\n return \"tuple({})\".format(\", \".join([str(t) for t in self.types]))\n\n def _eq(self, other):\n from operator import eq\n return isinstance(other, ttuple) and len(self.types) == len(other.types) and all(\n map(eq, self.types, other.types))\n\n def _pretty(self, l, indent, increment):\n pre_indent = indent\n indent += increment\n l.append('tuple (')\n for i, t in enumerate(self.types):\n if i > 0:\n l.append(', ')\n l.append('\\n')\n l.append(' ' * indent)\n t._pretty(l, indent, increment)\n l.append('\\n')\n l.append(' ' * pre_indent)\n l.append(')')\n\n def _parsable_string(self):\n return \"Tuple[{}]\".format(\",\".join([t._parsable_string() for t in self.types]))\n\n def _convert_from_json(self, x):\n return tuple(self.types[i]._convert_from_json_na(x[i]) for i in range(len(self.types)))\n\n def _convert_to_json(self, x):\n return [self.types[i]._convert_to_json_na(x[i]) for i in range(len(self.types))]\n\n def unify(self, t):\n if not (isinstance(t, ttuple) and len(self.types) == len(t.types)):\n return False\n for t1, t2 in zip(self.types, t.types):\n if not t1.unify(t2):\n return False\n return True\n\n def subst(self):\n return ttuple(*[t.subst() for t in self.types])\n\n def clear(self):\n for t in self.types:\n t.clear()\n\n def _get_context(self):\n return HailTypeContext.union(*self.types)\n\n\nclass _tcall(HailType):\n \"\"\"Hail type for a diploid genotype.\n\n In Python, these are represented by :class:`.Call`.\n \"\"\"\n\n def __init__(self):\n super(_tcall, self).__init__()\n\n def _typecheck_one_level(self, annotation):\n if annotation is not None and not isinstance(annotation, genetics.Call):\n raise TypeError(\"type 'call' expected Python hail.genetics.Call, but found %s'\" %\n type(annotation))\n\n def __str__(self):\n return \"call\"\n\n def _eq(self, other):\n return isinstance(other, _tcall)\n\n def _parsable_string(self):\n return \"Call\"\n\n def _convert_from_json(self, x):\n return hl.Call._from_java(hl.Call._call_jobject().parse(x))\n\n def _convert_to_json(self, x):\n return str(x)\n\n def unify(self, t):\n return t == tcall\n\n def subst(self):\n return self\n\n def clear(self):\n pass\n\n\nclass tlocus(HailType):\n \"\"\"Hail type for a genomic coordinate with a contig and a position.\n\n In Python, these are represented by :class:`.Locus`.\n\n Parameters\n ----------\n reference_genome: :class:`.ReferenceGenome` or :obj:`str`\n Reference genome to use.\n\n See Also\n --------\n :class:`.LocusExpression`, :func:`.locus`, :func:`.parse_locus`,\n :class:`.Locus`\n \"\"\"\n\n @typecheck_method(reference_genome=reference_genome_type)\n def __init__(self, reference_genome='default'):\n self._rg = reference_genome\n super(tlocus, self).__init__()\n\n def _typecheck_one_level(self, annotation):\n if annotation is not None:\n if not isinstance(annotation, genetics.Locus):\n raise TypeError(\"type '{}' expected Python hail.genetics.Locus, but found '{}'\"\n .format(self, type(annotation)))\n if not self.reference_genome == annotation.reference_genome:\n raise TypeError(\"type '{}' encountered Locus with reference genome {}\"\n .format(self, repr(annotation.reference_genome)))\n\n def __str__(self):\n return \"locus<{}>\".format(escape_parsable(str(self.reference_genome)))\n\n def _parsable_string(self):\n return \"Locus({})\".format(escape_parsable(str(self.reference_genome)))\n\n def 
_eq(self, other):\n return isinstance(other, tlocus) and self.reference_genome == other.reference_genome\n\n @property\n def reference_genome(self):\n \"\"\"Reference genome.\n\n Returns\n -------\n :class:`.ReferenceGenome`\n Reference genome.\n \"\"\"\n if self._rg is None:\n self._rg = hl.default_reference()\n return self._rg\n\n def _pretty(self, l, indent, increment):\n l.append('locus<{}>'.format(escape_parsable(self.reference_genome.name)))\n\n def _convert_from_json(self, x):\n return genetics.Locus(x['contig'], x['position'], reference_genome=self.reference_genome)\n\n def _convert_to_json(self, x):\n return {'contig': x.contig, 'position': x.position}\n\n def unify(self, t):\n return isinstance(t, tlocus) and self.reference_genome == t.reference_genome\n\n def subst(self):\n return self\n\n def clear(self):\n pass\n\n def _get_context(self):\n return HailTypeContext(references={self.reference_genome.name})\n\n\nclass tinterval(HailType):\n \"\"\"Hail type for intervals of ordered values.\n\n In Python, these are represented by :class:`.Interval`.\n\n Parameters\n ----------\n point_type: :class:`.HailType`\n Interval point type.\n\n See Also\n --------\n :class:`.IntervalExpression`, :class:`.Interval`, :func:`.interval`,\n :func:`.parse_locus_interval`\n \"\"\"\n\n @typecheck_method(point_type=hail_type)\n def __init__(self, point_type):\n self._point_type = point_type\n super(tinterval, self).__init__()\n\n @property\n def point_type(self):\n \"\"\"Interval point type.\n\n Returns\n -------\n :class:`.HailType`\n Interval point type.\n \"\"\"\n return self._point_type\n\n def _traverse(self, obj, f):\n if f(self, obj):\n self.point_type._traverse(obj.start, f)\n self.point_type._traverse(obj.end, f)\n\n def _typecheck_one_level(self, annotation):\n from hail.utils import Interval\n if annotation is not None:\n if not isinstance(annotation, Interval):\n raise TypeError(\"type '{}' expected Python hail.utils.Interval, but found {}\"\n .format(self, type(annotation)))\n if annotation.point_type != self.point_type:\n raise TypeError(\"type '{}' encountered Interval with point type {}\"\n .format(self, repr(annotation.point_type)))\n\n def __str__(self):\n return \"interval<{}>\".format(str(self.point_type))\n\n def _eq(self, other):\n return isinstance(other, tinterval) and self.point_type == other.point_type\n\n def _pretty(self, l, indent, increment):\n l.append('interval<')\n self.point_type._pretty(l, indent, increment)\n l.append('>')\n\n def _parsable_string(self):\n return \"Interval[{}]\".format(self.point_type._parsable_string())\n\n def _convert_from_json(self, x):\n from hail.utils import Interval\n return Interval(self.point_type._convert_from_json_na(x['start']),\n self.point_type._convert_from_json_na(x['end']),\n x['includeStart'],\n x['includeEnd'])\n\n def _convert_to_json(self, x):\n return {'start': self.point_type._convert_to_json_na(x.start),\n 'end': self.point_type._convert_to_json_na(x.end),\n 'includeStart': x.includes_start,\n 'includeEnd': x.includes_end}\n\n def unify(self, t):\n return isinstance(t, tinterval) and self.point_type.unify(t.point_type)\n\n def subst(self):\n return tinterval(self.point_type.subst())\n\n def clear(self):\n self.point_type.clear()\n\n def _get_context(self):\n return self.point_type.get_context()\n\n\nclass Box(object):\n named_boxes = {}\n\n @staticmethod\n def from_name(name):\n if name in Box.named_boxes:\n return Box.named_boxes[name]\n b = Box()\n Box.named_boxes[name] = b\n return b\n\n def __init__(self):\n pass\n\n 
def unify(self, v):\n if hasattr(self, 'value'):\n return self.value == v\n self.value = v\n return True\n\n def clear(self):\n if hasattr(self, 'value'):\n del self.value\n\n def get(self):\n assert hasattr(self, 'value')\n return self.value\n\n\ntvoid = _tvoid()\n\n\ntint32 = _tint32()\n\"\"\"Hail type for signed 32-bit integers.\n\nTheir values can range from :math:`-2^{31}` to :math:`2^{31} - 1`\n(approximately 2.15 billion).\n\nIn Python, these are represented as :obj:`int`.\n\nSee Also\n--------\n:class:`.Int32Expression`, :func:`.int`, :func:`.int32`\n\"\"\"\n\n\ntint64 = _tint64()\n\"\"\"Hail type for signed 64-bit integers.\n\nTheir values can range from :math:`-2^{63}` to :math:`2^{63} - 1`.\n\nIn Python, these are represented as :obj:`int`.\n\nSee Also\n--------\n:class:`.Int64Expression`, :func:`.int64`\n\"\"\"\n\ntint = tint32\n\"\"\"Alias for :py:data:`.tint32`.\"\"\"\n\ntfloat32 = _tfloat32()\n\"\"\"Hail type for 32-bit floating point numbers.\n\nIn Python, these are represented as :obj:`float`.\n\nSee Also\n--------\n:class:`.Float32Expression`, :func:`.float64`\n\"\"\"\n\ntfloat64 = _tfloat64()\n\"\"\"Hail type for 64-bit floating point numbers.\n\nIn Python, these are represented as :obj:`float`.\n\nSee Also\n--------\n:class:`.Float64Expression`, :func:`.float`, :func:`.float64`\n\"\"\"\n\ntfloat = tfloat64\n\"\"\"Alias for :py:data:`.tfloat64`.\"\"\"\n\ntstr = _tstr()\n\"\"\"Hail type for text strings.\n\nIn Python, these are represented as strings.\n\nSee Also\n--------\n:class:`.StringExpression`, :func:`.str`\n\"\"\"\n\ntbool = _tbool()\n\"\"\"Hail type for Boolean (``True`` or ``False``) values.\n\nIn Python, these are represented as :obj:`bool`.\n\nSee Also\n--------\n:class:`.BooleanExpression`, :func:`.bool`\n\"\"\"\n\ntcall = _tcall()\n\"\"\"Hail type for a diploid genotype.\n\nIn Python, these are represented by :class:`.Call`.\n\nSee Also\n--------\n:class:`.CallExpression`, :class:`.Call`, :func:`.call`, :func:`.parse_call`,\n:func:`.unphased_diploid_gt_index_call`\n\"\"\"\n\nhts_entry_schema = tstruct(GT=tcall, AD=tarray(tint32), DP=tint32, GQ=tint32, PL=tarray(tint32))\n\n_numeric_types = {_tbool, _tint32, _tint64, _tfloat32, _tfloat64}\n_primitive_types = _numeric_types.union({_tstr})\n_interned_types = _primitive_types.union({_tcall})\n\n\n@typecheck(t=HailType)\ndef is_numeric(t) -> bool:\n return t.__class__ in _numeric_types\n\n\n@typecheck(t=HailType)\ndef is_primitive(t) -> bool:\n return t.__class__ in _primitive_types\n\n\n@typecheck(t=HailType)\ndef is_container(t) -> bool:\n return (isinstance(t, tarray)\n or isinstance(t, tset)\n or isinstance(t, tdict))\n\n\n@typecheck(t=HailType)\ndef is_compound(t) -> bool:\n return (is_container(t)\n or isinstance(t, tstruct)\n or isinstance(t, tunion)\n or isinstance(t, ttuple)\n or isinstance(t, tndarray))\n\n\ndef types_match(left, right) -> bool:\n return (len(left) == len(right)\n and all(map(lambda lr: lr[0].dtype == lr[1].dtype, zip(left, right))))\n\ndef from_numpy(np_dtype):\n if np_dtype == np.int32:\n return tint32\n elif np_dtype == np.int64:\n return tint64\n elif np_dtype == np.float32:\n return tfloat32\n elif np_dtype == np.float64:\n return tfloat64\n elif np_dtype == np.bool:\n return tbool\n else:\n raise ValueError(f\"numpy type {np_dtype} could not be converted to a hail type.\")\n\n\nclass tvariable(HailType):\n _cond_map = {\n 'numeric': is_numeric,\n 'int32': lambda x: x == tint32,\n 'int64': lambda x: x == tint64,\n 'float32': lambda x: x == tfloat32,\n 'float64': lambda x: x == 
tfloat64,\n 'locus': lambda x: isinstance(x, tlocus),\n 'struct': lambda x: isinstance(x, tstruct),\n 'union': lambda x: isinstance(x, tunion),\n 'tuple': lambda x: isinstance(x, ttuple)\n }\n\n def __init__(self, name, cond):\n self.name = name\n self.cond = cond\n self.condf = tvariable._cond_map[cond] if cond else None\n self.box = Box.from_name(name)\n\n def unify(self, t):\n if self.condf and not self.condf(t):\n return False\n return self.box.unify(t)\n\n def clear(self):\n self.box.clear()\n\n def subst(self):\n return self.box.get()\n\n def __str__(self):\n s = '?' + self.name\n if self.cond:\n s = s + ':' + self.cond\n return s\n\n\nimport pprint\n\n_old_printer = pprint.PrettyPrinter\n\n\nclass TypePrettyPrinter(pprint.PrettyPrinter):\n def _format(self, object, stream, indent, allowance, context, level):\n if isinstance(object, HailType):\n stream.write(object.pretty(self._indent_per_level))\n else:\n return _old_printer._format(self, object, stream, indent, allowance, context, level)\n\n\npprint.PrettyPrinter = TypePrettyPrinter # monkey-patch pprint\n"
] |
[
[
"numpy.array",
"numpy.nditer"
]
] |
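A minimal sketch (not part of the dataset record above) of the shape/strides/data round-trip that `tndarray._convert_to_json` / `_convert_from_json` in the preceding code field perform; the payload values and the int64 dtype are illustrative assumptions, not values taken from the dataset.

import numpy as np

# Hypothetical payload mirroring what _convert_to_json would emit for a
# 2x3 C-contiguous int64 array (strides are in bytes: 3*8 and 8).
payload = {"shape": (2, 3), "strides": (24, 8), "data": [1, 2, 3, 4, 5, 6]}

# Rebuild the array the way _convert_from_json does: put the flat data into a
# typed buffer and reinterpret it with the stored shape and strides.
arr = np.ndarray(shape=payload["shape"],
                 buffer=np.array(payload["data"], dtype=np.int64),
                 strides=payload["strides"],
                 dtype=np.int64)
print(arr)  # [[1 2 3]
            #  [4 5 6]]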
YaoYinYing/OpenFold2
|
[
"57fd3cfba0bc70a2ca4c6943ba00e1c4892c1945"
] |
[
"alphafold/Tests/utils.py"
] |
[
"import torch\nimport numpy as np\nfrom pathlib import Path\nimport pickle\n\nfrom pytorch_memlab import MemReporter\nfrom pytorch_memlab.utils import readable_size as mem_to_str\nreporter = MemReporter()\n\n\ndef convert(arg, device:torch.device=None):\n\tif isinstance(arg, tuple):\n\t\treturn tuple([convert(arg_i) for arg_i in arg])\n\telif isinstance(arg, list):\n\t\treturn [convert(arg_i) for arg_i in arg]\n\telif isinstance(arg, np.ndarray):\n\t\tif device is None:\n\t\t\treturn torch.from_numpy(arg)\n\t\telse:\n\t\t\treturn torch.from_numpy(arg).to(device=device)\n\telif isinstance(arg, dict):\n\t\treturn {k: convert(v) for k, v in arg.items()}\n\telse:\n\t\treturn arg\n\ndef check_success(this_res, res):\n\terr = torch.abs(this_res.detach().to(dtype=torch.float32, device='cpu') - res.detach().to(dtype=torch.float32, device='cpu'))\n\tmax_err = torch.max(err).item()\n\tmean_err = torch.mean(err).item()\n\treturn err.sum().numpy(), max_err, mean_err\n\ndef check_recursive(a, b, depth:int=0, key=None, tol_max:float=1e-3, tol_mean=1e-3):\n\tstr_depth = ''.join(['--' for i in range(depth)])\n\tif isinstance(a, tuple) or isinstance(a, list):\n\t\terrs = []\n\t\tmax_errs = []\n\t\tmean_errs = []\n\t\tfor i, (a_i, b_i) in enumerate(zip(a, b)):\n\t\t\terr_i, max_err_i, mean_err_i = check_recursive(a_i, b_i, depth=depth+1, key=i)\n\t\t\terrs.append(err_i)\n\t\t\tmax_errs.append(max_err_i)\n\t\t\tmean_errs.append(mean_err_i)\n\t\t\tsucc = (max_err_i<tol_max) and (mean_err_i<tol_mean)\n\t\t\tif succ:\n\t\t\t\tprint(f'{str_depth}>{i}: success = {succ}')\n\t\t\telse:\n\t\t\t\tprint(f'{str_depth}>{i}: success = {succ}:\\t{err_i}\\t{max_err_i}\\t{mean_err_i}')\n\n\t\treturn np.sum(errs), max(max_errs), np.mean(mean_errs)\n\t\n\tif isinstance(a, dict):\n\t\terrs = []\n\t\tmax_errs = []\n\t\tmean_errs = []\n\t\tfor key in a.keys():\n\t\t\terr_i, max_err_i, mean_err_i = check_recursive(a[key], b[key], depth=depth+1, key=key)\n\t\t\terrs.append(err_i)\n\t\t\tmax_errs.append(max_err_i)\n\t\t\tmean_errs.append(mean_err_i)\n\t\t\tsucc = (max_err_i<tol_max) and (mean_err_i<tol_mean)\n\t\t\tif succ:\n\t\t\t\tprint(f'{str_depth}>{key}: success = {succ}')\n\t\t\telse:\n\t\t\t\tprint(f'{str_depth}>{key}: success = {succ}:\\t{err_i}\\t{max_err_i}\\t{mean_err_i}')\n\n\t\treturn np.sum(errs), max(max_errs), np.mean(mean_errs)\n\t\n\tif isinstance(a, np.ndarray):\n\t\ta = torch.from_numpy(a)\n\t\n\tif isinstance(b, np.ndarray):\n\t\tb = torch.from_numpy(b)\n\n\tif isinstance(a, float) or isinstance(a, int):\n\t\ta = torch.Tensor([a])\n\tif isinstance(b, float) or isinstance(b, int):\n\t\tb = torch.Tensor([b])\n\t\n\terr, max_err, mean_err = check_success(a, b)\n\tsucc = (max_err<tol_max) and (mean_err<tol_mean)\n\tprint(f'{str_depth}> success = {succ}:\\t{err}\\t{max_err}\\t{mean_err}')\n\treturn check_success(a, b)\n\ndef load_data(args, filename):\n\twith open(Path(args.debug_dir)/Path(f'{filename}.pkl'), 'rb') as f:\n\t\tdata = pickle.load(f)\n\tif len(data) == 4:\n\t\tfnargs1, fnargs2, params, res = data\n\t\treturn convert(fnargs1), convert(fnargs2), params, convert(res)\n\tif len(data) == 3:\n\t\targs, params, res = data\n\t\treturn convert(args), params, convert(res)\n\telif len(data) == 2:\n\t\targs, res = data\n\t\treturn convert(args), res\n\ndef get_total_alloc():\n\treporter.collect_tensor()\n\treporter.get_stats()\n\ttarget_device = torch.device('cuda:0')\n\ttotal_mem = 0\n\ttotal_numel = 0\n\tfor device, tensor_stats in reporter.device_tensor_stat.items():\n\t\tif device != 
target_device:\n\t\t\tcontinue\n\t\tfor stat in tensor_stats:\n\t\t\tname, size, numel, mem = stat\n\t\t\ttotal_mem += mem\n\t\t\ttotal_numel += numel\n\treturn total_mem"
] |
[
[
"torch.device",
"torch.max",
"numpy.sum",
"numpy.mean",
"torch.from_numpy",
"torch.Tensor",
"torch.mean"
]
] |
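A minimal, self-contained sketch of the tensor-comparison pattern that `convert` / `check_success` / `check_recursive` in the preceding code field implement (numpy inputs converted to torch tensors, then judged by max and mean absolute error); the reference/result dictionaries and the 1e-3 tolerances below are illustrative assumptions, not values from the dataset.

import numpy as np
import torch

def max_mean_err(a: torch.Tensor, b: torch.Tensor):
    # Same error measure as check_success: elementwise absolute difference in float32.
    err = torch.abs(a.to(torch.float32) - b.to(torch.float32))
    return torch.max(err).item(), torch.mean(err).item()

# Hypothetical nested outputs: a numpy reference vs. a torch result.
reference = {"coords": np.zeros((4, 3)), "score": 1.0}
result = {"coords": torch.zeros(4, 3, dtype=torch.float64), "score": torch.tensor([1.0])}

for key in reference:
    ref_t = torch.from_numpy(np.atleast_1d(np.asarray(reference[key], dtype=np.float64)))
    max_e, mean_e = max_mean_err(ref_t, result[key])
    print(key, "success =", max_e < 1e-3 and mean_e < 1e-3)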
AWSjswinney/LightGBM
|
[
"abdb234f7051b15d486902f2bda93eb5a2221e06"
] |
[
"tests/python_package_test/test_engine.py"
] |
[
"# coding: utf-8\nimport copy\nimport itertools\nimport math\nimport pickle\nimport platform\nimport random\nfrom pathlib import Path\n\nimport numpy as np\nimport psutil\nimport pytest\nfrom scipy.sparse import csr_matrix, isspmatrix_csc, isspmatrix_csr\nfrom sklearn.datasets import load_svmlight_file, make_multilabel_classification\nfrom sklearn.metrics import average_precision_score, log_loss, mean_absolute_error, mean_squared_error, roc_auc_score\nfrom sklearn.model_selection import GroupKFold, TimeSeriesSplit, train_test_split\n\nimport lightgbm as lgb\n\nfrom .utils import load_boston, load_breast_cancer, load_digits, load_iris\n\ndecreasing_generator = itertools.count(0, -1)\n\n\ndef dummy_obj(preds, train_data):\n return np.ones(preds.shape), np.ones(preds.shape)\n\n\ndef multi_logloss(y_true, y_pred):\n return np.mean([-math.log(y_pred[i][y]) for i, y in enumerate(y_true)])\n\n\ndef top_k_error(y_true, y_pred, k):\n if k == y_pred.shape[1]:\n return 0\n max_rest = np.max(-np.partition(-y_pred, k)[:, k:], axis=1)\n return 1 - np.mean((y_pred[np.arange(len(y_true)), y_true] > max_rest))\n\n\ndef constant_metric(preds, train_data):\n return ('error', 0.0, False)\n\n\ndef decreasing_metric(preds, train_data):\n return ('decreasing_metric', next(decreasing_generator), False)\n\n\ndef categorize(continuous_x):\n return np.digitize(continuous_x, bins=np.arange(0, 1, 0.01))\n\n\ndef test_binary():\n X, y = load_breast_cancer(return_X_y=True)\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)\n params = {\n 'objective': 'binary',\n 'metric': 'binary_logloss',\n 'verbose': -1,\n 'num_iteration': 50 # test num_iteration in dict here\n }\n lgb_train = lgb.Dataset(X_train, y_train)\n lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)\n evals_result = {}\n gbm = lgb.train(params, lgb_train,\n num_boost_round=20,\n valid_sets=lgb_eval,\n verbose_eval=False,\n evals_result=evals_result)\n ret = log_loss(y_test, gbm.predict(X_test))\n assert ret < 0.14\n assert len(evals_result['valid_0']['binary_logloss']) == 50\n assert evals_result['valid_0']['binary_logloss'][-1] == pytest.approx(ret)\n\n\ndef test_rf():\n X, y = load_breast_cancer(return_X_y=True)\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)\n params = {\n 'boosting_type': 'rf',\n 'objective': 'binary',\n 'bagging_freq': 1,\n 'bagging_fraction': 0.5,\n 'feature_fraction': 0.5,\n 'num_leaves': 50,\n 'metric': 'binary_logloss',\n 'verbose': -1\n }\n lgb_train = lgb.Dataset(X_train, y_train)\n lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)\n evals_result = {}\n gbm = lgb.train(params, lgb_train,\n num_boost_round=50,\n valid_sets=lgb_eval,\n verbose_eval=False,\n evals_result=evals_result)\n ret = log_loss(y_test, gbm.predict(X_test))\n assert ret < 0.19\n assert evals_result['valid_0']['binary_logloss'][-1] == pytest.approx(ret)\n\n\ndef test_regression():\n X, y = load_boston(return_X_y=True)\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)\n params = {\n 'metric': 'l2',\n 'verbose': -1\n }\n lgb_train = lgb.Dataset(X_train, y_train)\n lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)\n evals_result = {}\n gbm = lgb.train(params, lgb_train,\n num_boost_round=50,\n valid_sets=lgb_eval,\n verbose_eval=False,\n evals_result=evals_result)\n ret = mean_squared_error(y_test, gbm.predict(X_test))\n assert ret < 7\n assert evals_result['valid_0']['l2'][-1] == pytest.approx(ret)\n\n\ndef 
test_missing_value_handle():\n X_train = np.zeros((100, 1))\n y_train = np.zeros(100)\n trues = random.sample(range(100), 20)\n for idx in trues:\n X_train[idx, 0] = np.nan\n y_train[idx] = 1\n lgb_train = lgb.Dataset(X_train, y_train)\n lgb_eval = lgb.Dataset(X_train, y_train)\n\n params = {\n 'metric': 'l2',\n 'verbose': -1,\n 'boost_from_average': False\n }\n evals_result = {}\n gbm = lgb.train(params, lgb_train,\n num_boost_round=20,\n valid_sets=lgb_eval,\n verbose_eval=False,\n evals_result=evals_result)\n ret = mean_squared_error(y_train, gbm.predict(X_train))\n assert ret < 0.005\n assert evals_result['valid_0']['l2'][-1] == pytest.approx(ret)\n\n\ndef test_missing_value_handle_more_na():\n X_train = np.ones((100, 1))\n y_train = np.ones(100)\n trues = random.sample(range(100), 80)\n for idx in trues:\n X_train[idx, 0] = np.nan\n y_train[idx] = 0\n lgb_train = lgb.Dataset(X_train, y_train)\n lgb_eval = lgb.Dataset(X_train, y_train)\n\n params = {\n 'metric': 'l2',\n 'verbose': -1,\n 'boost_from_average': False\n }\n evals_result = {}\n gbm = lgb.train(params, lgb_train,\n num_boost_round=20,\n valid_sets=lgb_eval,\n verbose_eval=False,\n evals_result=evals_result)\n ret = mean_squared_error(y_train, gbm.predict(X_train))\n assert ret < 0.005\n assert evals_result['valid_0']['l2'][-1] == pytest.approx(ret)\n\n\ndef test_missing_value_handle_na():\n x = [0, 1, 2, 3, 4, 5, 6, 7, np.nan]\n y = [1, 1, 1, 1, 0, 0, 0, 0, 1]\n\n X_train = np.array(x).reshape(len(x), 1)\n y_train = np.array(y)\n lgb_train = lgb.Dataset(X_train, y_train)\n lgb_eval = lgb.Dataset(X_train, y_train)\n\n params = {\n 'objective': 'regression',\n 'metric': 'auc',\n 'verbose': -1,\n 'boost_from_average': False,\n 'min_data': 1,\n 'num_leaves': 2,\n 'learning_rate': 1,\n 'min_data_in_bin': 1,\n 'zero_as_missing': False\n }\n evals_result = {}\n gbm = lgb.train(params, lgb_train,\n num_boost_round=1,\n valid_sets=lgb_eval,\n verbose_eval=False,\n evals_result=evals_result)\n pred = gbm.predict(X_train)\n np.testing.assert_allclose(pred, y)\n ret = roc_auc_score(y_train, pred)\n assert ret > 0.999\n assert evals_result['valid_0']['auc'][-1] == pytest.approx(ret)\n\n\ndef test_missing_value_handle_zero():\n x = [0, 1, 2, 3, 4, 5, 6, 7, np.nan]\n y = [0, 1, 1, 1, 0, 0, 0, 0, 0]\n\n X_train = np.array(x).reshape(len(x), 1)\n y_train = np.array(y)\n lgb_train = lgb.Dataset(X_train, y_train)\n lgb_eval = lgb.Dataset(X_train, y_train)\n\n params = {\n 'objective': 'regression',\n 'metric': 'auc',\n 'verbose': -1,\n 'boost_from_average': False,\n 'min_data': 1,\n 'num_leaves': 2,\n 'learning_rate': 1,\n 'min_data_in_bin': 1,\n 'zero_as_missing': True\n }\n evals_result = {}\n gbm = lgb.train(params, lgb_train,\n num_boost_round=1,\n valid_sets=lgb_eval,\n verbose_eval=False,\n evals_result=evals_result)\n pred = gbm.predict(X_train)\n np.testing.assert_allclose(pred, y)\n ret = roc_auc_score(y_train, pred)\n assert ret > 0.999\n assert evals_result['valid_0']['auc'][-1] == pytest.approx(ret)\n\n\ndef test_missing_value_handle_none():\n x = [0, 1, 2, 3, 4, 5, 6, 7, np.nan]\n y = [0, 1, 1, 1, 0, 0, 0, 0, 0]\n\n X_train = np.array(x).reshape(len(x), 1)\n y_train = np.array(y)\n lgb_train = lgb.Dataset(X_train, y_train)\n lgb_eval = lgb.Dataset(X_train, y_train)\n\n params = {\n 'objective': 'regression',\n 'metric': 'auc',\n 'verbose': -1,\n 'boost_from_average': False,\n 'min_data': 1,\n 'num_leaves': 2,\n 'learning_rate': 1,\n 'min_data_in_bin': 1,\n 'use_missing': False\n }\n evals_result = {}\n gbm = lgb.train(params, 
lgb_train,\n num_boost_round=1,\n valid_sets=lgb_eval,\n verbose_eval=False,\n evals_result=evals_result)\n pred = gbm.predict(X_train)\n assert pred[0] == pytest.approx(pred[1])\n assert pred[-1] == pytest.approx(pred[0])\n ret = roc_auc_score(y_train, pred)\n assert ret > 0.83\n assert evals_result['valid_0']['auc'][-1] == pytest.approx(ret)\n\n\ndef test_categorical_handle():\n x = [0, 1, 2, 3, 4, 5, 6, 7]\n y = [0, 1, 0, 1, 0, 1, 0, 1]\n\n X_train = np.array(x).reshape(len(x), 1)\n y_train = np.array(y)\n lgb_train = lgb.Dataset(X_train, y_train)\n lgb_eval = lgb.Dataset(X_train, y_train)\n\n params = {\n 'objective': 'regression',\n 'metric': 'auc',\n 'verbose': -1,\n 'boost_from_average': False,\n 'min_data': 1,\n 'num_leaves': 2,\n 'learning_rate': 1,\n 'min_data_in_bin': 1,\n 'min_data_per_group': 1,\n 'cat_smooth': 1,\n 'cat_l2': 0,\n 'max_cat_to_onehot': 1,\n 'zero_as_missing': True,\n 'categorical_column': 0\n }\n evals_result = {}\n gbm = lgb.train(params, lgb_train,\n num_boost_round=1,\n valid_sets=lgb_eval,\n verbose_eval=False,\n evals_result=evals_result)\n pred = gbm.predict(X_train)\n np.testing.assert_allclose(pred, y)\n ret = roc_auc_score(y_train, pred)\n assert ret > 0.999\n assert evals_result['valid_0']['auc'][-1] == pytest.approx(ret)\n\n\ndef test_categorical_handle_na():\n x = [0, np.nan, 0, np.nan, 0, np.nan]\n y = [0, 1, 0, 1, 0, 1]\n\n X_train = np.array(x).reshape(len(x), 1)\n y_train = np.array(y)\n lgb_train = lgb.Dataset(X_train, y_train)\n lgb_eval = lgb.Dataset(X_train, y_train)\n\n params = {\n 'objective': 'regression',\n 'metric': 'auc',\n 'verbose': -1,\n 'boost_from_average': False,\n 'min_data': 1,\n 'num_leaves': 2,\n 'learning_rate': 1,\n 'min_data_in_bin': 1,\n 'min_data_per_group': 1,\n 'cat_smooth': 1,\n 'cat_l2': 0,\n 'max_cat_to_onehot': 1,\n 'zero_as_missing': False,\n 'categorical_column': 0\n }\n evals_result = {}\n gbm = lgb.train(params, lgb_train,\n num_boost_round=1,\n valid_sets=lgb_eval,\n verbose_eval=False,\n evals_result=evals_result)\n pred = gbm.predict(X_train)\n np.testing.assert_allclose(pred, y)\n ret = roc_auc_score(y_train, pred)\n assert ret > 0.999\n assert evals_result['valid_0']['auc'][-1] == pytest.approx(ret)\n\n\ndef test_categorical_non_zero_inputs():\n x = [1, 1, 1, 1, 1, 1, 2, 2]\n y = [1, 1, 1, 1, 1, 1, 0, 0]\n\n X_train = np.array(x).reshape(len(x), 1)\n y_train = np.array(y)\n lgb_train = lgb.Dataset(X_train, y_train)\n lgb_eval = lgb.Dataset(X_train, y_train)\n\n params = {\n 'objective': 'regression',\n 'metric': 'auc',\n 'verbose': -1,\n 'boost_from_average': False,\n 'min_data': 1,\n 'num_leaves': 2,\n 'learning_rate': 1,\n 'min_data_in_bin': 1,\n 'min_data_per_group': 1,\n 'cat_smooth': 1,\n 'cat_l2': 0,\n 'max_cat_to_onehot': 1,\n 'zero_as_missing': False,\n 'categorical_column': 0\n }\n evals_result = {}\n gbm = lgb.train(params, lgb_train,\n num_boost_round=1,\n valid_sets=lgb_eval,\n verbose_eval=False,\n evals_result=evals_result)\n pred = gbm.predict(X_train)\n np.testing.assert_allclose(pred, y)\n ret = roc_auc_score(y_train, pred)\n assert ret > 0.999\n assert evals_result['valid_0']['auc'][-1] == pytest.approx(ret)\n\n\ndef test_multiclass():\n X, y = load_digits(n_class=10, return_X_y=True)\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)\n params = {\n 'objective': 'multiclass',\n 'metric': 'multi_logloss',\n 'num_class': 10,\n 'verbose': -1\n }\n lgb_train = lgb.Dataset(X_train, y_train, params=params)\n lgb_eval = lgb.Dataset(X_test, y_test, 
reference=lgb_train, params=params)\n evals_result = {}\n gbm = lgb.train(params, lgb_train,\n num_boost_round=50,\n valid_sets=lgb_eval,\n verbose_eval=False,\n evals_result=evals_result)\n ret = multi_logloss(y_test, gbm.predict(X_test))\n assert ret < 0.16\n assert evals_result['valid_0']['multi_logloss'][-1] == pytest.approx(ret)\n\n\ndef test_multiclass_rf():\n X, y = load_digits(n_class=10, return_X_y=True)\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)\n params = {\n 'boosting_type': 'rf',\n 'objective': 'multiclass',\n 'metric': 'multi_logloss',\n 'bagging_freq': 1,\n 'bagging_fraction': 0.6,\n 'feature_fraction': 0.6,\n 'num_class': 10,\n 'num_leaves': 50,\n 'min_data': 1,\n 'verbose': -1,\n 'gpu_use_dp': True\n }\n lgb_train = lgb.Dataset(X_train, y_train, params=params)\n lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train, params=params)\n evals_result = {}\n gbm = lgb.train(params, lgb_train,\n num_boost_round=50,\n valid_sets=lgb_eval,\n verbose_eval=False,\n evals_result=evals_result)\n ret = multi_logloss(y_test, gbm.predict(X_test))\n assert ret < 0.23\n assert evals_result['valid_0']['multi_logloss'][-1] == pytest.approx(ret)\n\n\ndef test_multiclass_prediction_early_stopping():\n X, y = load_digits(n_class=10, return_X_y=True)\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)\n params = {\n 'objective': 'multiclass',\n 'metric': 'multi_logloss',\n 'num_class': 10,\n 'verbose': -1\n }\n lgb_train = lgb.Dataset(X_train, y_train, params=params)\n gbm = lgb.train(params, lgb_train,\n num_boost_round=50)\n\n pred_parameter = {\"pred_early_stop\": True,\n \"pred_early_stop_freq\": 5,\n \"pred_early_stop_margin\": 1.5}\n ret = multi_logloss(y_test, gbm.predict(X_test, **pred_parameter))\n assert ret < 0.8\n assert ret > 0.6 # loss will be higher than when evaluating the full model\n\n pred_parameter[\"pred_early_stop_margin\"] = 5.5\n ret = multi_logloss(y_test, gbm.predict(X_test, **pred_parameter))\n assert ret < 0.2\n\n\ndef test_multi_class_error():\n X, y = load_digits(n_class=10, return_X_y=True)\n params = {'objective': 'multiclass', 'num_classes': 10, 'metric': 'multi_error',\n 'num_leaves': 4, 'verbose': -1}\n lgb_data = lgb.Dataset(X, label=y)\n est = lgb.train(params, lgb_data, num_boost_round=10)\n predict_default = est.predict(X)\n results = {}\n est = lgb.train(dict(params, multi_error_top_k=1), lgb_data, num_boost_round=10,\n valid_sets=[lgb_data], evals_result=results, verbose_eval=False)\n predict_1 = est.predict(X)\n # check that default gives same result as k = 1\n np.testing.assert_allclose(predict_1, predict_default)\n # check against independent calculation for k = 1\n err = top_k_error(y, predict_1, 1)\n assert results['training']['multi_error'][-1] == pytest.approx(err)\n # check against independent calculation for k = 2\n results = {}\n est = lgb.train(dict(params, multi_error_top_k=2), lgb_data, num_boost_round=10,\n valid_sets=[lgb_data], evals_result=results, verbose_eval=False)\n predict_2 = est.predict(X)\n err = top_k_error(y, predict_2, 2)\n assert results['training']['multi_error@2'][-1] == pytest.approx(err)\n # check against independent calculation for k = 10\n results = {}\n est = lgb.train(dict(params, multi_error_top_k=10), lgb_data, num_boost_round=10,\n valid_sets=[lgb_data], evals_result=results, verbose_eval=False)\n predict_3 = est.predict(X)\n err = top_k_error(y, predict_3, 10)\n assert results['training']['multi_error@10'][-1] == 
pytest.approx(err)\n # check cases where predictions are equal\n X = np.array([[0, 0], [0, 0]])\n y = np.array([0, 1])\n lgb_data = lgb.Dataset(X, label=y)\n params['num_classes'] = 2\n results = {}\n lgb.train(params, lgb_data, num_boost_round=10,\n valid_sets=[lgb_data], evals_result=results, verbose_eval=False)\n assert results['training']['multi_error'][-1] == pytest.approx(1)\n results = {}\n lgb.train(dict(params, multi_error_top_k=2), lgb_data, num_boost_round=10,\n valid_sets=[lgb_data], evals_result=results, verbose_eval=False)\n assert results['training']['multi_error@2'][-1] == pytest.approx(0)\n\n\ndef test_auc_mu():\n # should give same result as binary auc for 2 classes\n X, y = load_digits(n_class=10, return_X_y=True)\n y_new = np.zeros((len(y)))\n y_new[y != 0] = 1\n lgb_X = lgb.Dataset(X, label=y_new)\n params = {'objective': 'multiclass',\n 'metric': 'auc_mu',\n 'verbose': -1,\n 'num_classes': 2,\n 'seed': 0}\n results_auc_mu = {}\n lgb.train(params, lgb_X, num_boost_round=10, valid_sets=[lgb_X], evals_result=results_auc_mu)\n params = {'objective': 'binary',\n 'metric': 'auc',\n 'verbose': -1,\n 'seed': 0}\n results_auc = {}\n lgb.train(params, lgb_X, num_boost_round=10, valid_sets=[lgb_X], evals_result=results_auc)\n np.testing.assert_allclose(results_auc_mu['training']['auc_mu'], results_auc['training']['auc'])\n # test the case where all predictions are equal\n lgb_X = lgb.Dataset(X[:10], label=y_new[:10])\n params = {'objective': 'multiclass',\n 'metric': 'auc_mu',\n 'verbose': -1,\n 'num_classes': 2,\n 'min_data_in_leaf': 20,\n 'seed': 0}\n results_auc_mu = {}\n lgb.train(params, lgb_X, num_boost_round=10, valid_sets=[lgb_X], evals_result=results_auc_mu)\n assert results_auc_mu['training']['auc_mu'][-1] == pytest.approx(0.5)\n # test that weighted data gives different auc_mu\n lgb_X = lgb.Dataset(X, label=y)\n lgb_X_weighted = lgb.Dataset(X, label=y, weight=np.abs(np.random.normal(size=y.shape)))\n results_unweighted = {}\n results_weighted = {}\n params = dict(params, num_classes=10, num_leaves=5)\n lgb.train(params, lgb_X, num_boost_round=10, valid_sets=[lgb_X], evals_result=results_unweighted)\n lgb.train(params, lgb_X_weighted, num_boost_round=10, valid_sets=[lgb_X_weighted],\n evals_result=results_weighted)\n assert results_weighted['training']['auc_mu'][-1] < 1\n assert results_unweighted['training']['auc_mu'][-1] != results_weighted['training']['auc_mu'][-1]\n # test that equal data weights give same auc_mu as unweighted data\n lgb_X_weighted = lgb.Dataset(X, label=y, weight=np.ones(y.shape) * 0.5)\n lgb.train(params, lgb_X_weighted, num_boost_round=10, valid_sets=[lgb_X_weighted],\n evals_result=results_weighted)\n assert results_unweighted['training']['auc_mu'][-1] == pytest.approx(\n results_weighted['training']['auc_mu'][-1], abs=1e-5)\n # should give 1 when accuracy = 1\n X = X[:10, :]\n y = y[:10]\n lgb_X = lgb.Dataset(X, label=y)\n params = {'objective': 'multiclass',\n 'metric': 'auc_mu',\n 'num_classes': 10,\n 'min_data_in_leaf': 1,\n 'verbose': -1}\n results = {}\n lgb.train(params, lgb_X, num_boost_round=100, valid_sets=[lgb_X], evals_result=results)\n assert results['training']['auc_mu'][-1] == pytest.approx(1)\n # test loading class weights\n Xy = np.loadtxt(\n str(Path(__file__).absolute().parents[2] / 'examples' / 'multiclass_classification' / 'multiclass.train')\n )\n y = Xy[:, 0]\n X = Xy[:, 1:]\n lgb_X = lgb.Dataset(X, label=y)\n params = {'objective': 'multiclass',\n 'metric': 'auc_mu',\n 'auc_mu_weights': [0, 2, 2, 2, 2, 1, 0, 1, 1, 1, 1, 
1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0],\n 'num_classes': 5,\n 'verbose': -1,\n 'seed': 0}\n results_weight = {}\n lgb.train(params, lgb_X, num_boost_round=5, valid_sets=[lgb_X], evals_result=results_weight)\n params['auc_mu_weights'] = []\n results_no_weight = {}\n lgb.train(params, lgb_X, num_boost_round=5, valid_sets=[lgb_X], evals_result=results_no_weight)\n assert results_weight['training']['auc_mu'][-1] != results_no_weight['training']['auc_mu'][-1]\n\n\ndef test_ranking_prediction_early_stopping():\n rank_example_dir = Path(__file__).absolute().parents[2] / 'examples' / 'lambdarank'\n X_train, y_train = load_svmlight_file(str(rank_example_dir / 'rank.train'))\n q_train = np.loadtxt(str(rank_example_dir / 'rank.train.query'))\n X_test, _ = load_svmlight_file(str(rank_example_dir / 'rank.test'))\n params = {\n 'objective': 'rank_xendcg',\n 'verbose': -1\n }\n lgb_train = lgb.Dataset(X_train, y_train, group=q_train, params=params)\n gbm = lgb.train(params, lgb_train, num_boost_round=50)\n\n pred_parameter = {\"pred_early_stop\": True,\n \"pred_early_stop_freq\": 5,\n \"pred_early_stop_margin\": 1.5}\n ret_early = gbm.predict(X_test, **pred_parameter)\n\n pred_parameter[\"pred_early_stop_margin\"] = 5.5\n ret_early_more_strict = gbm.predict(X_test, **pred_parameter)\n with pytest.raises(AssertionError):\n np.testing.assert_allclose(ret_early, ret_early_more_strict)\n\n\ndef test_early_stopping():\n X, y = load_breast_cancer(return_X_y=True)\n params = {\n 'objective': 'binary',\n 'metric': 'binary_logloss',\n 'verbose': -1\n }\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)\n lgb_train = lgb.Dataset(X_train, y_train)\n lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)\n valid_set_name = 'valid_set'\n # no early stopping\n gbm = lgb.train(params, lgb_train,\n num_boost_round=10,\n valid_sets=lgb_eval,\n valid_names=valid_set_name,\n verbose_eval=False,\n early_stopping_rounds=5)\n assert gbm.best_iteration == 10\n assert valid_set_name in gbm.best_score\n assert 'binary_logloss' in gbm.best_score[valid_set_name]\n # early stopping occurs\n gbm = lgb.train(params, lgb_train,\n num_boost_round=40,\n valid_sets=lgb_eval,\n valid_names=valid_set_name,\n verbose_eval=False,\n early_stopping_rounds=5)\n assert gbm.best_iteration <= 39\n assert valid_set_name in gbm.best_score\n assert 'binary_logloss' in gbm.best_score[valid_set_name]\n\n\ndef test_continue_train():\n X, y = load_boston(return_X_y=True)\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)\n params = {\n 'objective': 'regression',\n 'metric': 'l1',\n 'verbose': -1\n }\n lgb_train = lgb.Dataset(X_train, y_train, free_raw_data=False)\n lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train, free_raw_data=False)\n init_gbm = lgb.train(params, lgb_train, num_boost_round=20)\n model_name = 'model.txt'\n init_gbm.save_model(model_name)\n evals_result = {}\n gbm = lgb.train(params, lgb_train,\n num_boost_round=30,\n valid_sets=lgb_eval,\n verbose_eval=False,\n # test custom eval metrics\n feval=(lambda p, d: ('custom_mae', mean_absolute_error(p, d.get_label()), False)),\n evals_result=evals_result,\n init_model='model.txt')\n ret = mean_absolute_error(y_test, gbm.predict(X_test))\n assert ret < 2.0\n assert evals_result['valid_0']['l1'][-1] == pytest.approx(ret)\n np.testing.assert_allclose(evals_result['valid_0']['l1'], evals_result['valid_0']['custom_mae'])\n\n\ndef test_continue_train_reused_dataset():\n X, y = 
load_boston(return_X_y=True)\n params = {\n 'objective': 'regression',\n 'verbose': -1\n }\n lgb_train = lgb.Dataset(X, y, free_raw_data=False)\n init_gbm = lgb.train(params, lgb_train, num_boost_round=5)\n init_gbm_2 = lgb.train(params, lgb_train, num_boost_round=5, init_model=init_gbm)\n init_gbm_3 = lgb.train(params, lgb_train, num_boost_round=5, init_model=init_gbm_2)\n gbm = lgb.train(params, lgb_train, num_boost_round=5, init_model=init_gbm_3)\n assert gbm.current_iteration() == 20\n\n\ndef test_continue_train_dart():\n X, y = load_boston(return_X_y=True)\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)\n params = {\n 'boosting_type': 'dart',\n 'objective': 'regression',\n 'metric': 'l1',\n 'verbose': -1\n }\n lgb_train = lgb.Dataset(X_train, y_train, free_raw_data=False)\n lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train, free_raw_data=False)\n init_gbm = lgb.train(params, lgb_train, num_boost_round=50)\n evals_result = {}\n gbm = lgb.train(params, lgb_train,\n num_boost_round=50,\n valid_sets=lgb_eval,\n verbose_eval=False,\n evals_result=evals_result,\n init_model=init_gbm)\n ret = mean_absolute_error(y_test, gbm.predict(X_test))\n assert ret < 2.0\n assert evals_result['valid_0']['l1'][-1] == pytest.approx(ret)\n\n\ndef test_continue_train_multiclass():\n X, y = load_iris(return_X_y=True)\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)\n params = {\n 'objective': 'multiclass',\n 'metric': 'multi_logloss',\n 'num_class': 3,\n 'verbose': -1\n }\n lgb_train = lgb.Dataset(X_train, y_train, params=params, free_raw_data=False)\n lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train, params=params, free_raw_data=False)\n init_gbm = lgb.train(params, lgb_train, num_boost_round=20)\n evals_result = {}\n gbm = lgb.train(params, lgb_train,\n num_boost_round=30,\n valid_sets=lgb_eval,\n verbose_eval=False,\n evals_result=evals_result,\n init_model=init_gbm)\n ret = multi_logloss(y_test, gbm.predict(X_test))\n assert ret < 0.1\n assert evals_result['valid_0']['multi_logloss'][-1] == pytest.approx(ret)\n\n\ndef test_cv():\n X_train, y_train = load_boston(return_X_y=True)\n params = {'verbose': -1}\n lgb_train = lgb.Dataset(X_train, y_train)\n # shuffle = False, override metric in params\n params_with_metric = {'metric': 'l2', 'verbose': -1}\n cv_res = lgb.cv(params_with_metric, lgb_train, num_boost_round=10,\n nfold=3, stratified=False, shuffle=False,\n metrics='l1', verbose_eval=False)\n assert 'l1-mean' in cv_res\n assert 'l2-mean' not in cv_res\n assert len(cv_res['l1-mean']) == 10\n # shuffle = True, callbacks\n cv_res = lgb.cv(params, lgb_train, num_boost_round=10, nfold=3, stratified=False, shuffle=True,\n metrics='l1', verbose_eval=False,\n callbacks=[lgb.reset_parameter(learning_rate=lambda i: 0.1 - 0.001 * i)])\n assert 'l1-mean' in cv_res\n assert len(cv_res['l1-mean']) == 10\n # enable display training loss\n cv_res = lgb.cv(params_with_metric, lgb_train, num_boost_round=10,\n nfold=3, stratified=False, shuffle=False,\n metrics='l1', verbose_eval=False, eval_train_metric=True)\n assert 'train l1-mean' in cv_res\n assert 'valid l1-mean' in cv_res\n assert 'train l2-mean' not in cv_res\n assert 'valid l2-mean' not in cv_res\n assert len(cv_res['train l1-mean']) == 10\n assert len(cv_res['valid l1-mean']) == 10\n # self defined folds\n tss = TimeSeriesSplit(3)\n folds = tss.split(X_train)\n cv_res_gen = lgb.cv(params_with_metric, lgb_train, num_boost_round=10, folds=folds,\n 
verbose_eval=False)\n cv_res_obj = lgb.cv(params_with_metric, lgb_train, num_boost_round=10, folds=tss,\n verbose_eval=False)\n np.testing.assert_allclose(cv_res_gen['l2-mean'], cv_res_obj['l2-mean'])\n # LambdaRank\n rank_example_dir = Path(__file__).absolute().parents[2] / 'examples' / 'lambdarank'\n X_train, y_train = load_svmlight_file(str(rank_example_dir / 'rank.train'))\n q_train = np.loadtxt(str(rank_example_dir / 'rank.train.query'))\n params_lambdarank = {'objective': 'lambdarank', 'verbose': -1, 'eval_at': 3}\n lgb_train = lgb.Dataset(X_train, y_train, group=q_train)\n # ... with l2 metric\n cv_res_lambda = lgb.cv(params_lambdarank, lgb_train, num_boost_round=10, nfold=3,\n metrics='l2', verbose_eval=False)\n assert len(cv_res_lambda) == 2\n assert not np.isnan(cv_res_lambda['l2-mean']).any()\n # ... with NDCG (default) metric\n cv_res_lambda = lgb.cv(params_lambdarank, lgb_train, num_boost_round=10, nfold=3,\n verbose_eval=False)\n assert len(cv_res_lambda) == 2\n assert not np.isnan(cv_res_lambda['ndcg@3-mean']).any()\n # self defined folds with lambdarank\n cv_res_lambda_obj = lgb.cv(params_lambdarank, lgb_train, num_boost_round=10,\n folds=GroupKFold(n_splits=3),\n verbose_eval=False)\n np.testing.assert_allclose(cv_res_lambda['ndcg@3-mean'], cv_res_lambda_obj['ndcg@3-mean'])\n\n\ndef test_cvbooster():\n X, y = load_breast_cancer(return_X_y=True)\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)\n params = {\n 'objective': 'binary',\n 'metric': 'binary_logloss',\n 'verbose': -1,\n }\n lgb_train = lgb.Dataset(X_train, y_train)\n # with early stopping\n cv_res = lgb.cv(params, lgb_train,\n num_boost_round=25,\n early_stopping_rounds=5,\n verbose_eval=False,\n nfold=3,\n return_cvbooster=True)\n assert 'cvbooster' in cv_res\n cvb = cv_res['cvbooster']\n assert isinstance(cvb, lgb.CVBooster)\n assert isinstance(cvb.boosters, list)\n assert len(cvb.boosters) == 3\n assert all(isinstance(bst, lgb.Booster) for bst in cvb.boosters)\n assert cvb.best_iteration > 0\n # predict by each fold booster\n preds = cvb.predict(X_test, num_iteration=cvb.best_iteration)\n assert isinstance(preds, list)\n assert len(preds) == 3\n # fold averaging\n avg_pred = np.mean(preds, axis=0)\n ret = log_loss(y_test, avg_pred)\n assert ret < 0.13\n # without early stopping\n cv_res = lgb.cv(params, lgb_train,\n num_boost_round=20,\n verbose_eval=False,\n nfold=3,\n return_cvbooster=True)\n cvb = cv_res['cvbooster']\n assert cvb.best_iteration == -1\n preds = cvb.predict(X_test)\n avg_pred = np.mean(preds, axis=0)\n ret = log_loss(y_test, avg_pred)\n assert ret < 0.15\n\n\ndef test_feature_name():\n X_train, y_train = load_boston(return_X_y=True)\n params = {'verbose': -1}\n lgb_train = lgb.Dataset(X_train, y_train)\n feature_names = [f'f_{i}' for i in range(X_train.shape[-1])]\n gbm = lgb.train(params, lgb_train, num_boost_round=5, feature_name=feature_names)\n assert feature_names == gbm.feature_name()\n # test feature_names with whitespaces\n feature_names_with_space = [f'f {i}' for i in range(X_train.shape[-1])]\n gbm = lgb.train(params, lgb_train, num_boost_round=5, feature_name=feature_names_with_space)\n assert feature_names == gbm.feature_name()\n\n\ndef test_feature_name_with_non_ascii():\n X_train = np.random.normal(size=(100, 4))\n y_train = np.random.random(100)\n # This has non-ascii strings.\n feature_names = [u'F_零', u'F_一', u'F_二', u'F_三']\n params = {'verbose': -1}\n lgb_train = lgb.Dataset(X_train, y_train)\n\n gbm = lgb.train(params, lgb_train, 
num_boost_round=5, feature_name=feature_names)\n assert feature_names == gbm.feature_name()\n gbm.save_model('lgb.model')\n\n gbm2 = lgb.Booster(model_file='lgb.model')\n assert feature_names == gbm2.feature_name()\n\n\ndef test_save_load_copy_pickle():\n def train_and_predict(init_model=None, return_model=False):\n X, y = load_boston(return_X_y=True)\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)\n params = {\n 'objective': 'regression',\n 'metric': 'l2',\n 'verbose': -1\n }\n lgb_train = lgb.Dataset(X_train, y_train)\n gbm_template = lgb.train(params, lgb_train, num_boost_round=10, init_model=init_model)\n return gbm_template if return_model else mean_squared_error(y_test, gbm_template.predict(X_test))\n\n gbm = train_and_predict(return_model=True)\n ret_origin = train_and_predict(init_model=gbm)\n other_ret = []\n gbm.save_model('lgb.model')\n with open('lgb.model') as f: # check all params are logged into model file correctly\n assert f.read().find(\"[num_iterations: 10]\") != -1\n other_ret.append(train_and_predict(init_model='lgb.model'))\n gbm_load = lgb.Booster(model_file='lgb.model')\n other_ret.append(train_and_predict(init_model=gbm_load))\n other_ret.append(train_and_predict(init_model=copy.copy(gbm)))\n other_ret.append(train_and_predict(init_model=copy.deepcopy(gbm)))\n with open('lgb.pkl', 'wb') as f:\n pickle.dump(gbm, f)\n with open('lgb.pkl', 'rb') as f:\n gbm_pickle = pickle.load(f)\n other_ret.append(train_and_predict(init_model=gbm_pickle))\n gbm_pickles = pickle.loads(pickle.dumps(gbm))\n other_ret.append(train_and_predict(init_model=gbm_pickles))\n for ret in other_ret:\n assert ret_origin == pytest.approx(ret)\n\n\ndef test_pandas_categorical():\n pd = pytest.importorskip(\"pandas\")\n np.random.seed(42) # sometimes there is no difference how cols are treated (cat or not cat)\n X = pd.DataFrame({\"A\": np.random.permutation(['a', 'b', 'c', 'd'] * 75), # str\n \"B\": np.random.permutation([1, 2, 3] * 100), # int\n \"C\": np.random.permutation([0.1, 0.2, -0.1, -0.1, 0.2] * 60), # float\n \"D\": np.random.permutation([True, False] * 150), # bool\n \"E\": pd.Categorical(np.random.permutation(['z', 'y', 'x', 'w', 'v'] * 60),\n ordered=True)}) # str and ordered categorical\n y = np.random.permutation([0, 1] * 150)\n X_test = pd.DataFrame({\"A\": np.random.permutation(['a', 'b', 'e'] * 20), # unseen category\n \"B\": np.random.permutation([1, 3] * 30),\n \"C\": np.random.permutation([0.1, -0.1, 0.2, 0.2] * 15),\n \"D\": np.random.permutation([True, False] * 30),\n \"E\": pd.Categorical(np.random.permutation(['z', 'y'] * 30),\n ordered=True)})\n np.random.seed() # reset seed\n cat_cols_actual = [\"A\", \"B\", \"C\", \"D\"]\n cat_cols_to_store = cat_cols_actual + [\"E\"]\n X[cat_cols_actual] = X[cat_cols_actual].astype('category')\n X_test[cat_cols_actual] = X_test[cat_cols_actual].astype('category')\n cat_values = [X[col].cat.categories.tolist() for col in cat_cols_to_store]\n params = {\n 'objective': 'binary',\n 'metric': 'binary_logloss',\n 'verbose': -1\n }\n lgb_train = lgb.Dataset(X, y)\n gbm0 = lgb.train(params, lgb_train, num_boost_round=10)\n pred0 = gbm0.predict(X_test)\n assert lgb_train.categorical_feature == 'auto'\n lgb_train = lgb.Dataset(X, pd.DataFrame(y)) # also test that label can be one-column pd.DataFrame\n gbm1 = lgb.train(params, lgb_train, num_boost_round=10, categorical_feature=[0])\n pred1 = gbm1.predict(X_test)\n assert lgb_train.categorical_feature == [0]\n lgb_train = lgb.Dataset(X, pd.Series(y)) # 
also test that label can be pd.Series\n gbm2 = lgb.train(params, lgb_train, num_boost_round=10, categorical_feature=['A'])\n pred2 = gbm2.predict(X_test)\n assert lgb_train.categorical_feature == ['A']\n lgb_train = lgb.Dataset(X, y)\n gbm3 = lgb.train(params, lgb_train, num_boost_round=10, categorical_feature=['A', 'B', 'C', 'D'])\n pred3 = gbm3.predict(X_test)\n assert lgb_train.categorical_feature == ['A', 'B', 'C', 'D']\n gbm3.save_model('categorical.model')\n gbm4 = lgb.Booster(model_file='categorical.model')\n pred4 = gbm4.predict(X_test)\n model_str = gbm4.model_to_string()\n gbm4.model_from_string(model_str, False)\n pred5 = gbm4.predict(X_test)\n gbm5 = lgb.Booster(model_str=model_str)\n pred6 = gbm5.predict(X_test)\n lgb_train = lgb.Dataset(X, y)\n gbm6 = lgb.train(params, lgb_train, num_boost_round=10, categorical_feature=['A', 'B', 'C', 'D', 'E'])\n pred7 = gbm6.predict(X_test)\n assert lgb_train.categorical_feature == ['A', 'B', 'C', 'D', 'E']\n lgb_train = lgb.Dataset(X, y)\n gbm7 = lgb.train(params, lgb_train, num_boost_round=10, categorical_feature=[])\n pred8 = gbm7.predict(X_test)\n assert lgb_train.categorical_feature == []\n with pytest.raises(AssertionError):\n np.testing.assert_allclose(pred0, pred1)\n with pytest.raises(AssertionError):\n np.testing.assert_allclose(pred0, pred2)\n np.testing.assert_allclose(pred1, pred2)\n np.testing.assert_allclose(pred0, pred3)\n np.testing.assert_allclose(pred0, pred4)\n np.testing.assert_allclose(pred0, pred5)\n np.testing.assert_allclose(pred0, pred6)\n with pytest.raises(AssertionError):\n np.testing.assert_allclose(pred0, pred7) # ordered cat features aren't treated as cat features by default\n with pytest.raises(AssertionError):\n np.testing.assert_allclose(pred0, pred8)\n assert gbm0.pandas_categorical == cat_values\n assert gbm1.pandas_categorical == cat_values\n assert gbm2.pandas_categorical == cat_values\n assert gbm3.pandas_categorical == cat_values\n assert gbm4.pandas_categorical == cat_values\n assert gbm5.pandas_categorical == cat_values\n assert gbm6.pandas_categorical == cat_values\n assert gbm7.pandas_categorical == cat_values\n\n\ndef test_pandas_sparse():\n pd = pytest.importorskip(\"pandas\")\n try:\n from pandas.arrays import SparseArray\n except ImportError: # support old versions\n from pandas import SparseArray\n X = pd.DataFrame({\"A\": SparseArray(np.random.permutation([0, 1, 2] * 100)),\n \"B\": SparseArray(np.random.permutation([0.0, 0.1, 0.2, -0.1, 0.2] * 60)),\n \"C\": SparseArray(np.random.permutation([True, False] * 150))})\n y = pd.Series(SparseArray(np.random.permutation([0, 1] * 150)))\n X_test = pd.DataFrame({\"A\": SparseArray(np.random.permutation([0, 2] * 30)),\n \"B\": SparseArray(np.random.permutation([0.0, 0.1, 0.2, -0.1] * 15)),\n \"C\": SparseArray(np.random.permutation([True, False] * 30))})\n if pd.__version__ >= '0.24.0':\n for dtype in pd.concat([X.dtypes, X_test.dtypes, pd.Series(y.dtypes)]):\n assert pd.api.types.is_sparse(dtype)\n params = {\n 'objective': 'binary',\n 'verbose': -1\n }\n lgb_train = lgb.Dataset(X, y)\n gbm = lgb.train(params, lgb_train, num_boost_round=10)\n pred_sparse = gbm.predict(X_test, raw_score=True)\n if hasattr(X_test, 'sparse'):\n pred_dense = gbm.predict(X_test.sparse.to_dense(), raw_score=True)\n else:\n pred_dense = gbm.predict(X_test.to_dense(), raw_score=True)\n np.testing.assert_allclose(pred_sparse, pred_dense)\n\n\ndef test_reference_chain():\n X = np.random.normal(size=(100, 2))\n y = np.random.normal(size=100)\n tmp_dat = lgb.Dataset(X, y)\n # 
take subsets and train\n tmp_dat_train = tmp_dat.subset(np.arange(80))\n tmp_dat_val = tmp_dat.subset(np.arange(80, 100)).subset(np.arange(18))\n params = {'objective': 'regression_l2', 'metric': 'rmse'}\n evals_result = {}\n lgb.train(params, tmp_dat_train, num_boost_round=20,\n valid_sets=[tmp_dat_train, tmp_dat_val],\n verbose_eval=False, evals_result=evals_result)\n assert len(evals_result['training']['rmse']) == 20\n assert len(evals_result['valid_1']['rmse']) == 20\n\n\ndef test_contribs():\n X, y = load_breast_cancer(return_X_y=True)\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)\n params = {\n 'objective': 'binary',\n 'metric': 'binary_logloss',\n 'verbose': -1,\n }\n lgb_train = lgb.Dataset(X_train, y_train)\n gbm = lgb.train(params, lgb_train, num_boost_round=20)\n\n assert (np.linalg.norm(gbm.predict(X_test, raw_score=True)\n - np.sum(gbm.predict(X_test, pred_contrib=True), axis=1)) < 1e-4)\n\n\ndef test_contribs_sparse():\n n_features = 20\n n_samples = 100\n # generate CSR sparse dataset\n X, y = make_multilabel_classification(n_samples=n_samples,\n sparse=True,\n n_features=n_features,\n n_classes=1,\n n_labels=2)\n y = y.flatten()\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)\n params = {\n 'objective': 'binary',\n 'verbose': -1,\n }\n lgb_train = lgb.Dataset(X_train, y_train)\n gbm = lgb.train(params, lgb_train, num_boost_round=20)\n contribs_csr = gbm.predict(X_test, pred_contrib=True)\n assert isspmatrix_csr(contribs_csr)\n # convert data to dense and get back same contribs\n contribs_dense = gbm.predict(X_test.toarray(), pred_contrib=True)\n # validate the values are the same\n if platform.machine() == 'aarch64':\n np.testing.assert_allclose(contribs_csr.toarray(), contribs_dense, rtol=1, atol=1e-12)\n else:\n np.testing.assert_allclose(contribs_csr.toarray(), contribs_dense)\n assert (np.linalg.norm(gbm.predict(X_test, raw_score=True)\n - np.sum(contribs_dense, axis=1)) < 1e-4)\n # validate using CSC matrix\n X_test_csc = X_test.tocsc()\n contribs_csc = gbm.predict(X_test_csc, pred_contrib=True)\n assert isspmatrix_csc(contribs_csc)\n # validate the values are the same\n if platform.machine() == 'aarch64':\n np.testing.assert_allclose(contribs_csc.toarray(), contribs_dense, rtol=1, atol=1e-12)\n else:\n np.testing.assert_allclose(contribs_csc.toarray(), contribs_dense)\n\n\ndef test_contribs_sparse_multiclass():\n n_features = 20\n n_samples = 100\n n_labels = 4\n # generate CSR sparse dataset\n X, y = make_multilabel_classification(n_samples=n_samples,\n sparse=True,\n n_features=n_features,\n n_classes=1,\n n_labels=n_labels)\n y = y.flatten()\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)\n params = {\n 'objective': 'multiclass',\n 'num_class': n_labels,\n 'verbose': -1,\n }\n lgb_train = lgb.Dataset(X_train, y_train)\n gbm = lgb.train(params, lgb_train, num_boost_round=20)\n contribs_csr = gbm.predict(X_test, pred_contrib=True)\n assert isinstance(contribs_csr, list)\n for perclass_contribs_csr in contribs_csr:\n assert isspmatrix_csr(perclass_contribs_csr)\n # convert data to dense and get back same contribs\n contribs_dense = gbm.predict(X_test.toarray(), pred_contrib=True)\n # validate the values are the same\n contribs_csr_array = np.swapaxes(np.array([sparse_array.toarray() for sparse_array in contribs_csr]), 0, 1)\n contribs_csr_arr_re = contribs_csr_array.reshape((contribs_csr_array.shape[0],\n contribs_csr_array.shape[1] * 
contribs_csr_array.shape[2]))\n if platform.machine() == 'aarch64':\n np.testing.assert_allclose(contribs_csr_arr_re, contribs_dense, rtol=1, atol=1e-12)\n else:\n np.testing.assert_allclose(contribs_csr_arr_re, contribs_dense)\n contribs_dense_re = contribs_dense.reshape(contribs_csr_array.shape)\n assert np.linalg.norm(gbm.predict(X_test, raw_score=True) - np.sum(contribs_dense_re, axis=2)) < 1e-4\n # validate using CSC matrix\n X_test_csc = X_test.tocsc()\n contribs_csc = gbm.predict(X_test_csc, pred_contrib=True)\n assert isinstance(contribs_csc, list)\n for perclass_contribs_csc in contribs_csc:\n assert isspmatrix_csc(perclass_contribs_csc)\n # validate the values are the same\n contribs_csc_array = np.swapaxes(np.array([sparse_array.toarray() for sparse_array in contribs_csc]), 0, 1)\n contribs_csc_array = contribs_csc_array.reshape((contribs_csc_array.shape[0],\n contribs_csc_array.shape[1] * contribs_csc_array.shape[2]))\n if platform.machine() == 'aarch64':\n np.testing.assert_allclose(contribs_csc_array, contribs_dense, rtol=1, atol=1e-12)\n else:\n np.testing.assert_allclose(contribs_csc_array, contribs_dense)\n\n\n@pytest.mark.skipif(psutil.virtual_memory().available / 1024 / 1024 / 1024 < 3, reason='not enough RAM')\ndef test_int32_max_sparse_contribs():\n params = {\n 'objective': 'binary'\n }\n train_features = np.random.rand(100, 1000)\n train_targets = [0] * 50 + [1] * 50\n lgb_train = lgb.Dataset(train_features, train_targets)\n gbm = lgb.train(params, lgb_train, num_boost_round=2)\n csr_input_shape = (3000000, 1000)\n test_features = csr_matrix(csr_input_shape)\n for i in range(0, csr_input_shape[0], csr_input_shape[0] // 6):\n for j in range(0, 1000, 100):\n test_features[i, j] = random.random()\n y_pred_csr = gbm.predict(test_features, pred_contrib=True)\n # Note there is an extra column added to the output for the expected value\n csr_output_shape = (csr_input_shape[0], csr_input_shape[1] + 1)\n assert y_pred_csr.shape == csr_output_shape\n y_pred_csc = gbm.predict(test_features.tocsc(), pred_contrib=True)\n # Note output CSC shape should be same as CSR output shape\n assert y_pred_csc.shape == csr_output_shape\n\n\ndef test_sliced_data():\n def train_and_get_predictions(features, labels):\n dataset = lgb.Dataset(features, label=labels)\n lgb_params = {\n 'application': 'binary',\n 'verbose': -1,\n 'min_data': 5,\n }\n gbm = lgb.train(\n params=lgb_params,\n train_set=dataset,\n num_boost_round=10,\n )\n return gbm.predict(features)\n\n num_samples = 100\n features = np.random.rand(num_samples, 5)\n positive_samples = int(num_samples * 0.25)\n labels = np.append(np.ones(positive_samples, dtype=np.float32),\n np.zeros(num_samples - positive_samples, dtype=np.float32))\n # test sliced labels\n origin_pred = train_and_get_predictions(features, labels)\n stacked_labels = np.column_stack((labels, np.ones(num_samples, dtype=np.float32)))\n sliced_labels = stacked_labels[:, 0]\n sliced_pred = train_and_get_predictions(features, sliced_labels)\n np.testing.assert_allclose(origin_pred, sliced_pred)\n # append some columns\n stacked_features = np.column_stack((np.ones(num_samples, dtype=np.float32), features))\n stacked_features = np.column_stack((np.ones(num_samples, dtype=np.float32), stacked_features))\n stacked_features = np.column_stack((stacked_features, np.ones(num_samples, dtype=np.float32)))\n stacked_features = np.column_stack((stacked_features, np.ones(num_samples, dtype=np.float32)))\n # append some rows\n stacked_features = np.concatenate((np.ones(9, 
dtype=np.float32).reshape((1, 9)), stacked_features), axis=0)\n stacked_features = np.concatenate((np.ones(9, dtype=np.float32).reshape((1, 9)), stacked_features), axis=0)\n stacked_features = np.concatenate((stacked_features, np.ones(9, dtype=np.float32).reshape((1, 9))), axis=0)\n stacked_features = np.concatenate((stacked_features, np.ones(9, dtype=np.float32).reshape((1, 9))), axis=0)\n # test sliced 2d matrix\n sliced_features = stacked_features[2:102, 2:7]\n assert np.all(sliced_features == features)\n sliced_pred = train_and_get_predictions(sliced_features, sliced_labels)\n np.testing.assert_allclose(origin_pred, sliced_pred)\n # test sliced CSR\n stacked_csr = csr_matrix(stacked_features)\n sliced_csr = stacked_csr[2:102, 2:7]\n assert np.all(sliced_csr == features)\n sliced_pred = train_and_get_predictions(sliced_csr, sliced_labels)\n np.testing.assert_allclose(origin_pred, sliced_pred)\n\n\ndef test_init_with_subset():\n data = np.random.random((50, 2))\n y = [1] * 25 + [0] * 25\n lgb_train = lgb.Dataset(data, y, free_raw_data=False)\n subset_index_1 = np.random.choice(np.arange(50), 30, replace=False)\n subset_data_1 = lgb_train.subset(subset_index_1)\n subset_index_2 = np.random.choice(np.arange(50), 20, replace=False)\n subset_data_2 = lgb_train.subset(subset_index_2)\n params = {\n 'objective': 'binary',\n 'verbose': -1\n }\n init_gbm = lgb.train(params=params,\n train_set=subset_data_1,\n num_boost_round=10,\n keep_training_booster=True)\n lgb.train(params=params,\n train_set=subset_data_2,\n num_boost_round=10,\n init_model=init_gbm)\n assert lgb_train.get_data().shape[0] == 50\n assert subset_data_1.get_data().shape[0] == 30\n assert subset_data_2.get_data().shape[0] == 20\n lgb_train.save_binary(\"lgb_train_data.bin\")\n lgb_train_from_file = lgb.Dataset('lgb_train_data.bin', free_raw_data=False)\n subset_data_3 = lgb_train_from_file.subset(subset_index_1)\n subset_data_4 = lgb_train_from_file.subset(subset_index_2)\n init_gbm_2 = lgb.train(params=params,\n train_set=subset_data_3,\n num_boost_round=10,\n keep_training_booster=True)\n with np.testing.assert_raises_regex(lgb.basic.LightGBMError, \"Unknown format of training data\"):\n lgb.train(params=params,\n train_set=subset_data_4,\n num_boost_round=10,\n init_model=init_gbm_2)\n assert lgb_train_from_file.get_data() == \"lgb_train_data.bin\"\n assert subset_data_3.get_data() == \"lgb_train_data.bin\"\n assert subset_data_4.get_data() == \"lgb_train_data.bin\"\n\n\ndef generate_trainset_for_monotone_constraints_tests(x3_to_category=True):\n number_of_dpoints = 3000\n x1_positively_correlated_with_y = np.random.random(size=number_of_dpoints)\n x2_negatively_correlated_with_y = np.random.random(size=number_of_dpoints)\n x3_negatively_correlated_with_y = np.random.random(size=number_of_dpoints)\n x = np.column_stack(\n (x1_positively_correlated_with_y,\n x2_negatively_correlated_with_y,\n categorize(x3_negatively_correlated_with_y) if x3_to_category else x3_negatively_correlated_with_y))\n\n zs = np.random.normal(loc=0.0, scale=0.01, size=number_of_dpoints)\n scales = 10. 
* (np.random.random(6) + 0.5)\n y = (scales[0] * x1_positively_correlated_with_y\n + np.sin(scales[1] * np.pi * x1_positively_correlated_with_y)\n - scales[2] * x2_negatively_correlated_with_y\n - np.cos(scales[3] * np.pi * x2_negatively_correlated_with_y)\n - scales[4] * x3_negatively_correlated_with_y\n - np.cos(scales[5] * np.pi * x3_negatively_correlated_with_y)\n + zs)\n categorical_features = []\n if x3_to_category:\n categorical_features = [2]\n trainset = lgb.Dataset(x, label=y, categorical_feature=categorical_features, free_raw_data=False)\n return trainset\n\n\n@pytest.mark.parametrize(\"test_with_categorical_variable\", [True, False])\ndef test_monotone_constraints(test_with_categorical_variable):\n def is_increasing(y):\n return (np.diff(y) >= 0.0).all()\n\n def is_decreasing(y):\n return (np.diff(y) <= 0.0).all()\n\n def is_non_monotone(y):\n return (np.diff(y) < 0.0).any() and (np.diff(y) > 0.0).any()\n\n def is_correctly_constrained(learner, x3_to_category=True):\n iterations = 10\n n = 1000\n variable_x = np.linspace(0, 1, n).reshape((n, 1))\n fixed_xs_values = np.linspace(0, 1, n)\n for i in range(iterations):\n fixed_x = fixed_xs_values[i] * np.ones((n, 1))\n monotonically_increasing_x = np.column_stack((variable_x, fixed_x, fixed_x))\n monotonically_increasing_y = learner.predict(monotonically_increasing_x)\n monotonically_decreasing_x = np.column_stack((fixed_x, variable_x, fixed_x))\n monotonically_decreasing_y = learner.predict(monotonically_decreasing_x)\n non_monotone_x = np.column_stack(\n (\n fixed_x,\n fixed_x,\n categorize(variable_x) if x3_to_category else variable_x,\n )\n )\n non_monotone_y = learner.predict(non_monotone_x)\n if not (\n is_increasing(monotonically_increasing_y)\n and is_decreasing(monotonically_decreasing_y)\n and is_non_monotone(non_monotone_y)\n ):\n return False\n return True\n\n def are_interactions_enforced(gbm, feature_sets):\n def parse_tree_features(gbm):\n # trees start at position 1.\n tree_str = gbm.model_to_string().split(\"Tree\")[1:]\n feature_sets = []\n for tree in tree_str:\n # split_features are in 4th line.\n features = tree.splitlines()[3].split(\"=\")[1].split(\" \")\n features = set(f\"Column_{f}\" for f in features)\n feature_sets.append(features)\n return np.array(feature_sets)\n\n def has_interaction(treef):\n n = 0\n for fs in feature_sets:\n if len(treef.intersection(fs)) > 0:\n n += 1\n return n > 1\n\n tree_features = parse_tree_features(gbm)\n has_interaction_flag = np.array(\n [has_interaction(treef) for treef in tree_features]\n )\n\n return not has_interaction_flag.any()\n\n trainset = generate_trainset_for_monotone_constraints_tests(\n test_with_categorical_variable\n )\n for test_with_interaction_constraints in [True, False]:\n error_msg = (\"Model not correctly constrained \"\n f\"(test_with_interaction_constraints={test_with_interaction_constraints})\")\n for monotone_constraints_method in [\"basic\", \"intermediate\", \"advanced\"]:\n params = {\n \"min_data\": 20,\n \"num_leaves\": 20,\n \"monotone_constraints\": [1, -1, 0],\n \"monotone_constraints_method\": monotone_constraints_method,\n \"use_missing\": False,\n }\n if test_with_interaction_constraints:\n params[\"interaction_constraints\"] = [[0], [1], [2]]\n constrained_model = lgb.train(params, trainset)\n assert is_correctly_constrained(\n constrained_model, test_with_categorical_variable\n ), error_msg\n if test_with_interaction_constraints:\n feature_sets = [[\"Column_0\"], [\"Column_1\"], \"Column_2\"]\n assert 
are_interactions_enforced(constrained_model, feature_sets)\n\n\ndef test_monotone_penalty():\n def are_first_splits_non_monotone(tree, n, monotone_constraints):\n if n <= 0:\n return True\n if \"leaf_value\" in tree:\n return True\n if monotone_constraints[tree[\"split_feature\"]] != 0:\n return False\n return (are_first_splits_non_monotone(tree[\"left_child\"], n - 1, monotone_constraints)\n and are_first_splits_non_monotone(tree[\"right_child\"], n - 1, monotone_constraints))\n\n def are_there_monotone_splits(tree, monotone_constraints):\n if \"leaf_value\" in tree:\n return False\n if monotone_constraints[tree[\"split_feature\"]] != 0:\n return True\n return (are_there_monotone_splits(tree[\"left_child\"], monotone_constraints)\n or are_there_monotone_splits(tree[\"right_child\"], monotone_constraints))\n\n max_depth = 5\n monotone_constraints = [1, -1, 0]\n penalization_parameter = 2.0\n trainset = generate_trainset_for_monotone_constraints_tests(x3_to_category=False)\n for monotone_constraints_method in [\"basic\", \"intermediate\", \"advanced\"]:\n params = {\n 'max_depth': max_depth,\n 'monotone_constraints': monotone_constraints,\n 'monotone_penalty': penalization_parameter,\n \"monotone_constraints_method\": monotone_constraints_method,\n }\n constrained_model = lgb.train(params, trainset, 10)\n dumped_model = constrained_model.dump_model()[\"tree_info\"]\n for tree in dumped_model:\n assert are_first_splits_non_monotone(tree[\"tree_structure\"], int(penalization_parameter),\n monotone_constraints)\n assert are_there_monotone_splits(tree[\"tree_structure\"], monotone_constraints)\n\n\n# test if a penalty as high as the depth indeed prohibits all monotone splits\ndef test_monotone_penalty_max():\n max_depth = 5\n monotone_constraints = [1, -1, 0]\n penalization_parameter = max_depth\n trainset_constrained_model = generate_trainset_for_monotone_constraints_tests(x3_to_category=False)\n x = trainset_constrained_model.data\n y = trainset_constrained_model.label\n x3_negatively_correlated_with_y = x[:, 2]\n trainset_unconstrained_model = lgb.Dataset(x3_negatively_correlated_with_y.reshape(-1, 1), label=y)\n params_constrained_model = {\n 'monotone_constraints': monotone_constraints,\n 'monotone_penalty': penalization_parameter,\n \"max_depth\": max_depth,\n \"gpu_use_dp\": True,\n }\n params_unconstrained_model = {\n \"max_depth\": max_depth,\n \"gpu_use_dp\": True,\n }\n\n unconstrained_model = lgb.train(params_unconstrained_model, trainset_unconstrained_model, 10)\n unconstrained_model_predictions = unconstrained_model.predict(\n x3_negatively_correlated_with_y.reshape(-1, 1)\n )\n\n for monotone_constraints_method in [\"basic\", \"intermediate\", \"advanced\"]:\n params_constrained_model[\"monotone_constraints_method\"] = monotone_constraints_method\n # The penalization is so high that the first 2 features should not be used here\n constrained_model = lgb.train(params_constrained_model, trainset_constrained_model, 10)\n\n # Check that a very high penalization is the same as not using the features at all\n np.testing.assert_array_equal(constrained_model.predict(x), unconstrained_model_predictions)\n\n\ndef test_max_bin_by_feature():\n col1 = np.arange(0, 100)[:, np.newaxis]\n col2 = np.zeros((100, 1))\n col2[20:] = 1\n X = np.concatenate([col1, col2], axis=1)\n y = np.arange(0, 100)\n params = {\n 'objective': 'regression_l2',\n 'verbose': -1,\n 'num_leaves': 100,\n 'min_data_in_leaf': 1,\n 'min_sum_hessian_in_leaf': 0,\n 'min_data_in_bin': 1,\n 'max_bin_by_feature': [100, 2]\n }\n 
lgb_data = lgb.Dataset(X, label=y)\n est = lgb.train(params, lgb_data, num_boost_round=1)\n assert len(np.unique(est.predict(X))) == 100\n params['max_bin_by_feature'] = [2, 100]\n lgb_data = lgb.Dataset(X, label=y)\n est = lgb.train(params, lgb_data, num_boost_round=1)\n assert len(np.unique(est.predict(X))) == 3\n\n\ndef test_small_max_bin():\n np.random.seed(0)\n y = np.random.choice([0, 1], 100)\n x = np.ones((100, 1))\n x[:30, 0] = -1\n x[60:, 0] = 2\n params = {'objective': 'binary',\n 'seed': 0,\n 'min_data_in_leaf': 1,\n 'verbose': -1,\n 'max_bin': 2}\n lgb_x = lgb.Dataset(x, label=y)\n lgb.train(params, lgb_x, num_boost_round=5)\n x[0, 0] = np.nan\n params['max_bin'] = 3\n lgb_x = lgb.Dataset(x, label=y)\n lgb.train(params, lgb_x, num_boost_round=5)\n np.random.seed() # reset seed\n\n\ndef test_refit():\n X, y = load_breast_cancer(return_X_y=True)\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)\n params = {\n 'objective': 'binary',\n 'metric': 'binary_logloss',\n 'verbose': -1,\n 'min_data': 10\n }\n lgb_train = lgb.Dataset(X_train, y_train)\n gbm = lgb.train(params, lgb_train, num_boost_round=20)\n err_pred = log_loss(y_test, gbm.predict(X_test))\n new_gbm = gbm.refit(X_test, y_test)\n new_err_pred = log_loss(y_test, new_gbm.predict(X_test))\n assert err_pred > new_err_pred\n\n\ndef test_mape_rf():\n X, y = load_boston(return_X_y=True)\n params = {\n 'boosting_type': 'rf',\n 'objective': 'mape',\n 'verbose': -1,\n 'bagging_freq': 1,\n 'bagging_fraction': 0.8,\n 'feature_fraction': 0.8,\n 'boost_from_average': True\n }\n lgb_train = lgb.Dataset(X, y)\n gbm = lgb.train(params, lgb_train, num_boost_round=20)\n pred = gbm.predict(X)\n pred_mean = pred.mean()\n assert pred_mean > 20\n\n\ndef test_mape_dart():\n X, y = load_boston(return_X_y=True)\n params = {\n 'boosting_type': 'dart',\n 'objective': 'mape',\n 'verbose': -1,\n 'bagging_freq': 1,\n 'bagging_fraction': 0.8,\n 'feature_fraction': 0.8,\n 'boost_from_average': False\n }\n lgb_train = lgb.Dataset(X, y)\n gbm = lgb.train(params, lgb_train, num_boost_round=40)\n pred = gbm.predict(X)\n pred_mean = pred.mean()\n assert pred_mean > 18\n\n\ndef check_constant_features(y_true, expected_pred, more_params):\n X_train = np.ones((len(y_true), 1))\n y_train = np.array(y_true)\n params = {\n 'objective': 'regression',\n 'num_class': 1,\n 'verbose': -1,\n 'min_data': 1,\n 'num_leaves': 2,\n 'learning_rate': 1,\n 'min_data_in_bin': 1,\n 'boost_from_average': True\n }\n params.update(more_params)\n lgb_train = lgb.Dataset(X_train, y_train, params=params)\n gbm = lgb.train(params, lgb_train, num_boost_round=2)\n pred = gbm.predict(X_train)\n assert np.allclose(pred, expected_pred)\n\n\ndef test_constant_features_regression():\n params = {\n 'objective': 'regression'\n }\n check_constant_features([0.0, 10.0, 0.0, 10.0], 5.0, params)\n check_constant_features([0.0, 1.0, 2.0, 3.0], 1.5, params)\n check_constant_features([-1.0, 1.0, -2.0, 2.0], 0.0, params)\n\n\ndef test_constant_features_binary():\n params = {\n 'objective': 'binary'\n }\n check_constant_features([0.0, 10.0, 0.0, 10.0], 0.5, params)\n check_constant_features([0.0, 1.0, 2.0, 3.0], 0.75, params)\n\n\ndef test_constant_features_multiclass():\n params = {\n 'objective': 'multiclass',\n 'num_class': 3\n }\n check_constant_features([0.0, 1.0, 2.0, 0.0], [0.5, 0.25, 0.25], params)\n check_constant_features([0.0, 1.0, 2.0, 1.0], [0.25, 0.5, 0.25], params)\n\n\ndef test_constant_features_multiclassova():\n params = {\n 'objective': 
'multiclassova',\n 'num_class': 3\n }\n check_constant_features([0.0, 1.0, 2.0, 0.0], [0.5, 0.25, 0.25], params)\n check_constant_features([0.0, 1.0, 2.0, 1.0], [0.25, 0.5, 0.25], params)\n\n\ndef test_fpreproc():\n def preprocess_data(dtrain, dtest, params):\n train_data = dtrain.construct().get_data()\n test_data = dtest.construct().get_data()\n train_data[:, 0] += 1\n test_data[:, 0] += 1\n dtrain.label[-5:] = 3\n dtest.label[-5:] = 3\n dtrain = lgb.Dataset(train_data, dtrain.label)\n dtest = lgb.Dataset(test_data, dtest.label, reference=dtrain)\n params['num_class'] = 4\n return dtrain, dtest, params\n\n X, y = load_iris(return_X_y=True)\n dataset = lgb.Dataset(X, y, free_raw_data=False)\n params = {'objective': 'multiclass', 'num_class': 3, 'verbose': -1}\n results = lgb.cv(params, dataset, num_boost_round=10, fpreproc=preprocess_data)\n assert 'multi_logloss-mean' in results\n assert len(results['multi_logloss-mean']) == 10\n\n\ndef test_metrics():\n X, y = load_digits(n_class=2, return_X_y=True)\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)\n lgb_train = lgb.Dataset(X_train, y_train, silent=True)\n lgb_valid = lgb.Dataset(X_test, y_test, reference=lgb_train, silent=True)\n\n evals_result = {}\n params_verbose = {'verbose': -1}\n params_obj_verbose = {'objective': 'binary', 'verbose': -1}\n params_obj_metric_log_verbose = {'objective': 'binary', 'metric': 'binary_logloss', 'verbose': -1}\n params_obj_metric_err_verbose = {'objective': 'binary', 'metric': 'binary_error', 'verbose': -1}\n params_obj_metric_inv_verbose = {'objective': 'binary', 'metric': 'invalid_metric', 'verbose': -1}\n params_obj_metric_multi_verbose = {'objective': 'binary',\n 'metric': ['binary_logloss', 'binary_error'],\n 'verbose': -1}\n params_obj_metric_none_verbose = {'objective': 'binary', 'metric': 'None', 'verbose': -1}\n params_metric_log_verbose = {'metric': 'binary_logloss', 'verbose': -1}\n params_metric_err_verbose = {'metric': 'binary_error', 'verbose': -1}\n params_metric_inv_verbose = {'metric_types': 'invalid_metric', 'verbose': -1}\n params_metric_multi_verbose = {'metric': ['binary_logloss', 'binary_error'], 'verbose': -1}\n params_metric_none_verbose = {'metric': 'None', 'verbose': -1}\n\n def get_cv_result(params=params_obj_verbose, **kwargs):\n return lgb.cv(params, lgb_train, num_boost_round=2, verbose_eval=False, **kwargs)\n\n def train_booster(params=params_obj_verbose, **kwargs):\n lgb.train(params, lgb_train,\n num_boost_round=2,\n valid_sets=[lgb_valid],\n evals_result=evals_result,\n verbose_eval=False, **kwargs)\n\n # no fobj, no feval\n # default metric\n res = get_cv_result()\n assert len(res) == 2\n assert 'binary_logloss-mean' in res\n\n # non-default metric in params\n res = get_cv_result(params=params_obj_metric_err_verbose)\n assert len(res) == 2\n assert 'binary_error-mean' in res\n\n # default metric in args\n res = get_cv_result(metrics='binary_logloss')\n assert len(res) == 2\n assert 'binary_logloss-mean' in res\n\n # non-default metric in args\n res = get_cv_result(metrics='binary_error')\n assert len(res) == 2\n assert 'binary_error-mean' in res\n\n # metric in args overwrites one in params\n res = get_cv_result(params=params_obj_metric_inv_verbose, metrics='binary_error')\n assert len(res) == 2\n assert 'binary_error-mean' in res\n\n # multiple metrics in params\n res = get_cv_result(params=params_obj_metric_multi_verbose)\n assert len(res) == 4\n assert 'binary_logloss-mean' in res\n assert 'binary_error-mean' in res\n\n # 
multiple metrics in args\n res = get_cv_result(metrics=['binary_logloss', 'binary_error'])\n assert len(res) == 4\n assert 'binary_logloss-mean' in res\n assert 'binary_error-mean' in res\n\n # remove default metric by 'None' in list\n res = get_cv_result(metrics=['None'])\n assert len(res) == 0\n\n # remove default metric by 'None' aliases\n for na_alias in ('None', 'na', 'null', 'custom'):\n res = get_cv_result(metrics=na_alias)\n assert len(res) == 0\n\n # fobj, no feval\n # no default metric\n res = get_cv_result(params=params_verbose, fobj=dummy_obj)\n assert len(res) == 0\n\n # metric in params\n res = get_cv_result(params=params_metric_err_verbose, fobj=dummy_obj)\n assert len(res) == 2\n assert 'binary_error-mean' in res\n\n # metric in args\n res = get_cv_result(params=params_verbose, fobj=dummy_obj, metrics='binary_error')\n assert len(res) == 2\n assert 'binary_error-mean' in res\n\n # metric in args overwrites its' alias in params\n res = get_cv_result(params=params_metric_inv_verbose, fobj=dummy_obj, metrics='binary_error')\n assert len(res) == 2\n assert 'binary_error-mean' in res\n\n # multiple metrics in params\n res = get_cv_result(params=params_metric_multi_verbose, fobj=dummy_obj)\n assert len(res) == 4\n assert 'binary_logloss-mean' in res\n assert 'binary_error-mean' in res\n\n # multiple metrics in args\n res = get_cv_result(params=params_verbose, fobj=dummy_obj,\n metrics=['binary_logloss', 'binary_error'])\n assert len(res) == 4\n assert 'binary_logloss-mean' in res\n assert 'binary_error-mean' in res\n\n # no fobj, feval\n # default metric with custom one\n res = get_cv_result(feval=constant_metric)\n assert len(res) == 4\n assert 'binary_logloss-mean' in res\n assert 'error-mean' in res\n\n # non-default metric in params with custom one\n res = get_cv_result(params=params_obj_metric_err_verbose, feval=constant_metric)\n assert len(res) == 4\n assert 'binary_error-mean' in res\n assert 'error-mean' in res\n\n # default metric in args with custom one\n res = get_cv_result(metrics='binary_logloss', feval=constant_metric)\n assert len(res) == 4\n assert 'binary_logloss-mean' in res\n assert 'error-mean' in res\n\n # non-default metric in args with custom one\n res = get_cv_result(metrics='binary_error', feval=constant_metric)\n assert len(res) == 4\n assert 'binary_error-mean' in res\n assert 'error-mean' in res\n\n # metric in args overwrites one in params, custom one is evaluated too\n res = get_cv_result(params=params_obj_metric_inv_verbose, metrics='binary_error', feval=constant_metric)\n assert len(res) == 4\n assert 'binary_error-mean' in res\n assert 'error-mean' in res\n\n # multiple metrics in params with custom one\n res = get_cv_result(params=params_obj_metric_multi_verbose, feval=constant_metric)\n assert len(res) == 6\n assert 'binary_logloss-mean' in res\n assert 'binary_error-mean' in res\n assert 'error-mean' in res\n\n # multiple metrics in args with custom one\n res = get_cv_result(metrics=['binary_logloss', 'binary_error'], feval=constant_metric)\n assert len(res) == 6\n assert 'binary_logloss-mean' in res\n assert 'binary_error-mean' in res\n assert 'error-mean' in res\n\n # custom metric is evaluated despite 'None' is passed\n res = get_cv_result(metrics=['None'], feval=constant_metric)\n assert len(res) == 2\n assert 'error-mean' in res\n\n # fobj, feval\n # no default metric, only custom one\n res = get_cv_result(params=params_verbose, fobj=dummy_obj, feval=constant_metric)\n assert len(res) == 2\n assert 'error-mean' in res\n\n # metric in 
params with custom one\n res = get_cv_result(params=params_metric_err_verbose, fobj=dummy_obj, feval=constant_metric)\n assert len(res) == 4\n assert 'binary_error-mean' in res\n assert 'error-mean' in res\n\n # metric in args with custom one\n res = get_cv_result(params=params_verbose, fobj=dummy_obj,\n feval=constant_metric, metrics='binary_error')\n assert len(res) == 4\n assert 'binary_error-mean' in res\n assert 'error-mean' in res\n\n # metric in args overwrites one in params, custom one is evaluated too\n res = get_cv_result(params=params_metric_inv_verbose, fobj=dummy_obj,\n feval=constant_metric, metrics='binary_error')\n assert len(res) == 4\n assert 'binary_error-mean' in res\n assert 'error-mean' in res\n\n # multiple metrics in params with custom one\n res = get_cv_result(params=params_metric_multi_verbose, fobj=dummy_obj, feval=constant_metric)\n assert len(res) == 6\n assert 'binary_logloss-mean' in res\n assert 'binary_error-mean' in res\n assert 'error-mean' in res\n\n # multiple metrics in args with custom one\n res = get_cv_result(params=params_verbose, fobj=dummy_obj, feval=constant_metric,\n metrics=['binary_logloss', 'binary_error'])\n assert len(res) == 6\n assert 'binary_logloss-mean' in res\n assert 'binary_error-mean' in res\n assert 'error-mean' in res\n\n # custom metric is evaluated despite 'None' is passed\n res = get_cv_result(params=params_metric_none_verbose, fobj=dummy_obj, feval=constant_metric)\n assert len(res) == 2\n assert 'error-mean' in res\n\n # no fobj, no feval\n # default metric\n train_booster()\n assert len(evals_result['valid_0']) == 1\n assert 'binary_logloss' in evals_result['valid_0']\n\n # default metric in params\n train_booster(params=params_obj_metric_log_verbose)\n assert len(evals_result['valid_0']) == 1\n assert 'binary_logloss' in evals_result['valid_0']\n\n # non-default metric in params\n train_booster(params=params_obj_metric_err_verbose)\n assert len(evals_result['valid_0']) == 1\n assert 'binary_error' in evals_result['valid_0']\n\n # multiple metrics in params\n train_booster(params=params_obj_metric_multi_verbose)\n assert len(evals_result['valid_0']) == 2\n assert 'binary_logloss' in evals_result['valid_0']\n assert 'binary_error' in evals_result['valid_0']\n\n # remove default metric by 'None' aliases\n for na_alias in ('None', 'na', 'null', 'custom'):\n params = {'objective': 'binary', 'metric': na_alias, 'verbose': -1}\n train_booster(params=params)\n assert len(evals_result) == 0\n\n # fobj, no feval\n # no default metric\n train_booster(params=params_verbose, fobj=dummy_obj)\n assert len(evals_result) == 0\n\n # metric in params\n train_booster(params=params_metric_log_verbose, fobj=dummy_obj)\n assert len(evals_result['valid_0']) == 1\n assert 'binary_logloss' in evals_result['valid_0']\n\n # multiple metrics in params\n train_booster(params=params_metric_multi_verbose, fobj=dummy_obj)\n assert len(evals_result['valid_0']) == 2\n assert 'binary_logloss' in evals_result['valid_0']\n assert 'binary_error' in evals_result['valid_0']\n\n # no fobj, feval\n # default metric with custom one\n train_booster(feval=constant_metric)\n assert len(evals_result['valid_0']) == 2\n assert 'binary_logloss' in evals_result['valid_0']\n assert 'error' in evals_result['valid_0']\n\n # default metric in params with custom one\n train_booster(params=params_obj_metric_log_verbose, feval=constant_metric)\n assert len(evals_result['valid_0']) == 2\n assert 'binary_logloss' in evals_result['valid_0']\n assert 'error' in 
evals_result['valid_0']\n\n # non-default metric in params with custom one\n train_booster(params=params_obj_metric_err_verbose, feval=constant_metric)\n assert len(evals_result['valid_0']) == 2\n assert 'binary_error' in evals_result['valid_0']\n assert 'error' in evals_result['valid_0']\n\n # multiple metrics in params with custom one\n train_booster(params=params_obj_metric_multi_verbose, feval=constant_metric)\n assert len(evals_result['valid_0']) == 3\n assert 'binary_logloss' in evals_result['valid_0']\n assert 'binary_error' in evals_result['valid_0']\n assert 'error' in evals_result['valid_0']\n\n # custom metric is evaluated despite 'None' is passed\n train_booster(params=params_obj_metric_none_verbose, feval=constant_metric)\n assert len(evals_result) == 1\n assert 'error' in evals_result['valid_0']\n\n # fobj, feval\n # no default metric, only custom one\n train_booster(params=params_verbose, fobj=dummy_obj, feval=constant_metric)\n assert len(evals_result['valid_0']) == 1\n assert 'error' in evals_result['valid_0']\n\n # metric in params with custom one\n train_booster(params=params_metric_log_verbose, fobj=dummy_obj, feval=constant_metric)\n assert len(evals_result['valid_0']) == 2\n assert 'binary_logloss' in evals_result['valid_0']\n assert 'error' in evals_result['valid_0']\n\n # multiple metrics in params with custom one\n train_booster(params=params_metric_multi_verbose, fobj=dummy_obj, feval=constant_metric)\n assert len(evals_result['valid_0']) == 3\n assert 'binary_logloss' in evals_result['valid_0']\n assert 'binary_error' in evals_result['valid_0']\n assert 'error' in evals_result['valid_0']\n\n # custom metric is evaluated despite 'None' is passed\n train_booster(params=params_metric_none_verbose, fobj=dummy_obj, feval=constant_metric)\n assert len(evals_result) == 1\n assert 'error' in evals_result['valid_0']\n\n X, y = load_digits(n_class=3, return_X_y=True)\n lgb_train = lgb.Dataset(X, y, silent=True)\n\n obj_multi_aliases = ['multiclass', 'softmax', 'multiclassova', 'multiclass_ova', 'ova', 'ovr']\n for obj_multi_alias in obj_multi_aliases:\n params_obj_class_3_verbose = {'objective': obj_multi_alias, 'num_class': 3, 'verbose': -1}\n params_obj_class_1_verbose = {'objective': obj_multi_alias, 'num_class': 1, 'verbose': -1}\n params_obj_verbose = {'objective': obj_multi_alias, 'verbose': -1}\n # multiclass default metric\n res = get_cv_result(params_obj_class_3_verbose)\n assert len(res) == 2\n assert 'multi_logloss-mean' in res\n # multiclass default metric with custom one\n res = get_cv_result(params_obj_class_3_verbose, feval=constant_metric)\n assert len(res) == 4\n assert 'multi_logloss-mean' in res\n assert 'error-mean' in res\n # multiclass metric alias with custom one for custom objective\n res = get_cv_result(params_obj_class_3_verbose, fobj=dummy_obj, feval=constant_metric)\n assert len(res) == 2\n assert 'error-mean' in res\n # no metric for invalid class_num\n res = get_cv_result(params_obj_class_1_verbose, fobj=dummy_obj)\n assert len(res) == 0\n # custom metric for invalid class_num\n res = get_cv_result(params_obj_class_1_verbose, fobj=dummy_obj, feval=constant_metric)\n assert len(res) == 2\n assert 'error-mean' in res\n # multiclass metric alias with custom one with invalid class_num\n with pytest.raises(lgb.basic.LightGBMError):\n get_cv_result(params_obj_class_1_verbose, metrics=obj_multi_alias,\n fobj=dummy_obj, feval=constant_metric)\n # multiclass default metric without num_class\n with pytest.raises(lgb.basic.LightGBMError):\n 
get_cv_result(params_obj_verbose)\n for metric_multi_alias in obj_multi_aliases + ['multi_logloss']:\n # multiclass metric alias\n res = get_cv_result(params_obj_class_3_verbose, metrics=metric_multi_alias)\n assert len(res) == 2\n assert 'multi_logloss-mean' in res\n # multiclass metric\n res = get_cv_result(params_obj_class_3_verbose, metrics='multi_error')\n assert len(res) == 2\n assert 'multi_error-mean' in res\n # non-valid metric for multiclass objective\n with pytest.raises(lgb.basic.LightGBMError):\n get_cv_result(params_obj_class_3_verbose, metrics='binary_logloss')\n params_class_3_verbose = {'num_class': 3, 'verbose': -1}\n # non-default num_class for default objective\n with pytest.raises(lgb.basic.LightGBMError):\n get_cv_result(params_class_3_verbose)\n # no metric with non-default num_class for custom objective\n res = get_cv_result(params_class_3_verbose, fobj=dummy_obj)\n assert len(res) == 0\n for metric_multi_alias in obj_multi_aliases + ['multi_logloss']:\n # multiclass metric alias for custom objective\n res = get_cv_result(params_class_3_verbose, metrics=metric_multi_alias, fobj=dummy_obj)\n assert len(res) == 2\n assert 'multi_logloss-mean' in res\n # multiclass metric for custom objective\n res = get_cv_result(params_class_3_verbose, metrics='multi_error', fobj=dummy_obj)\n assert len(res) == 2\n assert 'multi_error-mean' in res\n # binary metric with non-default num_class for custom objective\n with pytest.raises(lgb.basic.LightGBMError):\n get_cv_result(params_class_3_verbose, metrics='binary_error', fobj=dummy_obj)\n\n\ndef test_multiple_feval_train():\n X, y = load_breast_cancer(return_X_y=True)\n\n params = {'verbose': -1, 'objective': 'binary', 'metric': 'binary_logloss'}\n\n X_train, X_validation, y_train, y_validation = train_test_split(X, y, test_size=0.2)\n\n train_dataset = lgb.Dataset(data=X_train, label=y_train, silent=True)\n validation_dataset = lgb.Dataset(data=X_validation, label=y_validation, reference=train_dataset, silent=True)\n evals_result = {}\n lgb.train(\n params=params,\n train_set=train_dataset,\n valid_sets=validation_dataset,\n num_boost_round=5,\n feval=[constant_metric, decreasing_metric],\n evals_result=evals_result)\n\n assert len(evals_result['valid_0']) == 3\n assert 'binary_logloss' in evals_result['valid_0']\n assert 'error' in evals_result['valid_0']\n assert 'decreasing_metric' in evals_result['valid_0']\n\n\ndef test_multiple_feval_cv():\n X, y = load_breast_cancer(return_X_y=True)\n\n params = {'verbose': -1, 'objective': 'binary', 'metric': 'binary_logloss'}\n\n train_dataset = lgb.Dataset(data=X, label=y, silent=True)\n\n cv_results = lgb.cv(\n params=params,\n train_set=train_dataset,\n num_boost_round=5,\n feval=[constant_metric, decreasing_metric])\n\n # Expect three metrics but mean and stdv for each metric\n assert len(cv_results) == 6\n assert 'binary_logloss-mean' in cv_results\n assert 'error-mean' in cv_results\n assert 'decreasing_metric-mean' in cv_results\n assert 'binary_logloss-stdv' in cv_results\n assert 'error-stdv' in cv_results\n assert 'decreasing_metric-stdv' in cv_results\n\n\n@pytest.mark.skipif(psutil.virtual_memory().available / 1024 / 1024 / 1024 < 3, reason='not enough RAM')\ndef test_model_size():\n X, y = load_boston(return_X_y=True)\n data = lgb.Dataset(X, y)\n bst = lgb.train({'verbose': -1}, data, num_boost_round=2)\n y_pred = bst.predict(X)\n model_str = bst.model_to_string()\n one_tree = model_str[model_str.find('Tree=1'):model_str.find('end of trees')]\n one_tree_size = len(one_tree)\n 
one_tree = one_tree.replace('Tree=1', 'Tree={}')\n    multiplier = 100\n    total_trees = multiplier + 2\n    try:\n        before_tree_sizes = model_str[:model_str.find('tree_sizes')]\n        trees = model_str[model_str.find('Tree=0'):model_str.find('end of trees')]\n        more_trees = (one_tree * multiplier).format(*range(2, total_trees))\n        after_trees = model_str[model_str.find('end of trees'):]\n        num_end_spaces = 2**31 - one_tree_size * total_trees\n        new_model_str = f\"{before_tree_sizes}\\n\\n{trees}{more_trees}{after_trees}{'':{num_end_spaces}}\"\n        assert len(new_model_str) > 2**31\n        bst.model_from_string(new_model_str, verbose=False)\n        assert bst.num_trees() == total_trees\n        y_pred_new = bst.predict(X, num_iteration=2)\n        np.testing.assert_allclose(y_pred, y_pred_new)\n    except MemoryError:\n        pytest.skip('not enough RAM')\n\n\ndef test_get_split_value_histogram():\n    X, y = load_boston(return_X_y=True)\n    lgb_train = lgb.Dataset(X, y, categorical_feature=[2])\n    gbm = lgb.train({'verbose': -1}, lgb_train, num_boost_round=20)\n    # test XGBoost-style return value\n    params = {'feature': 0, 'xgboost_style': True}\n    assert gbm.get_split_value_histogram(**params).shape == (9, 2)\n    assert gbm.get_split_value_histogram(bins=999, **params).shape == (9, 2)\n    assert gbm.get_split_value_histogram(bins=-1, **params).shape == (1, 2)\n    assert gbm.get_split_value_histogram(bins=0, **params).shape == (1, 2)\n    assert gbm.get_split_value_histogram(bins=1, **params).shape == (1, 2)\n    assert gbm.get_split_value_histogram(bins=2, **params).shape == (2, 2)\n    assert gbm.get_split_value_histogram(bins=6, **params).shape == (5, 2)\n    assert gbm.get_split_value_histogram(bins=7, **params).shape == (6, 2)\n    if lgb.compat.PANDAS_INSTALLED:\n        np.testing.assert_allclose(\n            gbm.get_split_value_histogram(0, xgboost_style=True).values,\n            gbm.get_split_value_histogram(gbm.feature_name()[0], xgboost_style=True).values\n        )\n        np.testing.assert_allclose(\n            gbm.get_split_value_histogram(X.shape[-1] - 1, xgboost_style=True).values,\n            gbm.get_split_value_histogram(gbm.feature_name()[X.shape[-1] - 1], xgboost_style=True).values\n        )\n    else:\n        np.testing.assert_allclose(\n            gbm.get_split_value_histogram(0, xgboost_style=True),\n            gbm.get_split_value_histogram(gbm.feature_name()[0], xgboost_style=True)\n        )\n        np.testing.assert_allclose(\n            gbm.get_split_value_histogram(X.shape[-1] - 1, xgboost_style=True),\n            gbm.get_split_value_histogram(gbm.feature_name()[X.shape[-1] - 1], xgboost_style=True)\n        )\n    # test numpy-style return value\n    hist, bins = gbm.get_split_value_histogram(0)\n    assert len(hist) == 23\n    assert len(bins) == 24\n    hist, bins = gbm.get_split_value_histogram(0, bins=999)\n    assert len(hist) == 999\n    assert len(bins) == 1000\n    with pytest.raises(ValueError):\n        gbm.get_split_value_histogram(0, bins=-1)\n    with pytest.raises(ValueError):\n        gbm.get_split_value_histogram(0, bins=0)\n    hist, bins = gbm.get_split_value_histogram(0, bins=1)\n    assert len(hist) == 1\n    assert len(bins) == 2\n    hist, bins = gbm.get_split_value_histogram(0, bins=2)\n    assert len(hist) == 2\n    assert len(bins) == 3\n    hist, bins = gbm.get_split_value_histogram(0, bins=6)\n    assert len(hist) == 6\n    assert len(bins) == 7\n    hist, bins = gbm.get_split_value_histogram(0, bins=7)\n    assert len(hist) == 7\n    assert len(bins) == 8\n    hist_idx, bins_idx = gbm.get_split_value_histogram(0)\n    hist_name, bins_name = gbm.get_split_value_histogram(gbm.feature_name()[0])\n    np.testing.assert_array_equal(hist_idx, hist_name)\n    np.testing.assert_allclose(bins_idx, bins_name)\n    hist_idx, bins_idx = 
gbm.get_split_value_histogram(X.shape[-1] - 1)\n hist_name, bins_name = gbm.get_split_value_histogram(gbm.feature_name()[X.shape[-1] - 1])\n np.testing.assert_array_equal(hist_idx, hist_name)\n np.testing.assert_allclose(bins_idx, bins_name)\n # test bins string type\n if np.__version__ > '1.11.0':\n hist_vals, bin_edges = gbm.get_split_value_histogram(0, bins='auto')\n hist = gbm.get_split_value_histogram(0, bins='auto', xgboost_style=True)\n if lgb.compat.PANDAS_INSTALLED:\n mask = hist_vals > 0\n np.testing.assert_array_equal(hist_vals[mask], hist['Count'].values)\n np.testing.assert_allclose(bin_edges[1:][mask], hist['SplitValue'].values)\n else:\n mask = hist_vals > 0\n np.testing.assert_array_equal(hist_vals[mask], hist[:, 1])\n np.testing.assert_allclose(bin_edges[1:][mask], hist[:, 0])\n # test histogram is disabled for categorical features\n with pytest.raises(lgb.basic.LightGBMError):\n gbm.get_split_value_histogram(2)\n\n\ndef test_early_stopping_for_only_first_metric():\n\n def metrics_combination_train_regression(valid_sets, metric_list, assumed_iteration,\n first_metric_only, feval=None):\n params = {\n 'objective': 'regression',\n 'learning_rate': 1.1,\n 'num_leaves': 10,\n 'metric': metric_list,\n 'verbose': -1,\n 'seed': 123\n }\n gbm = lgb.train(dict(params, first_metric_only=first_metric_only), lgb_train,\n num_boost_round=25, valid_sets=valid_sets, feval=feval,\n early_stopping_rounds=5, verbose_eval=False)\n assert assumed_iteration == gbm.best_iteration\n\n def metrics_combination_cv_regression(metric_list, assumed_iteration,\n first_metric_only, eval_train_metric, feval=None):\n params = {\n 'objective': 'regression',\n 'learning_rate': 0.9,\n 'num_leaves': 10,\n 'metric': metric_list,\n 'verbose': -1,\n 'seed': 123,\n 'gpu_use_dp': True\n }\n ret = lgb.cv(dict(params, first_metric_only=first_metric_only),\n train_set=lgb_train, num_boost_round=25,\n stratified=False, feval=feval,\n early_stopping_rounds=5, verbose_eval=False,\n eval_train_metric=eval_train_metric)\n assert assumed_iteration == len(ret[list(ret.keys())[0]])\n\n X, y = load_boston(return_X_y=True)\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\n X_test1, X_test2, y_test1, y_test2 = train_test_split(X_test, y_test, test_size=0.5, random_state=73)\n lgb_train = lgb.Dataset(X_train, y_train)\n lgb_valid1 = lgb.Dataset(X_test1, y_test1, reference=lgb_train)\n lgb_valid2 = lgb.Dataset(X_test2, y_test2, reference=lgb_train)\n\n iter_valid1_l1 = 3\n iter_valid1_l2 = 14\n iter_valid2_l1 = 2\n iter_valid2_l2 = 15\n assert len(set([iter_valid1_l1, iter_valid1_l2, iter_valid2_l1, iter_valid2_l2])) == 4\n iter_min_l1 = min([iter_valid1_l1, iter_valid2_l1])\n iter_min_l2 = min([iter_valid1_l2, iter_valid2_l2])\n iter_min_valid1 = min([iter_valid1_l1, iter_valid1_l2])\n\n iter_cv_l1 = 4\n iter_cv_l2 = 12\n assert len(set([iter_cv_l1, iter_cv_l2])) == 2\n iter_cv_min = min([iter_cv_l1, iter_cv_l2])\n\n # test for lgb.train\n metrics_combination_train_regression(lgb_valid1, [], iter_valid1_l2, False)\n metrics_combination_train_regression(lgb_valid1, [], iter_valid1_l2, True)\n metrics_combination_train_regression(lgb_valid1, None, iter_valid1_l2, False)\n metrics_combination_train_regression(lgb_valid1, None, iter_valid1_l2, True)\n metrics_combination_train_regression(lgb_valid1, 'l2', iter_valid1_l2, True)\n metrics_combination_train_regression(lgb_valid1, 'l1', iter_valid1_l1, True)\n metrics_combination_train_regression(lgb_valid1, ['l2', 'l1'], iter_valid1_l2, 
True)\n metrics_combination_train_regression(lgb_valid1, ['l1', 'l2'], iter_valid1_l1, True)\n metrics_combination_train_regression(lgb_valid1, ['l2', 'l1'], iter_min_valid1, False)\n metrics_combination_train_regression(lgb_valid1, ['l1', 'l2'], iter_min_valid1, False)\n\n # test feval for lgb.train\n metrics_combination_train_regression(lgb_valid1, 'None', 1, False,\n feval=lambda preds, train_data: [decreasing_metric(preds, train_data),\n constant_metric(preds, train_data)])\n metrics_combination_train_regression(lgb_valid1, 'None', 25, True,\n feval=lambda preds, train_data: [decreasing_metric(preds, train_data),\n constant_metric(preds, train_data)])\n metrics_combination_train_regression(lgb_valid1, 'None', 1, True,\n feval=lambda preds, train_data: [constant_metric(preds, train_data),\n decreasing_metric(preds, train_data)])\n\n # test with two valid data for lgb.train\n metrics_combination_train_regression([lgb_valid1, lgb_valid2], ['l2', 'l1'], iter_min_l2, True)\n metrics_combination_train_regression([lgb_valid2, lgb_valid1], ['l2', 'l1'], iter_min_l2, True)\n metrics_combination_train_regression([lgb_valid1, lgb_valid2], ['l1', 'l2'], iter_min_l1, True)\n metrics_combination_train_regression([lgb_valid2, lgb_valid1], ['l1', 'l2'], iter_min_l1, True)\n\n # test for lgb.cv\n metrics_combination_cv_regression(None, iter_cv_l2, True, False)\n metrics_combination_cv_regression('l2', iter_cv_l2, True, False)\n metrics_combination_cv_regression('l1', iter_cv_l1, True, False)\n metrics_combination_cv_regression(['l2', 'l1'], iter_cv_l2, True, False)\n metrics_combination_cv_regression(['l1', 'l2'], iter_cv_l1, True, False)\n metrics_combination_cv_regression(['l2', 'l1'], iter_cv_min, False, False)\n metrics_combination_cv_regression(['l1', 'l2'], iter_cv_min, False, False)\n metrics_combination_cv_regression(None, iter_cv_l2, True, True)\n metrics_combination_cv_regression('l2', iter_cv_l2, True, True)\n metrics_combination_cv_regression('l1', iter_cv_l1, True, True)\n metrics_combination_cv_regression(['l2', 'l1'], iter_cv_l2, True, True)\n metrics_combination_cv_regression(['l1', 'l2'], iter_cv_l1, True, True)\n metrics_combination_cv_regression(['l2', 'l1'], iter_cv_min, False, True)\n metrics_combination_cv_regression(['l1', 'l2'], iter_cv_min, False, True)\n\n # test feval for lgb.cv\n metrics_combination_cv_regression('None', 1, False, False,\n feval=lambda preds, train_data: [decreasing_metric(preds, train_data),\n constant_metric(preds, train_data)])\n metrics_combination_cv_regression('None', 25, True, False,\n feval=lambda preds, train_data: [decreasing_metric(preds, train_data),\n constant_metric(preds, train_data)])\n metrics_combination_cv_regression('None', 1, True, False,\n feval=lambda preds, train_data: [constant_metric(preds, train_data),\n decreasing_metric(preds, train_data)])\n\n\ndef test_node_level_subcol():\n X, y = load_breast_cancer(return_X_y=True)\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)\n params = {\n 'objective': 'binary',\n 'metric': 'binary_logloss',\n 'feature_fraction_bynode': 0.8,\n 'feature_fraction': 1.0,\n 'verbose': -1\n }\n lgb_train = lgb.Dataset(X_train, y_train)\n lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)\n evals_result = {}\n gbm = lgb.train(params, lgb_train,\n num_boost_round=25,\n valid_sets=lgb_eval,\n verbose_eval=False,\n evals_result=evals_result)\n ret = log_loss(y_test, gbm.predict(X_test))\n assert ret < 0.14\n assert evals_result['valid_0']['binary_logloss'][-1] 
== pytest.approx(ret)\n params['feature_fraction'] = 0.5\n gbm2 = lgb.train(params, lgb_train, num_boost_round=25)\n ret2 = log_loss(y_test, gbm2.predict(X_test))\n assert ret != ret2\n\n\ndef test_forced_bins():\n x = np.empty((100, 2))\n x[:, 0] = np.arange(0, 1, 0.01)\n x[:, 1] = -np.arange(0, 1, 0.01)\n y = np.arange(0, 1, 0.01)\n forcedbins_filename = (\n Path(__file__).absolute().parents[2] / 'examples' / 'regression' / 'forced_bins.json'\n )\n params = {'objective': 'regression_l1',\n 'max_bin': 5,\n 'forcedbins_filename': forcedbins_filename,\n 'num_leaves': 2,\n 'min_data_in_leaf': 1,\n 'verbose': -1}\n lgb_x = lgb.Dataset(x, label=y)\n est = lgb.train(params, lgb_x, num_boost_round=20)\n new_x = np.zeros((3, x.shape[1]))\n new_x[:, 0] = [0.31, 0.37, 0.41]\n predicted = est.predict(new_x)\n assert len(np.unique(predicted)) == 3\n new_x[:, 0] = [0, 0, 0]\n new_x[:, 1] = [-0.9, -0.6, -0.3]\n predicted = est.predict(new_x)\n assert len(np.unique(predicted)) == 1\n params['forcedbins_filename'] = ''\n lgb_x = lgb.Dataset(x, label=y)\n est = lgb.train(params, lgb_x, num_boost_round=20)\n predicted = est.predict(new_x)\n assert len(np.unique(predicted)) == 3\n params['forcedbins_filename'] = (\n Path(__file__).absolute().parents[2] / 'examples' / 'regression' / 'forced_bins2.json'\n )\n params['max_bin'] = 11\n lgb_x = lgb.Dataset(x[:, :1], label=y)\n est = lgb.train(params, lgb_x, num_boost_round=50)\n predicted = est.predict(x[1:, :1])\n _, counts = np.unique(predicted, return_counts=True)\n assert min(counts) >= 9\n assert max(counts) <= 11\n\n\ndef test_binning_same_sign():\n # test that binning works properly for features with only positive or only negative values\n x = np.empty((99, 2))\n x[:, 0] = np.arange(0.01, 1, 0.01)\n x[:, 1] = -np.arange(0.01, 1, 0.01)\n y = np.arange(0.01, 1, 0.01)\n params = {'objective': 'regression_l1',\n 'max_bin': 5,\n 'num_leaves': 2,\n 'min_data_in_leaf': 1,\n 'verbose': -1,\n 'seed': 0}\n lgb_x = lgb.Dataset(x, label=y)\n est = lgb.train(params, lgb_x, num_boost_round=20)\n new_x = np.zeros((3, 2))\n new_x[:, 0] = [-1, 0, 1]\n predicted = est.predict(new_x)\n assert predicted[0] == pytest.approx(predicted[1])\n assert predicted[1] != pytest.approx(predicted[2])\n new_x = np.zeros((3, 2))\n new_x[:, 1] = [-1, 0, 1]\n predicted = est.predict(new_x)\n assert predicted[0] != pytest.approx(predicted[1])\n assert predicted[1] == pytest.approx(predicted[2])\n\n\ndef test_dataset_update_params():\n default_params = {\"max_bin\": 100,\n \"max_bin_by_feature\": [20, 10],\n \"bin_construct_sample_cnt\": 10000,\n \"min_data_in_bin\": 1,\n \"use_missing\": False,\n \"zero_as_missing\": False,\n \"categorical_feature\": [0],\n \"feature_pre_filter\": True,\n \"pre_partition\": False,\n \"enable_bundle\": True,\n \"data_random_seed\": 0,\n \"is_enable_sparse\": True,\n \"header\": True,\n \"two_round\": True,\n \"label_column\": 0,\n \"weight_column\": 0,\n \"group_column\": 0,\n \"ignore_column\": 0,\n \"min_data_in_leaf\": 10,\n \"linear_tree\": False,\n \"precise_float_parser\": True,\n \"verbose\": -1}\n unchangeable_params = {\"max_bin\": 150,\n \"max_bin_by_feature\": [30, 5],\n \"bin_construct_sample_cnt\": 5000,\n \"min_data_in_bin\": 2,\n \"use_missing\": True,\n \"zero_as_missing\": True,\n \"categorical_feature\": [0, 1],\n \"feature_pre_filter\": False,\n \"pre_partition\": True,\n \"enable_bundle\": False,\n \"data_random_seed\": 1,\n \"is_enable_sparse\": False,\n \"header\": False,\n \"two_round\": False,\n \"label_column\": 1,\n 
\"weight_column\": 1,\n \"group_column\": 1,\n \"ignore_column\": 1,\n \"forcedbins_filename\": \"/some/path/forcedbins.json\",\n \"min_data_in_leaf\": 2,\n \"linear_tree\": True,\n \"precise_float_parser\": False}\n X = np.random.random((100, 2))\n y = np.random.random(100)\n\n # decreasing without freeing raw data is allowed\n lgb_data = lgb.Dataset(X, y, params=default_params, free_raw_data=False).construct()\n default_params[\"min_data_in_leaf\"] -= 1\n lgb.train(default_params, lgb_data, num_boost_round=3)\n\n # decreasing before lazy init is allowed\n lgb_data = lgb.Dataset(X, y, params=default_params)\n default_params[\"min_data_in_leaf\"] -= 1\n lgb.train(default_params, lgb_data, num_boost_round=3)\n\n # increasing is allowed\n default_params[\"min_data_in_leaf\"] += 2\n lgb.train(default_params, lgb_data, num_boost_round=3)\n\n # decreasing with disabled filter is allowed\n default_params[\"feature_pre_filter\"] = False\n lgb_data = lgb.Dataset(X, y, params=default_params).construct()\n default_params[\"min_data_in_leaf\"] -= 4\n lgb.train(default_params, lgb_data, num_boost_round=3)\n\n # decreasing with enabled filter is disallowed;\n # also changes of other params are disallowed\n default_params[\"feature_pre_filter\"] = True\n lgb_data = lgb.Dataset(X, y, params=default_params).construct()\n for key, value in unchangeable_params.items():\n new_params = default_params.copy()\n new_params[key] = value\n if key != \"forcedbins_filename\":\n param_name = key\n else:\n param_name = \"forced bins\"\n err_msg = (\"Reducing `min_data_in_leaf` with `feature_pre_filter=true` may cause *\"\n if key == \"min_data_in_leaf\"\n else f\"Cannot change {param_name} *\")\n with np.testing.assert_raises_regex(lgb.basic.LightGBMError, err_msg):\n lgb.train(new_params, lgb_data, num_boost_round=3)\n\n\ndef test_dataset_params_with_reference():\n default_params = {\"max_bin\": 100}\n X = np.random.random((100, 2))\n y = np.random.random(100)\n X_val = np.random.random((100, 2))\n y_val = np.random.random(100)\n lgb_train = lgb.Dataset(X, y, params=default_params, free_raw_data=False).construct()\n lgb_val = lgb.Dataset(X_val, y_val, reference=lgb_train, free_raw_data=False).construct()\n assert lgb_train.get_params() == default_params\n assert lgb_val.get_params() == default_params\n lgb.train(default_params, lgb_train, valid_sets=[lgb_val])\n\n\ndef test_extra_trees():\n # check extra trees increases regularization\n X, y = load_boston(return_X_y=True)\n lgb_x = lgb.Dataset(X, label=y)\n params = {'objective': 'regression',\n 'num_leaves': 32,\n 'verbose': -1,\n 'extra_trees': False,\n 'seed': 0}\n est = lgb.train(params, lgb_x, num_boost_round=10)\n predicted = est.predict(X)\n err = mean_squared_error(y, predicted)\n params['extra_trees'] = True\n est = lgb.train(params, lgb_x, num_boost_round=10)\n predicted_new = est.predict(X)\n err_new = mean_squared_error(y, predicted_new)\n assert err < err_new\n\n\ndef test_path_smoothing():\n # check path smoothing increases regularization\n X, y = load_boston(return_X_y=True)\n lgb_x = lgb.Dataset(X, label=y)\n params = {'objective': 'regression',\n 'num_leaves': 32,\n 'verbose': -1,\n 'seed': 0}\n est = lgb.train(params, lgb_x, num_boost_round=10)\n predicted = est.predict(X)\n err = mean_squared_error(y, predicted)\n params['path_smooth'] = 1\n est = lgb.train(params, lgb_x, num_boost_round=10)\n predicted_new = est.predict(X)\n err_new = mean_squared_error(y, predicted_new)\n assert err < err_new\n\n\ndef test_trees_to_dataframe():\n 
pytest.importorskip(\"pandas\")\n\n def _imptcs_to_numpy(X, impcts_dict):\n cols = [f'Column_{i}' for i in range(X.shape[1])]\n return [impcts_dict.get(col, 0.) for col in cols]\n\n X, y = load_breast_cancer(return_X_y=True)\n data = lgb.Dataset(X, label=y)\n num_trees = 10\n bst = lgb.train({\"objective\": \"binary\", \"verbose\": -1}, data, num_trees)\n tree_df = bst.trees_to_dataframe()\n split_dict = (tree_df[~tree_df['split_gain'].isnull()]\n .groupby('split_feature')\n .size()\n .to_dict())\n\n gains_dict = (tree_df\n .groupby('split_feature')['split_gain']\n .sum()\n .to_dict())\n\n tree_split = _imptcs_to_numpy(X, split_dict)\n tree_gains = _imptcs_to_numpy(X, gains_dict)\n mod_split = bst.feature_importance('split')\n mod_gains = bst.feature_importance('gain')\n num_trees_from_df = tree_df['tree_index'].nunique()\n obs_counts_from_df = tree_df.loc[tree_df['node_depth'] == 1, 'count'].values\n\n np.testing.assert_equal(tree_split, mod_split)\n np.testing.assert_allclose(tree_gains, mod_gains)\n assert num_trees_from_df == num_trees\n np.testing.assert_equal(obs_counts_from_df, len(y))\n\n # test edge case with one leaf\n X = np.ones((10, 2))\n y = np.random.rand(10)\n data = lgb.Dataset(X, label=y)\n bst = lgb.train({\"objective\": \"binary\", \"verbose\": -1}, data, num_trees)\n tree_df = bst.trees_to_dataframe()\n\n assert len(tree_df) == 1\n assert tree_df.loc[0, 'tree_index'] == 0\n assert tree_df.loc[0, 'node_depth'] == 1\n assert tree_df.loc[0, 'node_index'] == \"0-L0\"\n assert tree_df.loc[0, 'value'] is not None\n for col in ('left_child', 'right_child', 'parent_index', 'split_feature',\n 'split_gain', 'threshold', 'decision_type', 'missing_direction',\n 'missing_type', 'weight', 'count'):\n assert tree_df.loc[0, col] is None\n\n\ndef test_interaction_constraints():\n X, y = load_boston(return_X_y=True)\n num_features = X.shape[1]\n train_data = lgb.Dataset(X, label=y)\n # check that constraint containing all features is equivalent to no constraint\n params = {'verbose': -1,\n 'seed': 0}\n est = lgb.train(params, train_data, num_boost_round=10)\n pred1 = est.predict(X)\n est = lgb.train(dict(params, interaction_constraints=[list(range(num_features))]), train_data,\n num_boost_round=10)\n pred2 = est.predict(X)\n np.testing.assert_allclose(pred1, pred2)\n # check that constraint partitioning the features reduces train accuracy\n est = lgb.train(dict(params, interaction_constraints=[list(range(num_features // 2)),\n list(range(num_features // 2, num_features))]),\n train_data, num_boost_round=10)\n pred3 = est.predict(X)\n assert mean_squared_error(y, pred1) < mean_squared_error(y, pred3)\n # check that constraints consisting of single features reduce accuracy further\n est = lgb.train(dict(params, interaction_constraints=[[i] for i in range(num_features)]), train_data,\n num_boost_round=10)\n pred4 = est.predict(X)\n assert mean_squared_error(y, pred3) < mean_squared_error(y, pred4)\n # test that interaction constraints work when not all features are used\n X = np.concatenate([np.zeros((X.shape[0], 1)), X], axis=1)\n num_features = X.shape[1]\n train_data = lgb.Dataset(X, label=y)\n est = lgb.train(dict(params, interaction_constraints=[[0] + list(range(2, num_features)),\n [1] + list(range(2, num_features))]),\n train_data, num_boost_round=10)\n\n\ndef test_linear_trees(tmp_path):\n # check that setting linear_tree=True fits better than ordinary trees when data has linear relationship\n np.random.seed(0)\n x = np.arange(0, 100, 0.1)\n y = 2 * x + np.random.normal(0, 0.1, 
len(x))\n x = x[:, np.newaxis]\n lgb_train = lgb.Dataset(x, label=y)\n params = {'verbose': -1,\n 'metric': 'mse',\n 'seed': 0,\n 'num_leaves': 2}\n est = lgb.train(params, lgb_train, num_boost_round=10)\n pred1 = est.predict(x)\n lgb_train = lgb.Dataset(x, label=y)\n res = {}\n est = lgb.train(dict(params, linear_tree=True), lgb_train, num_boost_round=10, evals_result=res,\n valid_sets=[lgb_train], valid_names=['train'])\n pred2 = est.predict(x)\n assert res['train']['l2'][-1] == pytest.approx(mean_squared_error(y, pred2), abs=1e-1)\n assert mean_squared_error(y, pred2) < mean_squared_error(y, pred1)\n # test again with nans in data\n x[:10] = np.nan\n lgb_train = lgb.Dataset(x, label=y)\n est = lgb.train(params, lgb_train, num_boost_round=10)\n pred1 = est.predict(x)\n lgb_train = lgb.Dataset(x, label=y)\n res = {}\n est = lgb.train(dict(params, linear_tree=True), lgb_train, num_boost_round=10, evals_result=res,\n valid_sets=[lgb_train], valid_names=['train'])\n pred2 = est.predict(x)\n assert res['train']['l2'][-1] == pytest.approx(mean_squared_error(y, pred2), abs=1e-1)\n assert mean_squared_error(y, pred2) < mean_squared_error(y, pred1)\n # test again with bagging\n res = {}\n est = lgb.train(dict(params, linear_tree=True, subsample=0.8, bagging_freq=1), lgb_train,\n num_boost_round=10, evals_result=res, valid_sets=[lgb_train], valid_names=['train'])\n pred = est.predict(x)\n assert res['train']['l2'][-1] == pytest.approx(mean_squared_error(y, pred), abs=1e-1)\n # test with a feature that has only one non-nan value\n x = np.concatenate([np.ones([x.shape[0], 1]), x], 1)\n x[500:, 1] = np.nan\n y[500:] += 10\n lgb_train = lgb.Dataset(x, label=y)\n res = {}\n est = lgb.train(dict(params, linear_tree=True, subsample=0.8, bagging_freq=1), lgb_train,\n num_boost_round=10, evals_result=res, valid_sets=[lgb_train], valid_names=['train'])\n pred = est.predict(x)\n assert res['train']['l2'][-1] == pytest.approx(mean_squared_error(y, pred), abs=1e-1)\n # test with a categorical feature\n x[:250, 0] = 0\n y[:250] += 10\n lgb_train = lgb.Dataset(x, label=y)\n est = lgb.train(dict(params, linear_tree=True, subsample=0.8, bagging_freq=1), lgb_train,\n num_boost_round=10, categorical_feature=[0])\n # test refit: same results on same data\n est2 = est.refit(x, label=y)\n p1 = est.predict(x)\n p2 = est2.predict(x)\n assert np.mean(np.abs(p1 - p2)) < 2\n\n # test refit with save and load\n temp_model = str(tmp_path / \"temp_model.txt\")\n est.save_model(temp_model)\n est2 = lgb.Booster(model_file=temp_model)\n est2 = est2.refit(x, label=y)\n p1 = est.predict(x)\n p2 = est2.predict(x)\n assert np.mean(np.abs(p1 - p2)) < 2\n # test refit: different results training on different data\n est3 = est.refit(x[:100, :], label=y[:100])\n p3 = est3.predict(x)\n assert np.mean(np.abs(p2 - p1)) > np.abs(np.max(p3 - p1))\n # test when num_leaves - 1 < num_features and when num_leaves - 1 > num_features\n X_train, _, y_train, _ = train_test_split(*load_breast_cancer(return_X_y=True), test_size=0.1, random_state=2)\n params = {'linear_tree': True,\n 'verbose': -1,\n 'metric': 'mse',\n 'seed': 0}\n train_data = lgb.Dataset(X_train, label=y_train, params=dict(params, num_leaves=2))\n est = lgb.train(params, train_data, num_boost_round=10, categorical_feature=[0])\n train_data = lgb.Dataset(X_train, label=y_train, params=dict(params, num_leaves=60))\n est = lgb.train(params, train_data, num_boost_round=10, categorical_feature=[0])\n\n\ndef test_save_and_load_linear(tmp_path):\n X_train, X_test, y_train, y_test = 
train_test_split(*load_breast_cancer(return_X_y=True), test_size=0.1,\n random_state=2)\n X_train = np.concatenate([np.ones((X_train.shape[0], 1)), X_train], 1)\n X_train[:X_train.shape[0] // 2, 0] = 0\n y_train[:X_train.shape[0] // 2] = 1\n params = {'linear_tree': True}\n train_data_1 = lgb.Dataset(X_train, label=y_train, params=params)\n est_1 = lgb.train(params, train_data_1, num_boost_round=10, categorical_feature=[0])\n pred_1 = est_1.predict(X_train)\n\n tmp_dataset = str(tmp_path / 'temp_dataset.bin')\n train_data_1.save_binary(tmp_dataset)\n train_data_2 = lgb.Dataset(tmp_dataset)\n est_2 = lgb.train(params, train_data_2, num_boost_round=10)\n pred_2 = est_2.predict(X_train)\n np.testing.assert_allclose(pred_1, pred_2)\n\n model_file = str(tmp_path / 'model.txt')\n est_2.save_model(model_file)\n est_3 = lgb.Booster(model_file=model_file)\n pred_3 = est_3.predict(X_train)\n np.testing.assert_allclose(pred_2, pred_3)\n\n\ndef test_linear_single_leaf():\n X_train, y_train = load_breast_cancer(return_X_y=True)\n train_data = lgb.Dataset(X_train, label=y_train)\n params = {\n \"objective\": \"binary\",\n \"linear_tree\": True,\n \"min_sum_hessian\": 5000\n }\n bst = lgb.train(params, train_data, num_boost_round=5)\n y_pred = bst.predict(X_train)\n assert log_loss(y_train, y_pred) < 0.661\n\n\ndef test_predict_with_start_iteration():\n def inner_test(X, y, params, early_stopping_rounds):\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)\n train_data = lgb.Dataset(X_train, label=y_train)\n valid_data = lgb.Dataset(X_test, label=y_test)\n booster = lgb.train(params, train_data, num_boost_round=50, early_stopping_rounds=early_stopping_rounds,\n valid_sets=[valid_data])\n\n # test that the predict once with all iterations equals summed results with start_iteration and num_iteration\n all_pred = booster.predict(X, raw_score=True)\n all_pred_contrib = booster.predict(X, pred_contrib=True)\n steps = [10, 12]\n for step in steps:\n pred = np.zeros_like(all_pred)\n pred_contrib = np.zeros_like(all_pred_contrib)\n for start_iter in range(0, 50, step):\n pred += booster.predict(X, start_iteration=start_iter, num_iteration=step, raw_score=True)\n pred_contrib += booster.predict(X, start_iteration=start_iter, num_iteration=step, pred_contrib=True)\n np.testing.assert_allclose(all_pred, pred)\n np.testing.assert_allclose(all_pred_contrib, pred_contrib)\n # test the case where start_iteration <= 0, and num_iteration is None\n pred1 = booster.predict(X, start_iteration=-1)\n pred2 = booster.predict(X, num_iteration=booster.best_iteration)\n np.testing.assert_allclose(pred1, pred2)\n\n # test the case where start_iteration > 0, and num_iteration <= 0\n pred4 = booster.predict(X, start_iteration=10, num_iteration=-1)\n pred5 = booster.predict(X, start_iteration=10, num_iteration=90)\n pred6 = booster.predict(X, start_iteration=10, num_iteration=0)\n np.testing.assert_allclose(pred4, pred5)\n np.testing.assert_allclose(pred4, pred6)\n\n # test the case where start_iteration > 0, and num_iteration <= 0, with pred_leaf=True\n pred4 = booster.predict(X, start_iteration=10, num_iteration=-1, pred_leaf=True)\n pred5 = booster.predict(X, start_iteration=10, num_iteration=40, pred_leaf=True)\n pred6 = booster.predict(X, start_iteration=10, num_iteration=0, pred_leaf=True)\n np.testing.assert_allclose(pred4, pred5)\n np.testing.assert_allclose(pred4, pred6)\n\n # test the case where start_iteration > 0, and num_iteration <= 0, with pred_contrib=True\n pred4 = 
booster.predict(X, start_iteration=10, num_iteration=-1, pred_contrib=True)\n pred5 = booster.predict(X, start_iteration=10, num_iteration=40, pred_contrib=True)\n pred6 = booster.predict(X, start_iteration=10, num_iteration=0, pred_contrib=True)\n np.testing.assert_allclose(pred4, pred5)\n np.testing.assert_allclose(pred4, pred6)\n\n # test for regression\n X, y = load_boston(return_X_y=True)\n params = {\n 'objective': 'regression',\n 'verbose': -1,\n 'metric': 'l2',\n 'learning_rate': 0.5\n }\n # test both with and without early stopping\n inner_test(X, y, params, early_stopping_rounds=1)\n inner_test(X, y, params, early_stopping_rounds=5)\n inner_test(X, y, params, early_stopping_rounds=None)\n\n # test for multi-class\n X, y = load_iris(return_X_y=True)\n params = {\n 'objective': 'multiclass',\n 'num_class': 3,\n 'verbose': -1,\n 'metric': 'multi_error'\n }\n # test both with and without early stopping\n inner_test(X, y, params, early_stopping_rounds=1)\n inner_test(X, y, params, early_stopping_rounds=5)\n inner_test(X, y, params, early_stopping_rounds=None)\n\n # test for binary\n X, y = load_breast_cancer(return_X_y=True)\n params = {\n 'objective': 'binary',\n 'verbose': -1,\n 'metric': 'auc'\n }\n # test both with and without early stopping\n inner_test(X, y, params, early_stopping_rounds=1)\n inner_test(X, y, params, early_stopping_rounds=5)\n inner_test(X, y, params, early_stopping_rounds=None)\n\n\ndef test_average_precision_metric():\n # test against sklearn average precision metric\n X, y = load_breast_cancer(return_X_y=True)\n params = {\n 'objective': 'binary',\n 'metric': 'average_precision',\n 'verbose': -1\n }\n res = {}\n lgb_X = lgb.Dataset(X, label=y)\n est = lgb.train(params, lgb_X, num_boost_round=10, valid_sets=[lgb_X], evals_result=res)\n ap = res['training']['average_precision'][-1]\n pred = est.predict(X)\n sklearn_ap = average_precision_score(y, pred)\n assert ap == pytest.approx(sklearn_ap)\n # test that average precision is 1 where model predicts perfectly\n y = y.copy()\n y[:] = 1\n lgb_X = lgb.Dataset(X, label=y)\n lgb.train(params, lgb_X, num_boost_round=1, valid_sets=[lgb_X], evals_result=res)\n assert res['training']['average_precision'][-1] == pytest.approx(1)\n\n\ndef test_reset_params_works_with_metric_num_class_and_boosting():\n X, y = load_breast_cancer(return_X_y=True)\n dataset_params = {\"max_bin\": 150}\n booster_params = {\n 'objective': 'multiclass',\n 'max_depth': 4,\n 'bagging_fraction': 0.8,\n 'metric': ['multi_logloss', 'multi_error'],\n 'boosting': 'gbdt',\n 'num_class': 5\n }\n dtrain = lgb.Dataset(X, y, params=dataset_params)\n bst = lgb.Booster(\n params=booster_params,\n train_set=dtrain\n )\n\n expected_params = dict(dataset_params, **booster_params)\n assert bst.params == expected_params\n\n booster_params['bagging_fraction'] += 0.1\n new_bst = bst.reset_parameter(booster_params)\n\n expected_params = dict(dataset_params, **booster_params)\n assert bst.params == expected_params\n assert new_bst.params == expected_params\n\n\ndef test_dump_model():\n X, y = load_breast_cancer(return_X_y=True)\n train_data = lgb.Dataset(X, label=y)\n params = {\n \"objective\": \"binary\",\n \"verbose\": -1\n }\n bst = lgb.train(params, train_data, num_boost_round=5)\n dumped_model_str = str(bst.dump_model(5, 0))\n assert \"leaf_features\" not in dumped_model_str\n assert \"leaf_coeff\" not in dumped_model_str\n assert \"leaf_const\" not in dumped_model_str\n assert \"leaf_value\" in dumped_model_str\n assert \"leaf_count\" in dumped_model_str\n 
params['linear_tree'] = True\n train_data = lgb.Dataset(X, label=y)\n bst = lgb.train(params, train_data, num_boost_round=5)\n dumped_model_str = str(bst.dump_model(5, 0))\n assert \"leaf_features\" in dumped_model_str\n assert \"leaf_coeff\" in dumped_model_str\n assert \"leaf_const\" in dumped_model_str\n assert \"leaf_value\" in dumped_model_str\n assert \"leaf_count\" in dumped_model_str\n"
] |
[
[
"numpy.testing.assert_allclose",
"numpy.random.rand",
"numpy.random.choice",
"scipy.sparse.isspmatrix_csr",
"numpy.testing.assert_raises_regex",
"numpy.mean",
"sklearn.metrics.average_precision_score",
"numpy.cos",
"numpy.random.random",
"numpy.concatenate",
"numpy.random.normal",
"numpy.max",
"numpy.zeros_like",
"numpy.empty",
"numpy.sin",
"scipy.sparse.isspmatrix_csc",
"numpy.arange",
"numpy.column_stack",
"scipy.sparse.csr_matrix",
"numpy.partition",
"numpy.array",
"numpy.zeros",
"numpy.testing.assert_equal",
"sklearn.model_selection.GroupKFold",
"sklearn.datasets.make_multilabel_classification",
"numpy.diff",
"numpy.allclose",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.roc_auc_score",
"sklearn.metrics.mean_squared_error",
"numpy.isnan",
"numpy.random.seed",
"numpy.sum",
"numpy.random.permutation",
"numpy.ones",
"numpy.testing.assert_array_equal",
"sklearn.metrics.log_loss",
"numpy.abs",
"numpy.all",
"numpy.linspace",
"sklearn.model_selection.TimeSeriesSplit",
"numpy.unique"
]
] |
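Aside (not part of the dataset records above or below): the quoted LightGBM `test_engine.py` code and its API list all revolve around the same `lgb.Dataset` / `lgb.train` / `Booster.predict` pattern. Below is a minimal sketch of that pattern for orientation; the breast-cancer split, parameter values and boosting-round count are illustrative assumptions, not values taken from the tests.

```python
# Minimal sketch of the Dataset/train/predict pattern exercised by the quoted tests.
# Assumes lightgbm and scikit-learn are installed; all numbers are illustrative.
import lightgbm as lgb
from sklearn.datasets import load_breast_cancer
from sklearn.metrics import log_loss
from sklearn.model_selection import train_test_split

X, y = load_breast_cancer(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)

params = {"objective": "binary", "metric": "binary_logloss", "verbose": -1}
train_set = lgb.Dataset(X_train, label=y_train)
valid_set = lgb.Dataset(X_test, label=y_test, reference=train_set)

# Train a small booster and score it on the held-out split.
booster = lgb.train(params, train_set, num_boost_round=25, valid_sets=[valid_set])
print(log_loss(y_test, booster.predict(X_test)))
```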
eo1989/VectorBTanalysis
|
[
"bea3deaf2ee3fc114b308146f2af3e4f35f70197",
"bea3deaf2ee3fc114b308146f2af3e4f35f70197",
"3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9",
"bea3deaf2ee3fc114b308146f2af3e4f35f70197",
"bea3deaf2ee3fc114b308146f2af3e4f35f70197"
] |
[
".venv/lib/python3.8/site-packages/pypfopt/efficient_frontier.py",
".venv/lib/python3.8/site-packages/pandas_datareader/compat/__init__.py",
".venv/lib/python3.8/site-packages/dill/tests/test_source.py",
".venv/lib/python3.8/site-packages/vectorbt/utils/array.py",
".venv/lib/python3.8/site-packages/findatapy/market/marketdatagenerator.py"
] |
[
"\"\"\"\nThe ``efficient_frontier`` module houses the EfficientFrontier class, which\ngenerates optimal portfolios for various possible objective functions and parameters.\n\"\"\"\n\nimport warnings\nimport numpy as np\nimport pandas as pd\nimport cvxpy as cp\n\nfrom . import objective_functions, base_optimizer\n\n\nclass EfficientFrontier(base_optimizer.BaseConvexOptimizer):\n\n \"\"\"\n An EfficientFrontier object (inheriting from BaseConvexOptimizer) contains multiple\n optimisation methods that can be called (corresponding to different objective\n functions) with various parameters. Note: a new EfficientFrontier object should\n be instantiated if you want to make any change to objectives/constraints/bounds/parameters.\n\n Instance variables:\n\n - Inputs:\n\n - ``n_assets`` - int\n - ``tickers`` - str list\n - ``bounds`` - float tuple OR (float tuple) list\n - ``cov_matrix`` - np.ndarray\n - ``expected_returns`` - np.ndarray\n - ``solver`` - str\n\n - Output: ``weights`` - np.ndarray\n\n Public methods:\n\n - ``max_sharpe()`` optimises for maximal Sharpe ratio (a.k.a the tangency portfolio)\n - ``min_volatility()`` optimises for minimum volatility\n - ``max_quadratic_utility()`` maximises the quadratic utility, given some risk aversion.\n - ``efficient_risk()`` maximises Sharpe for a given target risk\n - ``efficient_return()`` minimises risk for a given target return\n\n - ``add_objective()`` adds a (convex) objective to the optimisation problem\n - ``add_constraint()`` adds a (linear) constraint to the optimisation problem\n - ``convex_objective()`` solves for a generic convex objective with linear constraints\n - ``nonconvex_objective()`` solves for a generic nonconvex objective using the scipy backend.\n This is prone to getting stuck in local minima and is generally *not* recommended.\n\n - ``portfolio_performance()`` calculates the expected return, volatility and Sharpe ratio for\n the optimised portfolio.\n - ``set_weights()`` creates self.weights (np.ndarray) from a weights dict\n - ``clean_weights()`` rounds the weights and clips near-zeros.\n - ``save_weights_to_file()`` saves the weights to csv, json, or txt.\n \"\"\"\n\n def __init__(\n self,\n expected_returns,\n cov_matrix,\n weight_bounds=(0, 1),\n gamma=0,\n solver=None,\n verbose=False,\n ):\n \"\"\"\n :param expected_returns: expected returns for each asset. Can be None if\n optimising for volatility only (but not recommended).\n :type expected_returns: pd.Series, list, np.ndarray\n :param cov_matrix: covariance of returns for each asset. This **must** be\n positive semidefinite, otherwise optimisation will fail.\n :type cov_matrix: pd.DataFrame or np.array\n :param weight_bounds: minimum and maximum weight of each asset OR single min/max pair\n if all identical, defaults to (0, 1). Must be changed to (-1, 1)\n for portfolios with shorting.\n :type weight_bounds: tuple OR tuple list, optional\n :param gamma: L2 regularisation parameter, defaults to 0. Increase if you want more\n non-negligible weights\n :type gamma: float, optional\n :param solver: name of solver. 
list available solvers with: `cvxpy.installed_solvers()`\n :type solver: str\n :param verbose: whether performance and debugging info should be printed, defaults to False\n :type verbose: bool, optional\n :raises TypeError: if ``expected_returns`` is not a series, list or array\n :raises TypeError: if ``cov_matrix`` is not a dataframe or array\n \"\"\"\n # Inputs\n self.cov_matrix = EfficientFrontier._validate_cov_matrix(cov_matrix)\n self.expected_returns = EfficientFrontier._validate_expected_returns(\n expected_returns\n )\n\n # Labels\n if isinstance(expected_returns, pd.Series):\n tickers = list(expected_returns.index)\n elif isinstance(cov_matrix, pd.DataFrame):\n tickers = list(cov_matrix.columns)\n else: # use integer labels\n tickers = list(range(len(expected_returns)))\n\n if expected_returns is not None:\n if cov_matrix.shape != (len(expected_returns), len(expected_returns)):\n raise ValueError(\"Covariance matrix does not match expected returns\")\n\n super().__init__(\n len(tickers), tickers, weight_bounds, solver=solver, verbose=verbose\n )\n\n @staticmethod\n def _validate_expected_returns(expected_returns):\n if expected_returns is None:\n warnings.warn(\n \"No expected returns provided. You may only use ef.min_volatility()\"\n )\n return None\n elif isinstance(expected_returns, pd.Series):\n return expected_returns.values\n elif isinstance(expected_returns, list):\n return np.array(expected_returns)\n elif isinstance(expected_returns, np.ndarray):\n return expected_returns.ravel()\n else:\n raise TypeError(\"expected_returns is not a series, list or array\")\n\n @staticmethod\n def _validate_cov_matrix(cov_matrix):\n if cov_matrix is None:\n raise ValueError(\"cov_matrix must be provided\")\n elif isinstance(cov_matrix, pd.DataFrame):\n return cov_matrix.values\n elif isinstance(cov_matrix, np.ndarray):\n return cov_matrix\n else:\n raise TypeError(\"cov_matrix is not a series, list or array\")\n\n def _market_neutral_bounds_check(self):\n \"\"\"\n Helper method to make sure bounds are suitable for a market neutral\n optimisation.\n \"\"\"\n portfolio_possible = np.any(self._lower_bounds < 0)\n if not portfolio_possible:\n warnings.warn(\n \"Market neutrality requires shorting - bounds have been amended\",\n RuntimeWarning,\n )\n self._map_bounds_to_constraints((-1, 1))\n # Delete original constraints\n del self._constraints[0]\n del self._constraints[0]\n\n def min_volatility(self):\n \"\"\"\n Minimise volatility.\n\n :return: asset weights for the volatility-minimising portfolio\n :rtype: OrderedDict\n \"\"\"\n self._objective = objective_functions.portfolio_variance(\n self._w, self.cov_matrix\n )\n for obj in self._additional_objectives:\n self._objective += obj\n\n self._constraints.append(cp.sum(self._w) == 1)\n\n return self._solve_cvxpy_opt_problem()\n\n def max_sharpe(self, risk_free_rate=0.02):\n \"\"\"\n Maximise the Sharpe Ratio. The result is also referred to as the tangency portfolio,\n as it is the portfolio for which the capital market line is tangent to the efficient frontier.\n\n This is a convex optimisation problem after making a certain variable substitution. 
See\n `Cornuejols and Tutuncu (2006) <http://web.math.ku.dk/~rolf/CT_FinOpt.pdf>`_ for more.\n\n :param risk_free_rate: risk-free rate of borrowing/lending, defaults to 0.02.\n The period of the risk-free rate should correspond to the\n frequency of expected returns.\n :type risk_free_rate: float, optional\n :raises ValueError: if ``risk_free_rate`` is non-numeric\n :return: asset weights for the Sharpe-maximising portfolio\n :rtype: OrderedDict\n \"\"\"\n if not isinstance(risk_free_rate, (int, float)):\n raise ValueError(\"risk_free_rate should be numeric\")\n\n # max_sharpe requires us to make a variable transformation.\n # Here we treat w as the transformed variable.\n self._objective = cp.quad_form(self._w, self.cov_matrix)\n k = cp.Variable()\n\n # Note: objectives are not scaled by k. Hence there are subtle differences\n # between how these objectives work for max_sharpe vs min_volatility\n if len(self._additional_objectives) > 0:\n warnings.warn(\n \"max_sharpe transforms the optimisation problem so additional objectives may not work as expected.\"\n )\n for obj in self._additional_objectives:\n self._objective += obj\n\n new_constraints = []\n # Must rebuild the constraints\n for constr in self._constraints:\n if isinstance(constr, cp.constraints.nonpos.Inequality):\n # Either the first or second item is the expression\n if isinstance(\n constr.args[0], cp.expressions.constants.constant.Constant\n ):\n new_constraints.append(constr.args[1] >= constr.args[0] * k)\n else:\n new_constraints.append(constr.args[0] <= constr.args[1] * k)\n elif isinstance(constr, cp.constraints.zero.Equality):\n new_constraints.append(constr.args[0] == constr.args[1] * k)\n else:\n raise TypeError(\n \"Please check that your constraints are in a suitable format\"\n )\n\n # Transformed max_sharpe convex problem:\n self._constraints = [\n (self.expected_returns - risk_free_rate).T @ self._w == 1,\n cp.sum(self._w) == k,\n k >= 0,\n ] + new_constraints\n\n self._solve_cvxpy_opt_problem()\n # Inverse-transform\n self.weights = (self._w.value / k.value).round(16) + 0.0\n return self._make_output_weights()\n\n def max_quadratic_utility(self, risk_aversion=1, market_neutral=False):\n r\"\"\"\n Maximise the given quadratic utility, i.e:\n\n .. math::\n\n \\max_w w^T \\mu - \\frac \\delta 2 w^T \\Sigma w\n\n :param risk_aversion: risk aversion parameter (must be greater than 0),\n defaults to 1\n :type risk_aversion: positive float\n :param market_neutral: whether the portfolio should be market neutral (weights sum to zero),\n defaults to False. Requires negative lower weight bound.\n :param market_neutral: bool, optional\n :return: asset weights for the maximum-utility portfolio\n :rtype: OrderedDict\n \"\"\"\n if risk_aversion <= 0:\n raise ValueError(\"risk aversion coefficient must be greater than zero\")\n\n self._objective = objective_functions.quadratic_utility(\n self._w, self.expected_returns, self.cov_matrix, risk_aversion=risk_aversion\n )\n for obj in self._additional_objectives:\n self._objective += obj\n\n if market_neutral:\n self._market_neutral_bounds_check()\n self._constraints.append(cp.sum(self._w) == 0)\n else:\n self._constraints.append(cp.sum(self._w) == 1)\n\n return self._solve_cvxpy_opt_problem()\n\n def efficient_risk(self, target_volatility, market_neutral=False):\n \"\"\"\n Maximise return for a target risk. 
The resulting portfolio will have a volatility\n less than the target (but not guaranteed to be equal).\n\n :param target_volatility: the desired maximum volatility of the resulting portfolio.\n :type target_volatility: float\n :param market_neutral: whether the portfolio should be market neutral (weights sum to zero),\n defaults to False. Requires negative lower weight bound.\n :param market_neutral: bool, optional\n :raises ValueError: if ``target_volatility`` is not a positive float\n :raises ValueError: if no portfolio can be found with volatility equal to ``target_volatility``\n :raises ValueError: if ``risk_free_rate`` is non-numeric\n :return: asset weights for the efficient risk portfolio\n :rtype: OrderedDict\n \"\"\"\n if not isinstance(target_volatility, (float, int)) or target_volatility < 0:\n raise ValueError(\"target_volatility should be a positive float\")\n\n global_min_volatility = np.sqrt(1 / np.sum(np.linalg.inv(self.cov_matrix)))\n\n if target_volatility < global_min_volatility:\n raise ValueError(\n \"The minimum volatility is {:.3f}. Please use a higher target_volatility\".format(\n global_min_volatility\n )\n )\n\n self._objective = objective_functions.portfolio_return(\n self._w, self.expected_returns\n )\n variance = objective_functions.portfolio_variance(self._w, self.cov_matrix)\n\n for obj in self._additional_objectives:\n self._objective += obj\n\n self._constraints.append(variance <= target_volatility ** 2)\n\n # The equality constraint is either \"weights sum to 1\" (default), or\n # \"weights sum to 0\" (market neutral).\n if market_neutral:\n self._market_neutral_bounds_check()\n self._constraints.append(cp.sum(self._w) == 0)\n else:\n self._constraints.append(cp.sum(self._w) == 1)\n\n return self._solve_cvxpy_opt_problem()\n\n def efficient_return(self, target_return, market_neutral=False):\n \"\"\"\n Calculate the 'Markowitz portfolio', minimising volatility for a given target return.\n\n :param target_return: the desired return of the resulting portfolio.\n :type target_return: float\n :param market_neutral: whether the portfolio should be market neutral (weights sum to zero),\n defaults to False. 
Requires negative lower weight bound.\n :type market_neutral: bool, optional\n :raises ValueError: if ``target_return`` is not a positive float\n :raises ValueError: if no portfolio can be found with return equal to ``target_return``\n :return: asset weights for the Markowitz portfolio\n :rtype: OrderedDict\n \"\"\"\n if not isinstance(target_return, float) or target_return < 0:\n raise ValueError(\"target_return should be a positive float\")\n if target_return > self.expected_returns.max():\n raise ValueError(\n \"target_return must be lower than the largest expected return\"\n )\n\n self._objective = objective_functions.portfolio_variance(\n self._w, self.cov_matrix\n )\n ret = objective_functions.portfolio_return(\n self._w, self.expected_returns, negative=False\n )\n\n self.objective = cp.quad_form(self._w, self.cov_matrix)\n ret = self.expected_returns.T @ self._w\n\n for obj in self._additional_objectives:\n self._objective += obj\n\n self._constraints.append(ret >= target_return)\n\n # The equality constraint is either \"weights sum to 1\" (default), or\n # \"weights sum to 0\" (market neutral).\n if market_neutral:\n self._market_neutral_bounds_check()\n self._constraints.append(cp.sum(self._w) == 0)\n else:\n self._constraints.append(cp.sum(self._w) == 1)\n\n return self._solve_cvxpy_opt_problem()\n\n def portfolio_performance(self, verbose=False, risk_free_rate=0.02):\n \"\"\"\n After optimising, calculate (and optionally print) the performance of the optimal\n portfolio. Currently calculates expected return, volatility, and the Sharpe ratio.\n\n :param verbose: whether performance should be printed, defaults to False\n :type verbose: bool, optional\n :param risk_free_rate: risk-free rate of borrowing/lending, defaults to 0.02.\n The period of the risk-free rate should correspond to the\n frequency of expected returns.\n :type risk_free_rate: float, optional\n :raises ValueError: if weights have not been calcualted yet\n :return: expected return, volatility, Sharpe ratio.\n :rtype: (float, float, float)\n \"\"\"\n return base_optimizer.portfolio_performance(\n self.weights,\n self.expected_returns,\n self.cov_matrix,\n verbose,\n risk_free_rate,\n )\n",
"from distutils.version import LooseVersion\nfrom functools import reduce\nfrom io import StringIO\nfrom urllib.error import HTTPError\n\nimport pandas as pd\nfrom pandas.api.types import is_list_like, is_number\nimport pandas.io.common as com\nfrom pandas.testing import assert_frame_equal\n\nPANDAS_VERSION = LooseVersion(pd.__version__)\n\nPANDAS_0210 = PANDAS_VERSION >= LooseVersion(\"0.21.0\")\nPANDAS_0220 = PANDAS_VERSION >= LooseVersion(\"0.22.0\")\nPANDAS_0230 = PANDAS_VERSION >= LooseVersion(\"0.23.0\")\n\n__all__ = [\n \"HTTPError\",\n \"StringIO\",\n \"PANDAS_0210\",\n \"PANDAS_0220\",\n \"PANDAS_0230\",\n \"get_filepath_or_buffer\",\n \"str_to_bytes\",\n \"string_types\",\n \"assert_frame_equal\",\n \"is_list_like\",\n \"is_number\",\n \"lmap\",\n \"lrange\",\n \"concat\",\n \"reduce\",\n]\n\n\ndef get_filepath_or_buffer(filepath_or_buffer, encoding=None, compression=None):\n\n # Dictionaries are no longer considered valid inputs\n # for \"get_filepath_or_buffer\" starting in pandas >= 0.20.0\n if isinstance(filepath_or_buffer, dict):\n return filepath_or_buffer, encoding, compression\n\n return com.get_filepath_or_buffer(\n filepath_or_buffer, encoding=encoding, compression=None\n )\n\n\nstring_types = (str,)\nbinary_type = bytes\n\n\ndef str_to_bytes(s, encoding=None):\n if isinstance(s, bytes):\n return s\n return s.encode(encoding or \"ascii\")\n\n\ndef bytes_to_str(b, encoding=None):\n return b.decode(encoding or \"utf-8\")\n\n\ndef lmap(*args, **kwargs):\n return list(map(*args, **kwargs))\n\n\ndef lrange(*args, **kwargs):\n return list(range(*args, **kwargs))\n\n\ndef concat(*args, **kwargs):\n \"\"\"\n Shim to wokr around sort keyword\n \"\"\"\n if not PANDAS_0230 and \"sort\" in kwargs:\n del kwargs[\"sort\"]\n return pd.concat(*args, **kwargs)\n",
"#!/usr/bin/env python\n#\n# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)\n# Copyright (c) 2008-2016 California Institute of Technology.\n# Copyright (c) 2016-2020 The Uncertainty Quantification Foundation.\n# License: 3-clause BSD. The full license text is available at:\n# - https://github.com/uqfoundation/dill/blob/master/LICENSE\n\nfrom dill.source import getsource, getname, _wrap, likely_import\nfrom dill.source import getimportable\n\n\nimport sys\nPY3 = sys.version_info[0] >= 3\n\nf = lambda x: x**2\ndef g(x): return f(x) - x\n\ndef h(x):\n def g(x): return x\n return g(x) - x\n\nclass Foo(object):\n def bar(self, x):\n return x*x+x\n_foo = Foo()\n\ndef add(x,y):\n return x+y\n\n# yes, same as 'f', but things are tricky when it comes to pointers\nsquared = lambda x:x**2\n\nclass Bar:\n pass\n_bar = Bar()\n\n # inspect.getsourcelines # dill.source.getblocks\ndef test_getsource():\n assert getsource(f) == 'f = lambda x: x**2\\n'\n assert getsource(g) == 'def g(x): return f(x) - x\\n'\n assert getsource(h) == 'def h(x):\\n def g(x): return x\\n return g(x) - x\\n'\n assert getname(f) == 'f'\n assert getname(g) == 'g'\n assert getname(h) == 'h'\n assert _wrap(f)(4) == 16\n assert _wrap(g)(4) == 12\n assert _wrap(h)(4) == 0\n\n assert getname(Foo) == 'Foo'\n assert getname(Bar) == 'Bar'\n assert getsource(Bar) == 'class Bar:\\n pass\\n'\n assert getsource(Foo) == 'class Foo(object):\\n def bar(self, x):\\n return x*x+x\\n'\n #XXX: add getsource for _foo, _bar\n\n# test itself\ndef test_itself():\n assert likely_import(likely_import)=='from dill.source import likely_import\\n'\n\n# builtin functions and objects\ndef test_builtin():\n if PY3: builtin = 'builtins'\n else: builtin = '__builtin__'\n assert likely_import(pow) == 'pow\\n'\n assert likely_import(100) == '100\\n'\n assert likely_import(True) == 'True\\n'\n assert likely_import(pow, explicit=True) == 'from %s import pow\\n' % builtin\n assert likely_import(100, explicit=True) == '100\\n'\n assert likely_import(True, explicit=True) == 'True\\n' if PY3 else 'from %s import True\\n' % builtin\n # this is kinda BS... 
you can't import a None\n assert likely_import(None) == 'None\\n'\n assert likely_import(None, explicit=True) == 'None\\n'\n\n\n# other imported functions\ndef test_imported():\n from math import sin\n assert likely_import(sin) == 'from math import sin\\n'\n\n# interactively defined functions\ndef test_dynamic():\n assert likely_import(add) == 'from %s import add\\n' % __name__\n # interactive lambdas\n assert likely_import(squared) == 'from %s import squared\\n' % __name__\n\n# classes and class instances\ndef test_classes():\n try: #XXX: should this be a 'special case'?\n from StringIO import StringIO\n x = \"from StringIO import StringIO\\n\"\n y = x\n except ImportError:\n from io import BytesIO as StringIO\n x = \"from io import BytesIO\\n\"\n y = \"from _io import BytesIO\\n\"\n s = StringIO()\n\n assert likely_import(StringIO) == x\n assert likely_import(s) == y\n # interactively defined classes and class instances\n assert likely_import(Foo) == 'from %s import Foo\\n' % __name__\n assert likely_import(_foo) == 'from %s import Foo\\n' % __name__\n\n\n# test getimportable\ndef test_importable():\n assert getimportable(add) == 'from %s import add\\n' % __name__\n assert getimportable(squared) == 'from %s import squared\\n' % __name__\n assert getimportable(Foo) == 'from %s import Foo\\n' % __name__\n assert getimportable(Foo.bar) == 'from %s import bar\\n' % __name__\n assert getimportable(_foo.bar) == 'from %s import bar\\n' % __name__\n assert getimportable(None) == 'None\\n'\n assert getimportable(100) == '100\\n'\n\n assert getimportable(add, byname=False) == 'def add(x,y):\\n return x+y\\n'\n assert getimportable(squared, byname=False) == 'squared = lambda x:x**2\\n'\n assert getimportable(None, byname=False) == 'None\\n'\n assert getimportable(Bar, byname=False) == 'class Bar:\\n pass\\n'\n assert getimportable(Foo, byname=False) == 'class Foo(object):\\n def bar(self, x):\\n return x*x+x\\n'\n assert getimportable(Foo.bar, byname=False) == 'def bar(self, x):\\n return x*x+x\\n'\n assert getimportable(Foo.bar, byname=True) == 'from %s import bar\\n' % __name__\n assert getimportable(Foo.bar, alias='memo', byname=True) == 'from %s import bar as memo\\n' % __name__\n assert getimportable(Foo, alias='memo', byname=True) == 'from %s import Foo as memo\\n' % __name__\n assert getimportable(squared, alias='memo', byname=True) == 'from %s import squared as memo\\n' % __name__\n assert getimportable(squared, alias='memo', byname=False) == 'memo = squared = lambda x:x**2\\n'\n assert getimportable(add, alias='memo', byname=False) == 'def add(x,y):\\n return x+y\\n\\nmemo = add\\n'\n assert getimportable(None, alias='memo', byname=False) == 'memo = None\\n'\n assert getimportable(100, alias='memo', byname=False) == 'memo = 100\\n'\n assert getimportable(add, explicit=True) == 'from %s import add\\n' % __name__\n assert getimportable(squared, explicit=True) == 'from %s import squared\\n' % __name__\n assert getimportable(Foo, explicit=True) == 'from %s import Foo\\n' % __name__\n assert getimportable(Foo.bar, explicit=True) == 'from %s import bar\\n' % __name__\n assert getimportable(_foo.bar, explicit=True) == 'from %s import bar\\n' % __name__\n assert getimportable(None, explicit=True) == 'None\\n'\n assert getimportable(100, explicit=True) == '100\\n'\n\n\ndef test_numpy():\n try:\n from numpy import array\n x = array([1,2,3])\n assert getimportable(x) == 'from numpy import array\\narray([1, 2, 3])\\n'\n assert getimportable(array) == 'from %s import array\\n' % array.__module__\n 
assert getimportable(x, byname=False) == 'from numpy import array\\narray([1, 2, 3])\\n'\n assert getimportable(array, byname=False) == 'from %s import array\\n' % array.__module__\n except ImportError: pass\n\n#NOTE: if before likely_import(pow), will cause pow to throw AssertionError\ndef test_foo():\n assert getimportable(_foo, byname=False).startswith(\"import dill\\nclass Foo(object):\\n def bar(self, x):\\n return x*x+x\\ndill.loads(\")\n\nif __name__ == '__main__':\n test_getsource()\n test_itself()\n test_builtin()\n test_imported()\n test_dynamic()\n test_classes()\n test_importable()\n test_numpy()\n test_foo()\n",
"\"\"\"Numba-compiled utility functions for working with arrays.\"\"\"\n\nimport numpy as np\nfrom numba import njit\n\n\ndef is_sorted(a):\n \"\"\"Checks if array is sorted.\"\"\"\n return np.all(a[:-1] <= a[1:])\n\n\n@njit(cache=True)\ndef is_sorted_nb(a):\n \"\"\"Numba-compiled version of `is_sorted`.\"\"\"\n for i in range(a.size-1):\n if a[i+1] < a[i]:\n return False\n return True\n",
"__author__ = 'saeedamen' # Saeed Amen\r\n\r\n#\r\n# Copyright 2016 Cuemacro\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the\r\n# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an\r\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n#\r\n# See the License for the specific language governing permissions and limitations under the License.\r\n#\r\n\r\nimport copy\r\n\r\nimport pandas\r\n\r\nfrom findatapy.market.ioengine import IOEngine\r\nfrom findatapy.market.marketdatarequest import MarketDataRequest\r\nfrom findatapy.timeseries import Filter, Calculations\r\nfrom findatapy.util import DataConstants, LoggerManager, ConfigManager, SwimPool\r\n\r\nconstants = DataConstants()\r\n\r\nclass MarketDataGenerator(object):\r\n \"\"\"Returns market data time series by directly calling market data sources.\r\n\r\n At present it supports Bloomberg (bloomberg), Yahoo (yahoo), Quandl (quandl), FRED (fred) etc. which are implemented\r\n in subclasses of DataVendor class. This provides a common wrapper for all these data sources.\r\n \"\"\"\r\n\r\n def __init__(self):\r\n self.config = ConfigManager().get_instance()\r\n self.logger = LoggerManager().getLogger(__name__)\r\n self.filter = Filter()\r\n self.calculations = Calculations()\r\n self.io_engine = IOEngine()\r\n self._intraday_code = -1\r\n self.days_expired_intraday_contract_download = -1\r\n\r\n return\r\n\r\n def set_intraday_code(self, code):\r\n self._intraday_code = code\r\n\r\n def get_data_vendor(self, source):\r\n \"\"\"Loads appropriate data service class\r\n\r\n Parameters\r\n ----------\r\n source : str\r\n the data service to use \"bloomberg\", \"quandl\", \"yahoo\", \"google\", \"fred\" etc.\r\n we can also have forms like \"bloomberg-boe\" separated by hyphens\r\n\r\n Returns\r\n -------\r\n DataVendor\r\n \"\"\"\r\n\r\n data_vendor = None\r\n\r\n try:\r\n source = source.split(\"-\")[0]\r\n except:\r\n self.logger.error(\"Was data source specified?\")\r\n\r\n return None\r\n\r\n if source == 'bloomberg':\r\n try:\r\n from findatapy.market.datavendorbbg import DataVendorBBGOpen\r\n data_vendor = DataVendorBBGOpen()\r\n except:\r\n self.logger.warn(\"Bloomberg needs to be installed\")\r\n\r\n elif source == 'quandl':\r\n from findatapy.market.datavendorweb import DataVendorQuandl\r\n data_vendor = DataVendorQuandl()\r\n\r\n elif source == 'ons':\r\n from findatapy.market.datavendorweb import DataVendorONS\r\n data_vendor = DataVendorONS()\r\n\r\n elif source == 'boe':\r\n from findatapy.market.datavendorweb import DataVendorBOE\r\n data_vendor = DataVendorBOE()\r\n\r\n elif source == 'dukascopy':\r\n from findatapy.market.datavendorweb import DataVendorDukasCopy\r\n data_vendor = DataVendorDukasCopy()\r\n\r\n elif source == 'fxcm':\r\n from findatapy.market.datavendorweb import DataVendorFXCM\r\n data_vendor = DataVendorFXCM()\r\n\r\n elif source == 'alfred':\r\n from findatapy.market.datavendorweb import DataVendorALFRED\r\n data_vendor = DataVendorALFRED()\r\n\r\n elif source == 'yahoo':\r\n from findatapy.market.datavendorweb import DataVendorYahoo\r\n data_vendor = DataVendorYahoo()\r\n\r\n elif source in ['google', 'fred', 'oecd', 'eurostat', 'edgar-index']:\r\n from findatapy.market.datavendorweb import DataVendorPandasWeb\r\n 
data_vendor = DataVendorPandasWeb()\r\n\r\n elif source == 'bitcoincharts':\r\n from findatapy.market.datavendorweb import DataVendorBitcoincharts\r\n data_vendor = DataVendorBitcoincharts()\r\n elif source == 'poloniex':\r\n from findatapy.market.datavendorweb import DataVendorPoloniex\r\n data_vendor = DataVendorPoloniex()\r\n elif source == 'binance':\r\n from findatapy.market.datavendorweb import DataVendorBinance\r\n data_vendor = DataVendorBinance()\r\n elif source == 'bitfinex':\r\n from findatapy.market.datavendorweb import DataVendorBitfinex\r\n data_vendor = DataVendorBitfinex()\r\n elif source == 'gdax':\r\n from findatapy.market.datavendorweb import DataVendorGdax\r\n data_vendor = DataVendorGdax()\r\n elif source == 'kraken':\r\n from findatapy.market.datavendorweb import DataVendorKraken\r\n data_vendor = DataVendorKraken()\r\n elif source == 'bitmex':\r\n from findatapy.market.datavendorweb import DataVendorBitmex\r\n data_vendor = DataVendorBitmex()\r\n elif '.csv' in source or '.h5' in source:\r\n from findatapy.market.datavendorweb import DataVendorFlatFile\r\n data_vendor = DataVendorFlatFile()\r\n elif source == 'alphavantage':\r\n from findatapy.market.datavendorweb import DataVendorAlphaVantage\r\n data_vendor = DataVendorAlphaVantage()\r\n elif source == 'huobi':\r\n from findatapy.market.datavendorweb import DataVendorHuobi\r\n data_vendor = DataVendorHuobi()\r\n\r\n # TODO add support for other data sources (like Reuters)\r\n\r\n return data_vendor\r\n\r\n def fetch_market_data(self, market_data_request, kill_session = True):\r\n \"\"\"Loads time series from specified data provider\r\n\r\n Parameters\r\n ----------\r\n market_data_request : MarketDataRequest\r\n contains various properties describing time series to fetched, including ticker, start & finish date etc.\r\n\r\n Returns\r\n -------\r\n pandas.DataFrame\r\n \"\"\"\r\n\r\n # data_vendor = self.get_data_vendor(market_data_request.data_source)\r\n\r\n # check if tickers have been specified (if not load all of them for a category)\r\n # also handle single tickers/list tickers\r\n create_tickers = False\r\n\r\n if market_data_request.vendor_tickers is not None and market_data_request.tickers is None:\r\n market_data_request.tickers = market_data_request.vendor_tickers\r\n\r\n tickers = market_data_request.tickers\r\n\r\n if tickers is None :\r\n create_tickers = True\r\n elif isinstance(tickers, str):\r\n if tickers == '': create_tickers = True\r\n elif isinstance(tickers, list):\r\n if tickers == []: create_tickers = True\r\n\r\n if create_tickers:\r\n market_data_request.tickers = ConfigManager().get_instance().get_tickers_list_for_category(\r\n market_data_request.category, market_data_request.data_source, market_data_request.freq, market_data_request.cut)\r\n\r\n # intraday or tick: only one ticker per cache file\r\n if (market_data_request.freq in ['intraday', 'tick', 'second', 'hour', 'minute']):\r\n data_frame_agg = self.download_intraday_tick(market_data_request)\r\n # return data_frame_agg\r\n\r\n # daily: multiple tickers per cache file - assume we make one API call to vendor library\r\n else:\r\n data_frame_agg = self.download_daily(market_data_request)\r\n\r\n if('internet_load' in market_data_request.cache_algo):\r\n self.logger.debug(\"Internet loading.. 
\")\r\n\r\n # Signal to data_vendor template to exit session\r\n # if data_vendor is not None and kill_session == True: data_vendor.kill_session()\r\n\r\n if(market_data_request.cache_algo == 'cache_algo'):\r\n self.logger.debug(\"Only caching data in memory, do not return any time series.\"); return\r\n\r\n # Only return time series if specified in the algo\r\n if 'return' in market_data_request.cache_algo:\r\n # Special case for events/events-dt which is not indexed like other tables (also same for downloading futures\r\n # contracts dates)\r\n if market_data_request.category is not None:\r\n if 'events' in market_data_request.category:\r\n return data_frame_agg\r\n\r\n # Pad columns a second time (is this necessary to do here again?)\r\n # TODO only do this for not daily data?\r\n try:\r\n if data_frame_agg is not None:\r\n data_frame_agg = self.filter.filter_time_series(market_data_request, data_frame_agg, pad_columns=True)\\\r\n .dropna(how = 'all')\r\n\r\n # Resample data using pandas if specified in the MarketDataRequest\r\n if market_data_request.resample is not None:\r\n if 'last' in market_data_request.resample_how:\r\n data_frame_agg = data_frame_agg.resample(market_data_request.resample).last()\r\n elif 'first' in market_data_request.resample_how:\r\n data_frame_agg = data_frame_agg.resample(market_data_request.resample).first()\r\n\r\n if 'dropna' in market_data_request.resample_how:\r\n data_frame_agg = data_frame_agg.dropna(how = 'all')\r\n else:\r\n self.logger.warn(\"No data returned for \" + str(market_data_request.tickers))\r\n\r\n return data_frame_agg\r\n except Exception as e:\r\n print(str(e))\r\n if data_frame_agg is not None:\r\n return data_frame_agg\r\n\r\n import traceback\r\n\r\n self.logger.warn(\"No data returned for \" + str(market_data_request.tickers))\r\n\r\n return None\r\n\r\n def create_time_series_hash_key(self, market_data_request, ticker = None):\r\n \"\"\"Creates a hash key for retrieving the time series\r\n\r\n Parameters\r\n ----------\r\n market_data_request : MarketDataRequest\r\n contains various properties describing time series to fetched, including ticker, start & finish date etc.\r\n\r\n Returns\r\n -------\r\n str\r\n \"\"\"\r\n\r\n if(isinstance(ticker, list)):\r\n ticker = ticker[0]\r\n\r\n return self.create_cache_file_name(MarketDataRequest().create_category_key(market_data_request, ticker))\r\n\r\n def download_intraday_tick(self, market_data_request):\r\n \"\"\"Loads intraday time series from specified data provider\r\n\r\n Parameters\r\n ----------\r\n market_data_request : MarketDataRequest\r\n contains various properties describing time series to fetched, including ticker, start & finish date etc.\r\n\r\n Returns\r\n -------\r\n pandas.DataFrame\r\n \"\"\"\r\n\r\n data_frame_agg = None\r\n calcuations = Calculations()\r\n\r\n ticker_cycle = 0\r\n\r\n data_frame_group = []\r\n\r\n # Single threaded version\r\n # handle intraday ticker calls separately one by one\r\n if len(market_data_request.tickers) == 1 or constants.market_thread_no['other'] == 1:\r\n for ticker in market_data_request.tickers:\r\n market_data_request_single = copy.copy(market_data_request)\r\n market_data_request_single.tickers = ticker\r\n\r\n if market_data_request.vendor_tickers is not None:\r\n market_data_request_single.vendor_tickers = [market_data_request.vendor_tickers[ticker_cycle]]\r\n ticker_cycle = ticker_cycle + 1\r\n\r\n # We downscale into float32, to avoid memory problems in Python (32 bit)\r\n # data is stored on disk as float32 anyway\r\n # 
old_finish_date = market_data_request_single.finish_date\r\n #\r\n # market_data_request_single.finish_date = self.refine_expiry_date(market_data_request)\r\n #\r\n # if market_data_request_single.finish_date >= market_data_request_single.start_date:\r\n # data_frame_single = data_vendor.load_ticker(market_data_request_single)\r\n # else:\r\n # data_frame_single = None\r\n #\r\n # market_data_request_single.finish_date = old_finish_date\r\n #\r\n # data_frame_single = data_vendor.load_ticker(market_data_request_single)\r\n\r\n data_frame_single = self.fetch_single_time_series(market_data_request)\r\n\r\n # If the vendor doesn't provide any data, don't attempt to append\r\n if data_frame_single is not None:\r\n if data_frame_single.empty == False:\r\n data_frame_single.index.name = 'Date'\r\n data_frame_single = data_frame_single.astype('float32')\r\n\r\n data_frame_group.append(data_frame_single)\r\n\r\n # # if you call for returning multiple tickers, be careful with memory considerations!\r\n # if data_frame_agg is not None:\r\n # data_frame_agg = data_frame_agg.join(data_frame_single, how='outer')\r\n # else:\r\n # data_frame_agg = data_frame_single\r\n\r\n # key = self.create_category_key(market_data_request, ticker)\r\n # fname = self.create_cache_file_name(key)\r\n # self._time_series_cache[fname] = data_frame_agg # cache in memory (disable for intraday)\r\n\r\n\r\n # If you call for returning multiple tickers, be careful with memory considerations!\r\n if data_frame_group is not None:\r\n data_frame_agg = calcuations.pandas_outer_join(data_frame_group)\r\n\r\n return data_frame_agg\r\n\r\n else:\r\n market_data_request_list = []\r\n\r\n # Create a list of MarketDataRequests\r\n for ticker in market_data_request.tickers:\r\n market_data_request_single = copy.copy(market_data_request)\r\n market_data_request_single.tickers = ticker\r\n\r\n if market_data_request.vendor_tickers is not None:\r\n market_data_request_single.vendor_tickers = [market_data_request.vendor_tickers[ticker_cycle]]\r\n ticker_cycle = ticker_cycle + 1\r\n\r\n market_data_request_list.append(market_data_request_single)\r\n\r\n return self.fetch_group_time_series(market_data_request_list)\r\n\r\n def fetch_single_time_series(self, market_data_request):\r\n\r\n market_data_request = MarketDataRequest(md_request=market_data_request)\r\n\r\n # Only includes those tickers have not expired yet!\r\n start_date = pandas.Timestamp(market_data_request.start_date).date()\r\n\r\n import datetime\r\n\r\n current_date = datetime.datetime.utcnow().date()\r\n\r\n from datetime import timedelta\r\n\r\n tickers = market_data_request.tickers\r\n vendor_tickers = market_data_request.vendor_tickers\r\n\r\n expiry_date = market_data_request.expiry_date\r\n\r\n config = ConfigManager().get_instance()\r\n\r\n # In many cases no expiry is defined so skip them\r\n for i in range(0, len(tickers)):\r\n try:\r\n expiry_date = config.get_expiry_for_ticker(market_data_request.data_source, tickers[i])\r\n except:\r\n pass\r\n\r\n if expiry_date is not None:\r\n expiry_date = pandas.Timestamp(expiry_date).date()\r\n\r\n # Use pandas Timestamp, a bit more robust with weird dates (can fail if comparing date vs datetime)\r\n # if the expiry is before the start date of our download don't bother downloading this ticker\r\n if expiry_date < start_date:\r\n tickers[i] = None\r\n\r\n # Special case for futures-contracts which are intraday\r\n # avoid downloading if the expiry date is very far in the past\r\n # (we need this before there might be odd 
situations where we run on an expiry date, but still want to get\r\n # data right till expiry time)\r\n if market_data_request.category == 'futures-contracts' and market_data_request.freq == 'intraday' \\\r\n and self.days_expired_intraday_contract_download > 0:\r\n\r\n if expiry_date + timedelta(days=self.days_expired_intraday_contract_download) < current_date:\r\n tickers[i] = None\r\n\r\n if vendor_tickers is not None and tickers[i] is None:\r\n vendor_tickers[i] = None\r\n\r\n market_data_request.tickers = [e for e in tickers if e != None]\r\n\r\n if vendor_tickers is not None:\r\n market_data_request.vendor_tickers = [e for e in vendor_tickers if e != None]\r\n\r\n data_frame_single = None\r\n\r\n if len(market_data_request.tickers) > 0:\r\n data_frame_single = self.get_data_vendor(market_data_request.data_source).load_ticker(market_data_request)\r\n #print(data_frame_single.head(n=10))\r\n\r\n if data_frame_single is not None:\r\n if data_frame_single.empty == False:\r\n data_frame_single.index.name = 'Date'\r\n\r\n # Will fail for DataFrames which includes dates/strings (eg. futures contract names)\r\n try:\r\n data_frame_single = data_frame_single.astype('float32')\r\n except:\r\n self.logger.warning('Could not convert to float')\r\n\r\n if market_data_request.freq == \"second\":\r\n data_frame_single = data_frame_single.resample(\"1s\")\r\n\r\n return data_frame_single\r\n\r\n def fetch_group_time_series(self, market_data_request_list):\r\n\r\n data_frame_agg = None\r\n\r\n thread_no = constants.market_thread_no['other']\r\n\r\n if market_data_request_list[0].data_source in constants.market_thread_no:\r\n thread_no = constants.market_thread_no[market_data_request_list[0].data_source]\r\n\r\n if thread_no > 0:\r\n pool = SwimPool().create_pool(thread_technique = constants.market_thread_technique, thread_no=thread_no)\r\n\r\n # Open the market data downloads in their own threads and return the results\r\n result = pool.map_async(self.fetch_single_time_series, market_data_request_list)\r\n data_frame_group = result.get()\r\n\r\n pool.close()\r\n pool.join()\r\n else:\r\n data_frame_group = []\r\n\r\n for md_request in market_data_request_list:\r\n data_frame_group.append(self.fetch_single_time_series(md_request))\r\n\r\n # Collect together all the time series\r\n if data_frame_group is not None:\r\n data_frame_group = [i for i in data_frame_group if i is not None]\r\n\r\n # For debugging!\r\n # import pickle\r\n # import datetime\r\n # pickle.dump(data_frame_group, open(str(datetime.datetime.now()).replace(':', '-').replace(' ', '-').replace(\".\", \"-\") + \".p\", \"wb\"))\r\n\r\n if data_frame_group is not None:\r\n try:\r\n data_frame_agg = self.calculations.pandas_outer_join(data_frame_group)\r\n except Exception as e:\r\n self.logger.warning('Possible overlap of columns? 
Have you specifed same ticker several times: ' + str(e))\r\n\r\n return data_frame_agg\r\n\r\n def download_daily(self, market_data_request):\r\n \"\"\"Loads daily time series from specified data provider\r\n\r\n Parameters\r\n ----------\r\n market_data_request : MarketDataRequest\r\n contains various properties describing time series to fetched, including ticker, start & finish date etc.\r\n\r\n Returns\r\n -------\r\n pandas.DataFrame\r\n \"\"\"\r\n\r\n key = MarketDataRequest().create_category_key(market_data_request)\r\n\r\n is_key_overriden = False\r\n\r\n for k in constants.override_multi_threading_for_categories:\r\n if k in key:\r\n is_key_overriden = True\r\n break\r\n\r\n # By default use other\r\n thread_no = constants.market_thread_no['other']\r\n\r\n if market_data_request.data_source in constants.market_thread_no:\r\n thread_no = constants.market_thread_no[market_data_request.data_source]\r\n\r\n # daily data does not include ticker in the key, as multiple tickers in the same file\r\n if thread_no == 1:\r\n # data_frame_agg = data_vendor.load_ticker(market_data_request)\r\n data_frame_agg = self.fetch_single_time_series(market_data_request)\r\n else:\r\n market_data_request_list = []\r\n \r\n # When trying your example 'equitiesdata_example' I had a -1 result so it went out of the comming loop and I had errors in execution\r\n group_size = max(int(len(market_data_request.tickers) / thread_no - 1),0)\r\n\r\n if group_size == 0: group_size = 1\r\n\r\n # Split up tickers into groups related to number of threads to call\r\n for i in range(0, len(market_data_request.tickers), group_size):\r\n market_data_request_single = copy.copy(market_data_request)\r\n market_data_request_single.tickers = market_data_request.tickers[i:i + group_size]\r\n\r\n if market_data_request.vendor_tickers is not None:\r\n market_data_request_single.vendor_tickers = \\\r\n market_data_request.vendor_tickers[i:i + group_size]\r\n\r\n market_data_request_list.append(market_data_request_single)\r\n\r\n # Special case where we make smaller calls one after the other\r\n if is_key_overriden:\r\n\r\n data_frame_list = []\r\n\r\n for md in market_data_request_list:\r\n data_frame_list.append(self.fetch_single_time_series(md))\r\n\r\n data_frame_agg = self.calculations.pandas_outer_join(data_frame_list)\r\n else:\r\n data_frame_agg = self.fetch_group_time_series(market_data_request_list)\r\n\r\n # fname = self.create_cache_file_name(key)\r\n # self._time_series_cache[fname] = data_frame_agg # cache in memory (ok for daily data)\r\n\r\n return data_frame_agg\r\n\r\n def refine_expiry_date(self, market_data_request):\r\n\r\n # Expiry date\r\n if market_data_request.expiry_date is None:\r\n ConfigManager().get_instance().get_expiry_for_ticker(market_data_request.data_source, market_data_request.ticker)\r\n\r\n return market_data_request\r\n\r\n def create_cache_file_name(self, filename):\r\n return constants.folder_time_series_data + \"/\" + filename\r\n"
] |
[
[
"numpy.any",
"numpy.array",
"numpy.linalg.inv"
],
[
"pandas.concat",
"pandas.io.common.get_filepath_or_buffer"
],
[
"numpy.array"
],
[
"numpy.all"
],
[
"pandas.Timestamp"
]
] |
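The market-data code archived in the row above (see `fetch_single_time_series`, `fetch_group_time_series` and `download_daily`) follows one core pattern: split the ticker list into groups, download each group on its own worker thread, then outer-join the per-group DataFrames on their shared `Date` index. The sketch below is a minimal, hedged illustration of that pattern only; the `fetch` callable and the `thread_no` / `group_size` arguments are assumptions introduced for this example and are not part of the archived source.

```python
# Minimal sketch (assumptions noted above) of the chunked, threaded download +
# outer-join pattern used by fetch_group_time_series / download_daily above.
from concurrent.futures import ThreadPoolExecutor
import pandas as pd

def fetch_group(tickers, fetch, thread_no=4, group_size=10):
    # Split the ticker universe into groups; one vendor request per group.
    groups = [tickers[i:i + group_size] for i in range(0, len(tickers), group_size)]

    # Download each group on its own worker thread.
    with ThreadPoolExecutor(max_workers=thread_no) as pool:
        frames = list(pool.map(fetch, groups))

    # Drop None/empty results, then outer-join everything on the Date index.
    frames = [f for f in frames if f is not None and not f.empty]
    return pd.concat(frames, axis=1, join="outer") if frames else None
```

The outer join is the key design choice: different tickers (or vendors) can return slightly different date indices, and an outer join keeps every observation instead of silently dropping rows, which is presumably what `calculations.pandas_outer_join` does in the archived code.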
gngdb/smplx
|
[
"ba72def2038712784458a91f94371de6550d7e65"
] |
[
"transfer_model/merge_output.py"
] |
[
"# merges the output of the main transfer_model script\n\nimport torch\nfrom pathlib import Path\nimport pickle\nfrom scipy.spatial.transform import Rotation as R\n\nKEYS = [\n\"transl\",\n\"global_orient\",\n\"body_pose\",\n\"betas\",\n\"left_hand_pose\",\n\"right_hand_pose\",\n\"jaw_pose\",\n\"leye_pose\",\n\"reye_pose\",\n\"expression\",\n\"vertices\",\n\"joints\",\n\"full_pose\",\n\"v_shaped\",\n\"faces\"\n]\n\nIGNORED_KEYS = [\n\"vertices\",\n\"faces\",\n\"v_shaped\"\n]\n\ndef aggregate_rotmats(x):\n x = torch.cat(x, dim=0).detach().numpy()\n s = x.shape[:-2]\n x = R.from_matrix(x.reshape(-1, 3, 3)).as_rotvec()\n x = x.reshape(s[0], -1)\n return x\n\naggregate_function = {k: lambda x: torch.cat(x, 0).detach().numpy() for k in KEYS}\naggregate_function[\"betas\"] = lambda x: torch.cat(x, 0).mean(0).detach().numpy()\n\nfor k in [\"global_orient\", \"body_pose\", \"left_hand_pose\", \"right_hand_pose\", \"jaw_pose\", \"full_pose\"]:\n aggregate_function[k] = aggregate_rotmats\n\ndef merge(output_dir, gender):\n output_dir = Path(output_dir)\n assert output_dir.exists()\n assert output_dir.is_dir()\n\n # get list of all pkl files in output_dir with fixed length numeral names\n pkl_files = [f for f in output_dir.glob(\"*.pkl\") if f.stem != \"merged\"]\n pkl_files = [f for f in sorted(pkl_files, key=lambda x: int(x.stem))]\n assert \"merged.pkl\" not in [f.name for f in pkl_files]\n\n merged = {}\n # iterate over keys and put all values in lists\n keys = set(KEYS) - set(IGNORED_KEYS)\n for k in keys:\n merged[k] = []\n for pkl_file in pkl_files:\n with open(pkl_file, \"rb\") as f:\n data = pickle.load(f)\n for k in keys:\n if k in data:\n merged[k].append(data[k])\n b = torch.cat(merged[\"betas\"], 0)\n print(\"betas:\")\n for mu, sigma in zip(b.mean(0), b.std(0)):\n print(\" {:.3f} +/- {:.3f}\".format(mu, sigma))\n\n # aggregate all values\n for k in keys:\n merged[k] = aggregate_function[k](merged[k])\n\n # add gender\n merged[\"gender\"] = gender\n\n # save merged data to same output_dir\n with open(output_dir / \"merged.pkl\", \"wb\") as f:\n pickle.dump(merged, f)\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser(description='Merge output of transfer_model script')\n parser.add_argument('output_dir', type=str, help='output directory of transfer_model script')\n parser.add_argument('--gender', type=str, choices=['male', 'female', 'neutral'], help='gender of actor in motion sequence')\n args = parser.parse_args()\n merge(args.output_dir, args.gender)\n"
] |
[
[
"torch.cat"
]
] |
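The `merge_output.py` row above aggregates per-frame SMPL-X fitting results: tensors are concatenated with `torch.cat`, `betas` are averaged across frames, and pose parameters stored as rotation matrices are converted back to axis-angle vectors with SciPy's `Rotation`. Below is a small, hedged sketch of just that rotation-matrix-to-rotation-vector step; the function name and the toy identity-rotation input are illustrative assumptions, not part of the archived file.

```python
# Sketch of the rotation aggregation in aggregate_rotmats above:
# a list of (N_i, J, 3, 3) rotation-matrix batches is concatenated,
# converted to axis-angle with SciPy, and reshaped to (sum(N_i), J*3).
import torch
from scipy.spatial.transform import Rotation as R

def rotmats_to_rotvecs(batches):
    x = torch.cat(batches, dim=0).detach().numpy()           # (N, J, 3, 3)
    n = x.shape[0]
    vecs = R.from_matrix(x.reshape(-1, 3, 3)).as_rotvec()    # (N*J, 3)
    return vecs.reshape(n, -1)                                # (N, J*3)

# Illustrative call with identity rotations (hypothetical shapes):
# batches = [torch.eye(3).repeat(2, 21, 1, 1), torch.eye(3).repeat(3, 21, 1, 1)]
# rotmats_to_rotvecs(batches).shape   # -> (5, 63)
```

Averaging `betas` across frames, as the archived script does, presumably reflects treating body shape as constant over a motion sequence, so only the pose-related keys need the per-frame rotation handling shown here.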
kolojoe/scipy
|
[
"3b4a30cbf580a1ea07bf48abf4bbea708b2018dd"
] |
[
"scipy/stats/tests/test_distributions.py"
] |
[
"\"\"\"\nTest functions for stats module\n\"\"\"\n\nimport warnings\nimport re\nimport sys\nimport pickle\nimport os\n\nfrom numpy.testing import (assert_equal, assert_array_equal,\n assert_almost_equal, assert_array_almost_equal,\n assert_allclose, assert_, assert_warns,\n assert_array_less, suppress_warnings)\nimport pytest\nfrom pytest import raises as assert_raises\n\nimport numpy\nimport numpy as np\nfrom numpy import typecodes, array\nfrom numpy.lib.recfunctions import rec_append_fields\nfrom scipy import special\nfrom scipy._lib._util import check_random_state\nfrom scipy.integrate import IntegrationWarning\nimport scipy.stats as stats\nfrom scipy.stats._distn_infrastructure import argsreduce\nimport scipy.stats.distributions\n\nfrom scipy.special import xlogy\nfrom .test_continuous_basic import distcont\nfrom scipy.stats._continuous_distns import FitDataError\n\n# python -OO strips docstrings\nDOCSTRINGS_STRIPPED = sys.flags.optimize > 1\n\n\ndef _assert_hasattr(a, b, msg=None):\n if msg is None:\n msg = '%s does not have attribute %s' % (a, b)\n assert_(hasattr(a, b), msg=msg)\n\n\ndef test_api_regression():\n # https://github.com/scipy/scipy/issues/3802\n _assert_hasattr(scipy.stats.distributions, 'f_gen')\n\n\ndef check_vonmises_pdf_periodic(k, L, s, x):\n vm = stats.vonmises(k, loc=L, scale=s)\n assert_almost_equal(vm.pdf(x), vm.pdf(x % (2*numpy.pi*s)))\n\n\ndef check_vonmises_cdf_periodic(k, L, s, x):\n vm = stats.vonmises(k, loc=L, scale=s)\n assert_almost_equal(vm.cdf(x) % 1, vm.cdf(x % (2*numpy.pi*s)) % 1)\n\n\ndef test_vonmises_pdf_periodic():\n for k in [0.1, 1, 101]:\n for x in [0, 1, numpy.pi, 10, 100]:\n check_vonmises_pdf_periodic(k, 0, 1, x)\n check_vonmises_pdf_periodic(k, 1, 1, x)\n check_vonmises_pdf_periodic(k, 0, 10, x)\n\n check_vonmises_cdf_periodic(k, 0, 1, x)\n check_vonmises_cdf_periodic(k, 1, 1, x)\n check_vonmises_cdf_periodic(k, 0, 10, x)\n\n\ndef test_vonmises_line_support():\n assert_equal(stats.vonmises_line.a, -np.pi)\n assert_equal(stats.vonmises_line.b, np.pi)\n\n\ndef test_vonmises_numerical():\n vm = stats.vonmises(800)\n assert_almost_equal(vm.cdf(0), 0.5)\n\n\ndef _assert_lessthan_loglike(dist, data, func, **kwds):\n mle_analytical = dist.fit(data, **kwds)\n numerical_opt = super(type(dist), dist).fit(data, **kwds)\n ll_mle_analytical = func(mle_analytical, data)\n ll_numerical_opt = func(numerical_opt, data)\n assert ll_mle_analytical < ll_numerical_opt\n\n\ndef assert_fit_warnings(dist):\n param = ['floc', 'fscale']\n if dist.shapes:\n nshapes = len(dist.shapes.split(\",\"))\n param += ['f0', 'f1', 'f2'][:nshapes]\n all_fixed = dict(zip(param, np.arange(len(param))))\n data = [1, 2, 3]\n with pytest.raises(RuntimeError,\n match=\"All parameters fixed. 
There is nothing \"\n \"to optimize.\"):\n dist.fit(data, **all_fixed)\n with pytest.raises(RuntimeError,\n match=\"The data contains non-finite values\"):\n dist.fit([np.nan])\n with pytest.raises(RuntimeError,\n match=\"The data contains non-finite values\"):\n dist.fit([np.inf])\n with pytest.raises(TypeError, match=\"Unknown keyword arguments:\"):\n dist.fit(data, extra_keyword=2)\n with pytest.raises(TypeError, match=\"Too many positional arguments.\"):\n dist.fit(data, *[1]*(len(param) - 1))\n\n\n@pytest.mark.parametrize('dist',\n ['alpha', 'betaprime',\n 'fatiguelife', 'invgamma', 'invgauss', 'invweibull',\n 'johnsonsb', 'levy', 'levy_l', 'lognorm', 'gilbrat',\n 'powerlognorm', 'rayleigh', 'wald'])\ndef test_support(dist):\n \"\"\"gh-6235\"\"\"\n dct = dict(distcont)\n args = dct[dist]\n\n dist = getattr(stats, dist)\n\n assert_almost_equal(dist.pdf(dist.a, *args), 0)\n assert_equal(dist.logpdf(dist.a, *args), -np.inf)\n assert_almost_equal(dist.pdf(dist.b, *args), 0)\n assert_equal(dist.logpdf(dist.b, *args), -np.inf)\n\n\nclass TestRandInt(object):\n def setup_method(self):\n np.random.seed(1234)\n\n def test_rvs(self):\n vals = stats.randint.rvs(5, 30, size=100)\n assert_(numpy.all(vals < 30) & numpy.all(vals >= 5))\n assert_(len(vals) == 100)\n vals = stats.randint.rvs(5, 30, size=(2, 50))\n assert_(numpy.shape(vals) == (2, 50))\n assert_(vals.dtype.char in typecodes['AllInteger'])\n val = stats.randint.rvs(15, 46)\n assert_((val >= 15) & (val < 46))\n assert_(isinstance(val, numpy.ScalarType), msg=repr(type(val)))\n val = stats.randint(15, 46).rvs(3)\n assert_(val.dtype.char in typecodes['AllInteger'])\n\n def test_pdf(self):\n k = numpy.r_[0:36]\n out = numpy.where((k >= 5) & (k < 30), 1.0/(30-5), 0)\n vals = stats.randint.pmf(k, 5, 30)\n assert_array_almost_equal(vals, out)\n\n def test_cdf(self):\n x = np.linspace(0, 36, 100)\n k = numpy.floor(x)\n out = numpy.select([k >= 30, k >= 5], [1.0, (k-5.0+1)/(30-5.0)], 0)\n vals = stats.randint.cdf(x, 5, 30)\n assert_array_almost_equal(vals, out, decimal=12)\n\n\nclass TestBinom(object):\n def setup_method(self):\n np.random.seed(1234)\n\n def test_rvs(self):\n vals = stats.binom.rvs(10, 0.75, size=(2, 50))\n assert_(numpy.all(vals >= 0) & numpy.all(vals <= 10))\n assert_(numpy.shape(vals) == (2, 50))\n assert_(vals.dtype.char in typecodes['AllInteger'])\n val = stats.binom.rvs(10, 0.75)\n assert_(isinstance(val, int))\n val = stats.binom(10, 0.75).rvs(3)\n assert_(isinstance(val, numpy.ndarray))\n assert_(val.dtype.char in typecodes['AllInteger'])\n\n def test_pmf(self):\n # regression test for Ticket #1842\n vals1 = stats.binom.pmf(100, 100, 1)\n vals2 = stats.binom.pmf(0, 100, 0)\n assert_allclose(vals1, 1.0, rtol=1e-15, atol=0)\n assert_allclose(vals2, 1.0, rtol=1e-15, atol=0)\n\n def test_entropy(self):\n # Basic entropy tests.\n b = stats.binom(2, 0.5)\n expected_p = np.array([0.25, 0.5, 0.25])\n expected_h = -sum(xlogy(expected_p, expected_p))\n h = b.entropy()\n assert_allclose(h, expected_h)\n\n b = stats.binom(2, 0.0)\n h = b.entropy()\n assert_equal(h, 0.0)\n\n b = stats.binom(2, 1.0)\n h = b.entropy()\n assert_equal(h, 0.0)\n\n def test_warns_p0(self):\n # no spurious warnigns are generated for p=0; gh-3817\n with warnings.catch_warnings():\n warnings.simplefilter(\"error\", RuntimeWarning)\n assert_equal(stats.binom(n=2, p=0).mean(), 0)\n assert_equal(stats.binom(n=2, p=0).std(), 0)\n\n\nclass TestBernoulli(object):\n def setup_method(self):\n np.random.seed(1234)\n\n def test_rvs(self):\n vals = 
stats.bernoulli.rvs(0.75, size=(2, 50))\n assert_(numpy.all(vals >= 0) & numpy.all(vals <= 1))\n assert_(numpy.shape(vals) == (2, 50))\n assert_(vals.dtype.char in typecodes['AllInteger'])\n val = stats.bernoulli.rvs(0.75)\n assert_(isinstance(val, int))\n val = stats.bernoulli(0.75).rvs(3)\n assert_(isinstance(val, numpy.ndarray))\n assert_(val.dtype.char in typecodes['AllInteger'])\n\n def test_entropy(self):\n # Simple tests of entropy.\n b = stats.bernoulli(0.25)\n expected_h = -0.25*np.log(0.25) - 0.75*np.log(0.75)\n h = b.entropy()\n assert_allclose(h, expected_h)\n\n b = stats.bernoulli(0.0)\n h = b.entropy()\n assert_equal(h, 0.0)\n\n b = stats.bernoulli(1.0)\n h = b.entropy()\n assert_equal(h, 0.0)\n\n\nclass TestBradford(object):\n # gh-6216\n def test_cdf_ppf(self):\n c = 0.1\n x = np.logspace(-20, -4)\n q = stats.bradford.cdf(x, c)\n xx = stats.bradford.ppf(q, c)\n assert_allclose(x, xx)\n\n\nclass TestNBinom(object):\n def setup_method(self):\n np.random.seed(1234)\n\n def test_rvs(self):\n vals = stats.nbinom.rvs(10, 0.75, size=(2, 50))\n assert_(numpy.all(vals >= 0))\n assert_(numpy.shape(vals) == (2, 50))\n assert_(vals.dtype.char in typecodes['AllInteger'])\n val = stats.nbinom.rvs(10, 0.75)\n assert_(isinstance(val, int))\n val = stats.nbinom(10, 0.75).rvs(3)\n assert_(isinstance(val, numpy.ndarray))\n assert_(val.dtype.char in typecodes['AllInteger'])\n\n def test_pmf(self):\n # regression test for ticket 1779\n assert_allclose(np.exp(stats.nbinom.logpmf(700, 721, 0.52)),\n stats.nbinom.pmf(700, 721, 0.52))\n # logpmf(0,1,1) shouldn't return nan (regression test for gh-4029)\n val = scipy.stats.nbinom.logpmf(0, 1, 1)\n assert_equal(val, 0)\n\n\nclass TestGenInvGauss(object):\n def setup_method(self):\n np.random.seed(1234)\n\n @pytest.mark.slow\n def test_rvs_with_mode_shift(self):\n # ratio_unif w/ mode shift\n gig = stats.geninvgauss(2.3, 1.5)\n _, p = stats.kstest(gig.rvs(size=1500, random_state=1234), gig.cdf)\n assert_equal(p > 0.05, True)\n\n @pytest.mark.slow\n def test_rvs_without_mode_shift(self):\n # ratio_unif w/o mode shift\n gig = stats.geninvgauss(0.9, 0.75)\n _, p = stats.kstest(gig.rvs(size=1500, random_state=1234), gig.cdf)\n assert_equal(p > 0.05, True)\n\n @pytest.mark.slow\n def test_rvs_new_method(self):\n # new algorithm of Hoermann / Leydold\n gig = stats.geninvgauss(0.1, 0.2)\n _, p = stats.kstest(gig.rvs(size=1500, random_state=1234), gig.cdf)\n assert_equal(p > 0.05, True)\n\n @pytest.mark.slow\n def test_rvs_p_zero(self):\n def my_ks_check(p, b):\n gig = stats.geninvgauss(p, b)\n rvs = gig.rvs(size=1500, random_state=1234)\n return stats.kstest(rvs, gig.cdf)[1] > 0.05\n # boundary cases when p = 0\n assert_equal(my_ks_check(0, 0.2), True) # new algo\n assert_equal(my_ks_check(0, 0.9), True) # ratio_unif w/o shift\n assert_equal(my_ks_check(0, 1.5), True) # ratio_unif with shift\n\n def test_rvs_negative_p(self):\n # if p negative, return inverse\n assert_equal(\n stats.geninvgauss(-1.5, 2).rvs(size=10, random_state=1234),\n 1 / stats.geninvgauss(1.5, 2).rvs(size=10, random_state=1234))\n\n def test_invgauss(self):\n # test that invgauss is special case\n ig = stats.geninvgauss.rvs(size=1500, p=-0.5, b=1, random_state=1234)\n assert_equal(stats.kstest(ig, 'invgauss', args=[1])[1] > 0.15, True)\n # test pdf and cdf\n mu, x = 100, np.linspace(0.01, 1, 10)\n pdf_ig = stats.geninvgauss.pdf(x, p=-0.5, b=1 / mu, scale=mu)\n assert_allclose(pdf_ig, stats.invgauss(mu).pdf(x))\n cdf_ig = stats.geninvgauss.cdf(x, p=-0.5, b=1 / mu, scale=mu)\n 
assert_allclose(cdf_ig, stats.invgauss(mu).cdf(x))\n\n def test_pdf_R(self):\n # test against R package GIGrvg\n # x <- seq(0.01, 5, length.out = 10)\n # GIGrvg::dgig(x, 0.5, 1, 1)\n vals_R = np.array([2.081176820e-21, 4.488660034e-01, 3.747774338e-01,\n 2.693297528e-01, 1.905637275e-01, 1.351476913e-01,\n 9.636538981e-02, 6.909040154e-02, 4.978006801e-02,\n 3.602084467e-02])\n x = np.linspace(0.01, 5, 10)\n assert_allclose(vals_R, stats.geninvgauss.pdf(x, 0.5, 1))\n\n def test_pdf_zero(self):\n # pdf at 0 is 0, needs special treatment to avoid 1/x in pdf\n assert_equal(stats.geninvgauss.pdf(0, 0.5, 0.5), 0)\n # if x is large and p is moderate, make sure that pdf does not\n # overflow because of x**(p-1); exp(-b*x) forces pdf to zero\n assert_equal(stats.geninvgauss.pdf(2e6, 50, 2), 0)\n\n\nclass TestNormInvGauss(object):\n def setup_method(self):\n np.random.seed(1234)\n\n def test_cdf_R(self):\n # test pdf and cdf vals against R\n # require(\"GeneralizedHyperbolic\")\n # x_test <- c(-7, -5, 0, 8, 15)\n # r_cdf <- GeneralizedHyperbolic::pnig(x_test, mu = 0, a = 1, b = 0.5)\n # r_pdf <- GeneralizedHyperbolic::dnig(x_test, mu = 0, a = 1, b = 0.5)\n r_cdf = np.array([8.034920282e-07, 2.512671945e-05, 3.186661051e-01,\n 9.988650664e-01, 9.999848769e-01])\n x_test = np.array([-7, -5, 0, 8, 15])\n vals_cdf = stats.norminvgauss.cdf(x_test, a=1, b=0.5)\n assert_allclose(vals_cdf, r_cdf, atol=1e-9)\n\n def test_pdf_R(self):\n # values from R as defined in test_cdf_R\n r_pdf = np.array([1.359600783e-06, 4.413878805e-05, 4.555014266e-01,\n 7.450485342e-04, 8.917889931e-06])\n x_test = np.array([-7, -5, 0, 8, 15])\n vals_pdf = stats.norminvgauss.pdf(x_test, a=1, b=0.5)\n assert_allclose(vals_pdf, r_pdf, atol=1e-9)\n\n def test_stats(self):\n a, b = 1, 0.5\n gamma = np.sqrt(a**2 - b**2)\n v_stats = (b / gamma, a**2 / gamma**3, 3.0 * b / (a * np.sqrt(gamma)),\n 3.0 * (1 + 4 * b**2 / a**2) / gamma)\n assert_equal(v_stats, stats.norminvgauss.stats(a, b, moments='mvsk'))\n\n def test_ppf(self):\n a, b = 1, 0.5\n x_test = np.array([0.001, 0.5, 0.999])\n vals = stats.norminvgauss.ppf(x_test, a, b)\n assert_allclose(x_test, stats.norminvgauss.cdf(vals, a, b))\n\n\nclass TestGeom(object):\n def setup_method(self):\n np.random.seed(1234)\n\n def test_rvs(self):\n vals = stats.geom.rvs(0.75, size=(2, 50))\n assert_(numpy.all(vals >= 0))\n assert_(numpy.shape(vals) == (2, 50))\n assert_(vals.dtype.char in typecodes['AllInteger'])\n val = stats.geom.rvs(0.75)\n assert_(isinstance(val, int))\n val = stats.geom(0.75).rvs(3)\n assert_(isinstance(val, numpy.ndarray))\n assert_(val.dtype.char in typecodes['AllInteger'])\n\n def test_pmf(self):\n vals = stats.geom.pmf([1, 2, 3], 0.5)\n assert_array_almost_equal(vals, [0.5, 0.25, 0.125])\n\n def test_logpmf(self):\n # regression test for ticket 1793\n vals1 = np.log(stats.geom.pmf([1, 2, 3], 0.5))\n vals2 = stats.geom.logpmf([1, 2, 3], 0.5)\n assert_allclose(vals1, vals2, rtol=1e-15, atol=0)\n\n # regression test for gh-4028\n val = stats.geom.logpmf(1, 1)\n assert_equal(val, 0.0)\n\n def test_cdf_sf(self):\n vals = stats.geom.cdf([1, 2, 3], 0.5)\n vals_sf = stats.geom.sf([1, 2, 3], 0.5)\n expected = array([0.5, 0.75, 0.875])\n assert_array_almost_equal(vals, expected)\n assert_array_almost_equal(vals_sf, 1-expected)\n\n def test_logcdf_logsf(self):\n vals = stats.geom.logcdf([1, 2, 3], 0.5)\n vals_sf = stats.geom.logsf([1, 2, 3], 0.5)\n expected = array([0.5, 0.75, 0.875])\n assert_array_almost_equal(vals, np.log(expected))\n assert_array_almost_equal(vals_sf, 
np.log1p(-expected))\n\n def test_ppf(self):\n vals = stats.geom.ppf([0.5, 0.75, 0.875], 0.5)\n expected = array([1.0, 2.0, 3.0])\n assert_array_almost_equal(vals, expected)\n\n def test_ppf_underflow(self):\n # this should not underflow\n assert_allclose(stats.geom.ppf(1e-20, 1e-20), 1.0, atol=1e-14)\n\n\nclass TestPlanck(object):\n def setup_method(self):\n np.random.seed(1234)\n\n def test_sf(self):\n vals = stats.planck.sf([1, 2, 3], 5.)\n expected = array([4.5399929762484854e-05,\n 3.0590232050182579e-07,\n 2.0611536224385579e-09])\n assert_array_almost_equal(vals, expected)\n\n def test_logsf(self):\n vals = stats.planck.logsf([1000., 2000., 3000.], 1000.)\n expected = array([-1001000., -2001000., -3001000.])\n assert_array_almost_equal(vals, expected)\n\n\nclass TestGennorm(object):\n def test_laplace(self):\n # test against Laplace (special case for beta=1)\n points = [1, 2, 3]\n pdf1 = stats.gennorm.pdf(points, 1)\n pdf2 = stats.laplace.pdf(points)\n assert_almost_equal(pdf1, pdf2)\n\n def test_norm(self):\n # test against normal (special case for beta=2)\n points = [1, 2, 3]\n pdf1 = stats.gennorm.pdf(points, 2)\n pdf2 = stats.norm.pdf(points, scale=2**-.5)\n assert_almost_equal(pdf1, pdf2)\n\n\nclass TestHalfgennorm(object):\n def test_expon(self):\n # test against exponential (special case for beta=1)\n points = [1, 2, 3]\n pdf1 = stats.halfgennorm.pdf(points, 1)\n pdf2 = stats.expon.pdf(points)\n assert_almost_equal(pdf1, pdf2)\n\n def test_halfnorm(self):\n # test against half normal (special case for beta=2)\n points = [1, 2, 3]\n pdf1 = stats.halfgennorm.pdf(points, 2)\n pdf2 = stats.halfnorm.pdf(points, scale=2**-.5)\n assert_almost_equal(pdf1, pdf2)\n\n def test_gennorm(self):\n # test against generalized normal\n points = [1, 2, 3]\n pdf1 = stats.halfgennorm.pdf(points, .497324)\n pdf2 = stats.gennorm.pdf(points, .497324)\n assert_almost_equal(pdf1, 2*pdf2)\n\n\nclass TestTruncnorm(object):\n def setup_method(self):\n np.random.seed(1234)\n\n def test_ppf_ticket1131(self):\n vals = stats.truncnorm.ppf([-0.5, 0, 1e-4, 0.5, 1-1e-4, 1, 2], -1., 1.,\n loc=[3]*7, scale=2)\n expected = np.array([np.nan, 1, 1.00056419, 3, 4.99943581, 5, np.nan])\n assert_array_almost_equal(vals, expected)\n\n def test_isf_ticket1131(self):\n vals = stats.truncnorm.isf([-0.5, 0, 1e-4, 0.5, 1-1e-4, 1, 2], -1., 1.,\n loc=[3]*7, scale=2)\n expected = np.array([np.nan, 5, 4.99943581, 3, 1.00056419, 1, np.nan])\n assert_array_almost_equal(vals, expected)\n\n def test_gh_2477_small_values(self):\n # Check a case that worked in the original issue.\n low, high = -11, -10\n x = stats.truncnorm.rvs(low, high, 0, 1, size=10)\n assert_(low < x.min() < x.max() < high)\n # Check a case that failed in the original issue.\n low, high = 10, 11\n x = stats.truncnorm.rvs(low, high, 0, 1, size=10)\n assert_(low < x.min() < x.max() < high)\n\n def test_gh_2477_large_values(self):\n # Check a case that used to fail because of extreme tailness.\n low, high = 100, 101\n x = stats.truncnorm.rvs(low, high, 0, 1, size=10)\n assert_(low <= x.min() <= x.max() <= high), str([low, high, x])\n\n # Check some additional extreme tails\n low, high = 1000, 1001\n x = stats.truncnorm.rvs(low, high, 0, 1, size=10)\n assert_(low < x.min() < x.max() < high)\n\n low, high = 10000, 10001\n x = stats.truncnorm.rvs(low, high, 0, 1, size=10)\n assert_(low < x.min() < x.max() < high)\n\n low, high = -10001, -10000\n x = stats.truncnorm.rvs(low, high, 0, 1, size=10)\n assert_(low < x.min() < x.max() < high)\n\n def 
test_gh_9403_nontail_values(self):\n for low, high in [[3, 4], [-4, -3]]:\n xvals = np.array([-np.inf, low, high, np.inf])\n xmid = (high+low)/2.0\n cdfs = stats.truncnorm.cdf(xvals, low, high)\n sfs = stats.truncnorm.sf(xvals, low, high)\n pdfs = stats.truncnorm.pdf(xvals, low, high)\n expected_cdfs = np.array([0, 0, 1, 1])\n expected_sfs = np.array([1.0, 1.0, 0.0, 0.0])\n expected_pdfs = np.array([0, 3.3619772, 0.1015229, 0])\n if low < 0:\n expected_pdfs = np.array([0, 0.1015229, 3.3619772, 0])\n assert_almost_equal(cdfs, expected_cdfs)\n assert_almost_equal(sfs, expected_sfs)\n assert_almost_equal(pdfs, expected_pdfs)\n assert_almost_equal(np.log(expected_pdfs[1]/expected_pdfs[2]),\n low + 0.5)\n pvals = np.array([0, 0.5, 1.0])\n ppfs = stats.truncnorm.ppf(pvals, low, high)\n expected_ppfs = np.array([low, np.sign(low)*3.1984741, high])\n assert_almost_equal(ppfs, expected_ppfs)\n\n if low < 0:\n assert_almost_equal(stats.truncnorm.sf(xmid, low, high),\n 0.8475544278436675)\n assert_almost_equal(stats.truncnorm.cdf(xmid, low, high),\n 0.1524455721563326)\n else:\n assert_almost_equal(stats.truncnorm.cdf(xmid, low, high),\n 0.8475544278436675)\n assert_almost_equal(stats.truncnorm.sf(xmid, low, high),\n 0.1524455721563326)\n pdf = stats.truncnorm.pdf(xmid, low, high)\n assert_almost_equal(np.log(pdf/expected_pdfs[2]), (xmid+0.25)/2)\n\n def test_gh_9403_medium_tail_values(self):\n for low, high in [[39, 40], [-40, -39]]:\n xvals = np.array([-np.inf, low, high, np.inf])\n xmid = (high+low)/2.0\n cdfs = stats.truncnorm.cdf(xvals, low, high)\n sfs = stats.truncnorm.sf(xvals, low, high)\n pdfs = stats.truncnorm.pdf(xvals, low, high)\n expected_cdfs = np.array([0, 0, 1, 1])\n expected_sfs = np.array([1.0, 1.0, 0.0, 0.0])\n expected_pdfs = np.array([0, 3.90256074e+01, 2.73349092e-16, 0])\n if low < 0:\n expected_pdfs = np.array([0, 2.73349092e-16,\n 3.90256074e+01, 0])\n assert_almost_equal(cdfs, expected_cdfs)\n assert_almost_equal(sfs, expected_sfs)\n assert_almost_equal(pdfs, expected_pdfs)\n assert_almost_equal(np.log(expected_pdfs[1]/expected_pdfs[2]),\n low + 0.5)\n pvals = np.array([0, 0.5, 1.0])\n ppfs = stats.truncnorm.ppf(pvals, low, high)\n expected_ppfs = np.array([low, np.sign(low)*39.01775731, high])\n assert_almost_equal(ppfs, expected_ppfs)\n cdfs = stats.truncnorm.cdf(ppfs, low, high)\n assert_almost_equal(cdfs, pvals)\n\n if low < 0:\n assert_almost_equal(stats.truncnorm.sf(xmid, low, high),\n 0.9999999970389126)\n assert_almost_equal(stats.truncnorm.cdf(xmid, low, high),\n 2.961048103554866e-09)\n else:\n assert_almost_equal(stats.truncnorm.cdf(xmid, low, high),\n 0.9999999970389126)\n assert_almost_equal(stats.truncnorm.sf(xmid, low, high),\n 2.961048103554866e-09)\n pdf = stats.truncnorm.pdf(xmid, low, high)\n assert_almost_equal(np.log(pdf/expected_pdfs[2]), (xmid+0.25)/2)\n\n xvals = np.linspace(low, high, 11)\n xvals2 = -xvals[::-1]\n assert_almost_equal(stats.truncnorm.cdf(xvals, low, high),\n stats.truncnorm.sf(xvals2, -high, -low)[::-1])\n assert_almost_equal(stats.truncnorm.sf(xvals, low, high),\n stats.truncnorm.cdf(xvals2, -high, -low)[::-1])\n assert_almost_equal(stats.truncnorm.pdf(xvals, low, high),\n stats.truncnorm.pdf(xvals2, -high, -low)[::-1])\n\n def _test_moments_one_range(self, a, b, expected, decimal_s=7):\n m0, v0, s0, k0 = expected[:4]\n m, v, s, k = stats.truncnorm.stats(a, b, moments='mvsk')\n assert_almost_equal(m, m0)\n assert_almost_equal(v, v0)\n assert_almost_equal(s, s0, decimal=decimal_s)\n assert_almost_equal(k, k0)\n\n 
@pytest.mark.xfail_on_32bit(\"reduced accuracy with 32bit platforms.\")\n def test_moments(self):\n # Values validated by changing TRUNCNORM_TAIL_X so as to evaluate\n # using both the _norm_XXX() and _norm_logXXX() functions, and by\n # removing the _stats and _munp methods in truncnorm tp force\n # numerical quadrature.\n # For m,v,s,k expect k to have the largest error as it is\n # constructed from powers of lower moments\n\n self._test_moments_one_range(-30, 30, [0, 1, 0.0, 0.0])\n self._test_moments_one_range(-10, 10, [0, 1, 0.0, 0.0])\n self._test_moments_one_range(-3, 3, [0.0, 0.9733369246625415,\n 0.0, -0.1711144363977444])\n self._test_moments_one_range(-2, 2, [0.0, 0.7737413035499232,\n 0.0, -0.6344632828703505])\n\n self._test_moments_one_range(0, np.inf, [0.7978845608028654,\n 0.3633802276324186,\n 0.9952717464311565,\n 0.8691773036059725])\n self._test_moments_one_range(-np.inf, 0, [-0.7978845608028654,\n 0.3633802276324186,\n -0.9952717464311565,\n 0.8691773036059725])\n\n self._test_moments_one_range(-1, 3, [0.2827861107271540,\n 0.6161417353578292,\n 0.5393018494027878,\n -0.2058206513527461])\n self._test_moments_one_range(-3, 1, [-0.2827861107271540,\n 0.6161417353578292,\n -0.5393018494027878,\n -0.2058206513527461])\n\n self._test_moments_one_range(-10, -9, [-9.1084562880124764,\n 0.0114488058210104,\n -1.8985607337519652,\n 5.0733457094223553])\n self._test_moments_one_range(-20, -19, [-19.0523439459766628,\n 0.0027250730180314,\n -1.9838694022629291,\n 5.8717850028287586])\n self._test_moments_one_range(-30, -29, [-29.0344012377394698,\n 0.0011806603928891,\n -1.9930304534611458,\n 5.8854062968996566],\n decimal_s=6)\n self._test_moments_one_range(-40, -39, [-39.0256074199326264,\n 0.0006548826719649,\n -1.9963146354109957,\n 5.6167758371700494])\n self._test_moments_one_range(39, 40, [39.0256074199326264,\n 0.0006548826719649,\n 1.9963146354109957,\n 5.6167758371700494])\n\n def test_9902_moments(self):\n m, v = stats.truncnorm.stats(0, np.inf, moments='mv')\n assert_almost_equal(m, 0.79788456)\n assert_almost_equal(v, 0.36338023)\n\n def test_gh_1489_trac_962_rvs(self):\n # Check the original example.\n low, high = 10, 15\n x = stats.truncnorm.rvs(low, high, 0, 1, size=10)\n assert_(low < x.min() < x.max() < high)\n\n def test_gh_11299_rvs(self):\n # Arose from investigating gh-11299\n # Test multiple shape parameters simultaneously.\n low = [-10, 10, -np.inf, -5, -np.inf, -np.inf, -45, -45, 40, -10, 40]\n high = [-5, 11, 5, np.inf, 40, -40, 40, -40, 45, np.inf, np.inf]\n x = stats.truncnorm.rvs(low, high, size=(5, len(low)))\n assert np.shape(x) == (5, len(low))\n assert_(np.all(low <= x.min(axis=0)))\n assert_(np.all(x.max(axis=0) <= high))\n\n def test_rvs_Generator(self):\n # check that rvs can use a Generator\n if hasattr(np.random, \"default_rng\"):\n stats.truncnorm.rvs(-10, -5, size=5,\n random_state=np.random.default_rng())\n\n\nclass TestGenLogistic:\n\n # Expected values computed with mpmath with 50 digits of precision.\n @pytest.mark.parametrize('x, expected', [(-1000, -1499.5945348918917),\n (-125, -187.09453489189184),\n (0, -1.3274028432916989),\n (100, -99.59453489189184),\n (1000, -999.5945348918918)])\n def test_logpdf(self, x, expected):\n c = 1.5\n logp = stats.genlogistic.logpdf(x, c)\n assert_allclose(logp, expected, rtol=1e-13)\n\n\nclass TestHypergeom(object):\n def setup_method(self):\n np.random.seed(1234)\n\n def test_rvs(self):\n vals = stats.hypergeom.rvs(20, 10, 3, size=(2, 50))\n assert_(numpy.all(vals >= 0) &\n numpy.all(vals <= 3))\n 
assert_(numpy.shape(vals) == (2, 50))\n assert_(vals.dtype.char in typecodes['AllInteger'])\n val = stats.hypergeom.rvs(20, 3, 10)\n assert_(isinstance(val, int))\n val = stats.hypergeom(20, 3, 10).rvs(3)\n assert_(isinstance(val, numpy.ndarray))\n assert_(val.dtype.char in typecodes['AllInteger'])\n\n def test_precision(self):\n # comparison number from mpmath\n M = 2500\n n = 50\n N = 500\n tot = M\n good = n\n hgpmf = stats.hypergeom.pmf(2, tot, good, N)\n assert_almost_equal(hgpmf, 0.0010114963068932233, 11)\n\n def test_args(self):\n # test correct output for corner cases of arguments\n # see gh-2325\n assert_almost_equal(stats.hypergeom.pmf(0, 2, 1, 0), 1.0, 11)\n assert_almost_equal(stats.hypergeom.pmf(1, 2, 1, 0), 0.0, 11)\n\n assert_almost_equal(stats.hypergeom.pmf(0, 2, 0, 2), 1.0, 11)\n assert_almost_equal(stats.hypergeom.pmf(1, 2, 1, 0), 0.0, 11)\n\n def test_cdf_above_one(self):\n # for some values of parameters, hypergeom cdf was >1, see gh-2238\n assert_(0 <= stats.hypergeom.cdf(30, 13397950, 4363, 12390) <= 1.0)\n\n def test_precision2(self):\n # Test hypergeom precision for large numbers. See #1218.\n # Results compared with those from R.\n oranges = 9.9e4\n pears = 1.1e5\n fruits_eaten = np.array([3, 3.8, 3.9, 4, 4.1, 4.2, 5]) * 1e4\n quantile = 2e4\n res = [stats.hypergeom.sf(quantile, oranges + pears, oranges, eaten)\n for eaten in fruits_eaten]\n expected = np.array([0, 1.904153e-114, 2.752693e-66, 4.931217e-32,\n 8.265601e-11, 0.1237904, 1])\n assert_allclose(res, expected, atol=0, rtol=5e-7)\n\n # Test with array_like first argument\n quantiles = [1.9e4, 2e4, 2.1e4, 2.15e4]\n res2 = stats.hypergeom.sf(quantiles, oranges + pears, oranges, 4.2e4)\n expected2 = [1, 0.1237904, 6.511452e-34, 3.277667e-69]\n assert_allclose(res2, expected2, atol=0, rtol=5e-7)\n\n def test_entropy(self):\n # Simple tests of entropy.\n hg = stats.hypergeom(4, 1, 1)\n h = hg.entropy()\n expected_p = np.array([0.75, 0.25])\n expected_h = -np.sum(xlogy(expected_p, expected_p))\n assert_allclose(h, expected_h)\n\n hg = stats.hypergeom(1, 1, 1)\n h = hg.entropy()\n assert_equal(h, 0.0)\n\n def test_logsf(self):\n # Test logsf for very large numbers. See issue #4982\n # Results compare with those from R (v3.2.0):\n # phyper(k, n, M-n, N, lower.tail=FALSE, log.p=TRUE)\n # -2239.771\n\n k = 1e4\n M = 1e7\n n = 1e6\n N = 5e4\n\n result = stats.hypergeom.logsf(k, M, n, N)\n expected = -2239.771 # From R\n assert_almost_equal(result, expected, decimal=3)\n\n k = 1\n M = 1600\n n = 600\n N = 300\n\n result = stats.hypergeom.logsf(k, M, n, N)\n expected = -2.566567e-68 # From R\n assert_almost_equal(result, expected, decimal=15)\n\n def test_logcdf(self):\n # Test logcdf for very large numbers. 
See issue #8692\n # Results compare with those from R (v3.3.2):\n # phyper(k, n, M-n, N, lower.tail=TRUE, log.p=TRUE)\n # -5273.335\n\n k = 1\n M = 1e7\n n = 1e6\n N = 5e4\n\n result = stats.hypergeom.logcdf(k, M, n, N)\n expected = -5273.335 # From R\n assert_almost_equal(result, expected, decimal=3)\n\n # Same example as in issue #8692\n k = 40\n M = 1600\n n = 50\n N = 300\n\n result = stats.hypergeom.logcdf(k, M, n, N)\n expected = -7.565148879229e-23 # From R\n assert_almost_equal(result, expected, decimal=15)\n\n k = 125\n M = 1600\n n = 250\n N = 500\n\n result = stats.hypergeom.logcdf(k, M, n, N)\n expected = -4.242688e-12 # From R\n assert_almost_equal(result, expected, decimal=15)\n\n # test broadcasting robustness based on reviewer\n # concerns in PR 9603; using an array version of\n # the example from issue #8692\n k = np.array([40, 40, 40])\n M = 1600\n n = 50\n N = 300\n\n result = stats.hypergeom.logcdf(k, M, n, N)\n expected = np.full(3, -7.565148879229e-23) # filled from R result\n assert_almost_equal(result, expected, decimal=15)\n\n\nclass TestLoggamma(object):\n\n def test_stats(self):\n # The following precomputed values are from the table in section 2.2\n # of \"A Statistical Study of Log-Gamma Distribution\", by Ping Shing\n # Chan (thesis, McMaster University, 1993).\n table = np.array([\n # c, mean, var, skew, exc. kurt.\n 0.5, -1.9635, 4.9348, -1.5351, 4.0000,\n 1.0, -0.5772, 1.6449, -1.1395, 2.4000,\n 12.0, 2.4427, 0.0869, -0.2946, 0.1735,\n ]).reshape(-1, 5)\n for c, mean, var, skew, kurt in table:\n computed = stats.loggamma.stats(c, moments='msvk')\n assert_array_almost_equal(computed, [mean, var, skew, kurt],\n decimal=4)\n\n\nclass TestLogistic(object):\n # gh-6226\n def test_cdf_ppf(self):\n x = np.linspace(-20, 20)\n y = stats.logistic.cdf(x)\n xx = stats.logistic.ppf(y)\n assert_allclose(x, xx)\n\n def test_sf_isf(self):\n x = np.linspace(-20, 20)\n y = stats.logistic.sf(x)\n xx = stats.logistic.isf(y)\n assert_allclose(x, xx)\n\n def test_extreme_values(self):\n # p is chosen so that 1 - (1 - p) == p in double precision\n p = 9.992007221626409e-16\n desired = 34.53957599234088\n assert_allclose(stats.logistic.ppf(1 - p), desired)\n assert_allclose(stats.logistic.isf(p), desired)\n\n def test_logpdf_basic(self):\n logp = stats.logistic.logpdf([-15, 0, 10])\n # Expected values computed with mpmath with 50 digits of precision.\n expected = [-15.000000611804547,\n -1.3862943611198906,\n -10.000090797798434]\n assert_allclose(logp, expected, rtol=1e-13)\n\n def test_logpdf_extreme_values(self):\n logp = stats.logistic.logpdf([800, -800])\n # For such large arguments, logpdf(x) = -abs(x) when computed\n # with 64 bit floating point.\n assert_equal(logp, [-800, -800])\n\n\nclass TestLogser(object):\n def setup_method(self):\n np.random.seed(1234)\n\n def test_rvs(self):\n vals = stats.logser.rvs(0.75, size=(2, 50))\n assert_(numpy.all(vals >= 1))\n assert_(numpy.shape(vals) == (2, 50))\n assert_(vals.dtype.char in typecodes['AllInteger'])\n val = stats.logser.rvs(0.75)\n assert_(isinstance(val, int))\n val = stats.logser(0.75).rvs(3)\n assert_(isinstance(val, numpy.ndarray))\n assert_(val.dtype.char in typecodes['AllInteger'])\n\n def test_pmf_small_p(self):\n m = stats.logser.pmf(4, 1e-20)\n # The expected value was computed using mpmath:\n # >>> import mpmath\n # >>> mpmath.mp.dps = 64\n # >>> k = 4\n # >>> p = mpmath.mpf('1e-20')\n # >>> float(-(p**k)/k/mpmath.log(1-p))\n # 2.5e-61\n # It is also clear from noticing that for very small p,\n # log(1-p) is 
approximately -p, and the formula becomes\n # p**(k-1) / k\n assert_allclose(m, 2.5e-61)\n\n def test_mean_small_p(self):\n m = stats.logser.mean(1e-8)\n # The expected mean was computed using mpmath:\n # >>> import mpmath\n # >>> mpmath.dps = 60\n # >>> p = mpmath.mpf('1e-8')\n # >>> float(-p / ((1 - p)*mpmath.log(1 - p)))\n # 1.000000005\n assert_allclose(m, 1.000000005)\n\n\nclass TestPareto(object):\n def test_stats(self):\n # Check the stats() method with some simple values. Also check\n # that the calculations do not trigger RuntimeWarnings.\n with warnings.catch_warnings():\n warnings.simplefilter(\"error\", RuntimeWarning)\n\n m, v, s, k = stats.pareto.stats(0.5, moments='mvsk')\n assert_equal(m, np.inf)\n assert_equal(v, np.inf)\n assert_equal(s, np.nan)\n assert_equal(k, np.nan)\n\n m, v, s, k = stats.pareto.stats(1.0, moments='mvsk')\n assert_equal(m, np.inf)\n assert_equal(v, np.inf)\n assert_equal(s, np.nan)\n assert_equal(k, np.nan)\n\n m, v, s, k = stats.pareto.stats(1.5, moments='mvsk')\n assert_equal(m, 3.0)\n assert_equal(v, np.inf)\n assert_equal(s, np.nan)\n assert_equal(k, np.nan)\n\n m, v, s, k = stats.pareto.stats(2.0, moments='mvsk')\n assert_equal(m, 2.0)\n assert_equal(v, np.inf)\n assert_equal(s, np.nan)\n assert_equal(k, np.nan)\n\n m, v, s, k = stats.pareto.stats(2.5, moments='mvsk')\n assert_allclose(m, 2.5 / 1.5)\n assert_allclose(v, 2.5 / (1.5*1.5*0.5))\n assert_equal(s, np.nan)\n assert_equal(k, np.nan)\n\n m, v, s, k = stats.pareto.stats(3.0, moments='mvsk')\n assert_allclose(m, 1.5)\n assert_allclose(v, 0.75)\n assert_equal(s, np.nan)\n assert_equal(k, np.nan)\n\n m, v, s, k = stats.pareto.stats(3.5, moments='mvsk')\n assert_allclose(m, 3.5 / 2.5)\n assert_allclose(v, 3.5 / (2.5*2.5*1.5))\n assert_allclose(s, (2*4.5/0.5)*np.sqrt(1.5/3.5))\n assert_equal(k, np.nan)\n\n m, v, s, k = stats.pareto.stats(4.0, moments='mvsk')\n assert_allclose(m, 4.0 / 3.0)\n assert_allclose(v, 4.0 / 18.0)\n assert_allclose(s, 2*(1+4.0)/(4.0-3) * np.sqrt((4.0-2)/4.0))\n assert_equal(k, np.nan)\n\n m, v, s, k = stats.pareto.stats(4.5, moments='mvsk')\n assert_allclose(m, 4.5 / 3.5)\n assert_allclose(v, 4.5 / (3.5*3.5*2.5))\n assert_allclose(s, (2*5.5/1.5) * np.sqrt(2.5/4.5))\n assert_allclose(k, 6*(4.5**3 + 4.5**2 - 6*4.5 - 2)/(4.5*1.5*0.5))\n\n def test_sf(self):\n x = 1e9\n b = 2\n scale = 1.5\n p = stats.pareto.sf(x, b, loc=0, scale=scale)\n expected = (scale/x)**b # 2.25e-18\n assert_allclose(p, expected)\n\n @pytest.mark.filterwarnings(\"ignore:invalid value encountered in \"\n \"double_scalars\")\n @pytest.mark.parametrize(\"rvs_shape\", [1, 2])\n @pytest.mark.parametrize(\"rvs_loc\", [0, 2])\n @pytest.mark.parametrize(\"rvs_scale\", [1, 5])\n def test_fit(self, rvs_shape, rvs_loc, rvs_scale):\n data = stats.pareto.rvs(size=100, b=rvs_shape, scale=rvs_scale,\n loc=rvs_loc)\n\n # shape can still be fixed with multiple names\n shape_mle_analytical1 = stats.pareto.fit(data, floc=0, f0=1.04)[0]\n shape_mle_analytical2 = stats.pareto.fit(data, floc=0, fix_b=1.04)[0]\n shape_mle_analytical3 = stats.pareto.fit(data, floc=0, fb=1.04)[0]\n assert (shape_mle_analytical1 == shape_mle_analytical2 ==\n shape_mle_analytical3 == 1.04)\n\n # data can be shifted with changes to `loc`\n data = stats.pareto.rvs(size=100, b=rvs_shape, scale=rvs_scale,\n loc=(rvs_loc + 2))\n shape_mle_a, loc_mle_a, scale_mle_a = stats.pareto.fit(data, floc=2)\n assert_equal(scale_mle_a + 2, data.min())\n assert_equal(shape_mle_a, 1/((1/len(data - 2)) *\n np.sum(np.log((data\n - 2)/(data.min() - 2)))))\n 
assert_equal(loc_mle_a, 2)\n\n @pytest.mark.filterwarnings(\"ignore:invalid value encountered in \"\n \"double_scalars\")\n @pytest.mark.parametrize(\"rvs_shape\", [1, 2])\n @pytest.mark.parametrize(\"rvs_loc\", [0, 2])\n @pytest.mark.parametrize(\"rvs_scale\", [1, 5])\n def test_fit_MLE_comp_optimzer(self, rvs_shape, rvs_loc, rvs_scale):\n data = stats.pareto.rvs(size=100, b=rvs_shape, scale=rvs_scale,\n loc=rvs_loc)\n args = [data, (stats.pareto._fitstart(data), )]\n func = stats.pareto._reduce_func(args, {})[1]\n\n # fixed `floc` to actual location provides a better fit than the\n # super method\n _assert_lessthan_loglike(stats.pareto, data, func, floc=rvs_loc)\n\n # fixing `floc` to an arbitrary number, 0, still provides a better\n # fit than the super method\n _assert_lessthan_loglike(stats.pareto, data, func, floc=0)\n\n # fixed shape still uses MLE formula and provides a better fit than\n # the super method\n _assert_lessthan_loglike(stats.pareto, data, func, floc=0, f0=4)\n\n # valid fixed fscale still uses MLE formulas and provides a better\n # fit than the super method\n _assert_lessthan_loglike(stats.pareto, data, func, floc=0,\n fscale=rvs_scale/2)\n\n def test_fit_warnings(self):\n assert_fit_warnings(stats.pareto)\n # `floc` that causes invalid negative data\n assert_raises(FitDataError, stats.pareto.fit, [1, 2, 3], floc=2)\n # `floc` and `fscale` combination causes invalid data\n assert_raises(FitDataError, stats.pareto.fit, [5, 2, 3], floc=1,\n fscale=3)\n\n\nclass TestGenpareto(object):\n def test_ab(self):\n # c >= 0: a, b = [0, inf]\n for c in [1., 0.]:\n c = np.asarray(c)\n a, b = stats.genpareto._get_support(c)\n assert_equal(a, 0.)\n assert_(np.isposinf(b))\n\n # c < 0: a=0, b=1/|c|\n c = np.asarray(-2.)\n a, b = stats.genpareto._get_support(c)\n assert_allclose([a, b], [0., 0.5])\n\n def test_c0(self):\n # with c=0, genpareto reduces to the exponential distribution\n # rv = stats.genpareto(c=0.)\n rv = stats.genpareto(c=0.)\n x = np.linspace(0, 10., 30)\n assert_allclose(rv.pdf(x), stats.expon.pdf(x))\n assert_allclose(rv.cdf(x), stats.expon.cdf(x))\n assert_allclose(rv.sf(x), stats.expon.sf(x))\n\n q = np.linspace(0., 1., 10)\n assert_allclose(rv.ppf(q), stats.expon.ppf(q))\n\n def test_cm1(self):\n # with c=-1, genpareto reduces to the uniform distr on [0, 1]\n rv = stats.genpareto(c=-1.)\n x = np.linspace(0, 10., 30)\n assert_allclose(rv.pdf(x), stats.uniform.pdf(x))\n assert_allclose(rv.cdf(x), stats.uniform.cdf(x))\n assert_allclose(rv.sf(x), stats.uniform.sf(x))\n\n q = np.linspace(0., 1., 10)\n assert_allclose(rv.ppf(q), stats.uniform.ppf(q))\n\n # logpdf(1., c=-1) should be zero\n assert_allclose(rv.logpdf(1), 0)\n\n def test_x_inf(self):\n # make sure x=inf is handled gracefully\n rv = stats.genpareto(c=0.1)\n assert_allclose([rv.pdf(np.inf), rv.cdf(np.inf)], [0., 1.])\n assert_(np.isneginf(rv.logpdf(np.inf)))\n\n rv = stats.genpareto(c=0.)\n assert_allclose([rv.pdf(np.inf), rv.cdf(np.inf)], [0., 1.])\n assert_(np.isneginf(rv.logpdf(np.inf)))\n\n rv = stats.genpareto(c=-1.)\n assert_allclose([rv.pdf(np.inf), rv.cdf(np.inf)], [0., 1.])\n assert_(np.isneginf(rv.logpdf(np.inf)))\n\n def test_c_continuity(self):\n # pdf is continuous at c=0, -1\n x = np.linspace(0, 10, 30)\n for c in [0, -1]:\n pdf0 = stats.genpareto.pdf(x, c)\n for dc in [1e-14, -1e-14]:\n pdfc = stats.genpareto.pdf(x, c + dc)\n assert_allclose(pdf0, pdfc, atol=1e-12)\n\n cdf0 = stats.genpareto.cdf(x, c)\n for dc in [1e-14, 1e-14]:\n cdfc = stats.genpareto.cdf(x, c + dc)\n assert_allclose(cdf0, 
cdfc, atol=1e-12)\n\n def test_c_continuity_ppf(self):\n q = np.r_[np.logspace(1e-12, 0.01, base=0.1),\n np.linspace(0.01, 1, 30, endpoint=False),\n 1. - np.logspace(1e-12, 0.01, base=0.1)]\n for c in [0., -1.]:\n ppf0 = stats.genpareto.ppf(q, c)\n for dc in [1e-14, -1e-14]:\n ppfc = stats.genpareto.ppf(q, c + dc)\n assert_allclose(ppf0, ppfc, atol=1e-12)\n\n def test_c_continuity_isf(self):\n q = np.r_[np.logspace(1e-12, 0.01, base=0.1),\n np.linspace(0.01, 1, 30, endpoint=False),\n 1. - np.logspace(1e-12, 0.01, base=0.1)]\n for c in [0., -1.]:\n isf0 = stats.genpareto.isf(q, c)\n for dc in [1e-14, -1e-14]:\n isfc = stats.genpareto.isf(q, c + dc)\n assert_allclose(isf0, isfc, atol=1e-12)\n\n def test_cdf_ppf_roundtrip(self):\n # this should pass with machine precision. hat tip @pbrod\n q = np.r_[np.logspace(1e-12, 0.01, base=0.1),\n np.linspace(0.01, 1, 30, endpoint=False),\n 1. - np.logspace(1e-12, 0.01, base=0.1)]\n for c in [1e-8, -1e-18, 1e-15, -1e-15]:\n assert_allclose(stats.genpareto.cdf(stats.genpareto.ppf(q, c), c),\n q, atol=1e-15)\n\n def test_logsf(self):\n logp = stats.genpareto.logsf(1e10, .01, 0, 1)\n assert_allclose(logp, -1842.0680753952365)\n\n # Values in 'expected_stats' are\n # [mean, variance, skewness, excess kurtosis].\n @pytest.mark.parametrize(\n 'c, expected_stats',\n [(0, [1, 1, 2, 6]),\n (1/4, [4/3, 32/9, 10/np.sqrt(2), np.nan]),\n (1/9, [9/8, (81/64)*(9/7), (10/9)*np.sqrt(7), 754/45]),\n (-1, [1/2, 1/12, 0, -6/5])])\n def test_stats(self, c, expected_stats):\n result = stats.genpareto.stats(c, moments='mvsk')\n assert_allclose(result, expected_stats, rtol=1e-13, atol=1e-15)\n\n def test_var(self):\n # Regression test for gh-11168.\n v = stats.genpareto.var(1e-8)\n assert_allclose(v, 1.000000040000001, rtol=1e-13)\n\n\nclass TestPearson3(object):\n def setup_method(self):\n np.random.seed(1234)\n\n def test_rvs(self):\n vals = stats.pearson3.rvs(0.1, size=(2, 50))\n assert_(numpy.shape(vals) == (2, 50))\n assert_(vals.dtype.char in typecodes['AllFloat'])\n val = stats.pearson3.rvs(0.5)\n assert_(isinstance(val, float))\n val = stats.pearson3(0.5).rvs(3)\n assert_(isinstance(val, numpy.ndarray))\n assert_(val.dtype.char in typecodes['AllFloat'])\n assert_(len(val) == 3)\n\n def test_pdf(self):\n vals = stats.pearson3.pdf(2, [0.0, 0.1, 0.2])\n assert_allclose(vals, np.array([0.05399097, 0.05555481, 0.05670246]),\n atol=1e-6)\n vals = stats.pearson3.pdf(-3, 0.1)\n assert_allclose(vals, np.array([0.00313791]), atol=1e-6)\n vals = stats.pearson3.pdf([-3, -2, -1, 0, 1], 0.1)\n assert_allclose(vals, np.array([0.00313791, 0.05192304, 0.25028092,\n 0.39885918, 0.23413173]), atol=1e-6)\n\n def test_cdf(self):\n vals = stats.pearson3.cdf(2, [0.0, 0.1, 0.2])\n assert_allclose(vals, np.array([0.97724987, 0.97462004, 0.97213626]),\n atol=1e-6)\n vals = stats.pearson3.cdf(-3, 0.1)\n assert_allclose(vals, [0.00082256], atol=1e-6)\n vals = stats.pearson3.cdf([-3, -2, -1, 0, 1], 0.1)\n assert_allclose(vals, [8.22563821e-04, 1.99860448e-02, 1.58550710e-01,\n 5.06649130e-01, 8.41442111e-01], atol=1e-6)\n\n\nclass TestKappa4(object):\n def test_cdf_genpareto(self):\n # h = 1 and k != 0 is generalized Pareto\n x = [0.0, 0.1, 0.2, 0.5]\n h = 1.0\n for k in [-1.9, -1.0, -0.5, -0.2, -0.1, 0.1, 0.2, 0.5, 1.0,\n 1.9]:\n vals = stats.kappa4.cdf(x, h, k)\n # shape parameter is opposite what is expected\n vals_comp = stats.genpareto.cdf(x, -k)\n assert_allclose(vals, vals_comp)\n\n def test_cdf_genextreme(self):\n # h = 0 and k != 0 is generalized extreme value\n x = np.linspace(-5, 5, 
10)\n h = 0.0\n k = np.linspace(-3, 3, 10)\n vals = stats.kappa4.cdf(x, h, k)\n vals_comp = stats.genextreme.cdf(x, k)\n assert_allclose(vals, vals_comp)\n\n def test_cdf_expon(self):\n # h = 1 and k = 0 is exponential\n x = np.linspace(0, 10, 10)\n h = 1.0\n k = 0.0\n vals = stats.kappa4.cdf(x, h, k)\n vals_comp = stats.expon.cdf(x)\n assert_allclose(vals, vals_comp)\n\n def test_cdf_gumbel_r(self):\n # h = 0 and k = 0 is gumbel_r\n x = np.linspace(-5, 5, 10)\n h = 0.0\n k = 0.0\n vals = stats.kappa4.cdf(x, h, k)\n vals_comp = stats.gumbel_r.cdf(x)\n assert_allclose(vals, vals_comp)\n\n def test_cdf_logistic(self):\n # h = -1 and k = 0 is logistic\n x = np.linspace(-5, 5, 10)\n h = -1.0\n k = 0.0\n vals = stats.kappa4.cdf(x, h, k)\n vals_comp = stats.logistic.cdf(x)\n assert_allclose(vals, vals_comp)\n\n def test_cdf_uniform(self):\n # h = 1 and k = 1 is uniform\n x = np.linspace(-5, 5, 10)\n h = 1.0\n k = 1.0\n vals = stats.kappa4.cdf(x, h, k)\n vals_comp = stats.uniform.cdf(x)\n assert_allclose(vals, vals_comp)\n\n def test_integers_ctor(self):\n # regression test for gh-7416: _argcheck fails for integer h and k\n # in numpy 1.12\n stats.kappa4(1, 2)\n\n\nclass TestPoisson(object):\n def setup_method(self):\n np.random.seed(1234)\n\n def test_pmf_basic(self):\n # Basic case\n ln2 = np.log(2)\n vals = stats.poisson.pmf([0, 1, 2], ln2)\n expected = [0.5, ln2/2, ln2**2/4]\n assert_allclose(vals, expected)\n\n def test_mu0(self):\n # Edge case: mu=0\n vals = stats.poisson.pmf([0, 1, 2], 0)\n expected = [1, 0, 0]\n assert_array_equal(vals, expected)\n\n interval = stats.poisson.interval(0.95, 0)\n assert_equal(interval, (0, 0))\n\n def test_rvs(self):\n vals = stats.poisson.rvs(0.5, size=(2, 50))\n assert_(numpy.all(vals >= 0))\n assert_(numpy.shape(vals) == (2, 50))\n assert_(vals.dtype.char in typecodes['AllInteger'])\n val = stats.poisson.rvs(0.5)\n assert_(isinstance(val, int))\n val = stats.poisson(0.5).rvs(3)\n assert_(isinstance(val, numpy.ndarray))\n assert_(val.dtype.char in typecodes['AllInteger'])\n\n def test_stats(self):\n mu = 16.0\n result = stats.poisson.stats(mu, moments='mvsk')\n assert_allclose(result, [mu, mu, np.sqrt(1.0/mu), 1.0/mu])\n\n mu = np.array([0.0, 1.0, 2.0])\n result = stats.poisson.stats(mu, moments='mvsk')\n expected = (mu, mu, [np.inf, 1, 1/np.sqrt(2)], [np.inf, 1, 0.5])\n assert_allclose(result, expected)\n\n\nclass TestKSTwo(object):\n def setup_method(self):\n np.random.seed(1234)\n\n def test_cdf(self):\n for n in [1, 2, 3, 10, 100, 1000]:\n # Test x-values:\n # 0, 1/2n, where the cdf should be 0\n # 1/n, where the cdf should be n!/n^n\n # 0.5, where the cdf should match ksone.cdf\n # 1-1/n, where cdf = 1-2/n^n\n # 1, where cdf == 1\n # (E.g. 
Exact values given by Eqn 1 in Simard / L'Ecuyer)\n x = np.array([0, 0.5/n, 1/n, 0.5, 1-1.0/n, 1])\n v1 = (1.0/n)**n\n lg = scipy.special.gammaln(n+1)\n elg = (np.exp(lg) if v1 != 0 else 0)\n expected = np.array([0, 0, v1 * elg,\n 1 - 2*stats.ksone.sf(0.5, n),\n max(1 - 2*v1, 0.0),\n 1.0])\n vals_cdf = stats.kstwo.cdf(x, n)\n assert_allclose(vals_cdf, expected)\n\n def test_sf(self):\n x = np.linspace(0, 1, 11)\n for n in [1, 2, 3, 10, 100, 1000]:\n # Same x values as in test_cdf, and use sf = 1 - cdf\n x = np.array([0, 0.5/n, 1/n, 0.5, 1-1.0/n, 1])\n v1 = (1.0/n)**n\n lg = scipy.special.gammaln(n+1)\n elg = (np.exp(lg) if v1 != 0 else 0)\n expected = np.array([1.0, 1.0,\n 1 - v1 * elg,\n 2*stats.ksone.sf(0.5, n),\n min(2*v1, 1.0), 0])\n vals_sf = stats.kstwo.sf(x, n)\n assert_allclose(vals_sf, expected)\n\n def test_cdf_sqrtn(self):\n # For fixed a, cdf(a/sqrt(n), n) -> kstwobign(a) as n->infinity\n # cdf(a/sqrt(n), n) is an increasing function of n (and a)\n # Check that the function is indeed increasing (allowing for some\n # small floating point and algorithm differences.)\n x = np.linspace(0, 2, 11)[1:]\n ns = [50, 100, 200, 400, 1000, 2000]\n for _x in x:\n xn = _x / np.sqrt(ns)\n probs = stats.kstwo.cdf(xn, ns)\n diffs = np.diff(probs)\n assert_array_less(diffs, 1e-8)\n\n def test_cdf_sf(self):\n x = np.linspace(0, 1, 11)\n for n in [1, 2, 3, 10, 100, 1000]:\n vals_cdf = stats.kstwo.cdf(x, n)\n vals_sf = stats.kstwo.sf(x, n)\n assert_array_almost_equal(vals_cdf, 1 - vals_sf)\n\n def test_cdf_sf_sqrtn(self):\n x = np.linspace(0, 1, 11)\n for n in [1, 2, 3, 10, 100, 1000]:\n xn = x / np.sqrt(n)\n vals_cdf = stats.kstwo.cdf(xn, n)\n vals_sf = stats.kstwo.sf(xn, n)\n assert_array_almost_equal(vals_cdf, 1 - vals_sf)\n\n def test_ppf_of_cdf(self):\n x = np.linspace(0, 1, 11)\n for n in [1, 2, 3, 10, 100, 1000]:\n xn = x[x > 0.5/n]\n vals_cdf = stats.kstwo.cdf(xn, n)\n # CDFs close to 1 are better dealt with using the SF\n cond = (0 < vals_cdf) & (vals_cdf < 0.99)\n vals = stats.kstwo.ppf(vals_cdf, n)\n assert_allclose(vals[cond], xn[cond], rtol=1e-4)\n\n def test_isf_of_sf(self):\n x = np.linspace(0, 1, 11)\n for n in [1, 2, 3, 10, 100, 1000]:\n xn = x[x > 0.5/n]\n vals_isf = stats.kstwo.isf(xn, n)\n cond = (0 < vals_isf) & (vals_isf < 1.0)\n vals = stats.kstwo.sf(vals_isf, n)\n assert_allclose(vals[cond], xn[cond], rtol=1e-4)\n\n def test_ppf_of_cdf_sqrtn(self):\n x = np.linspace(0, 1, 11)\n for n in [1, 2, 3, 10, 100, 1000]:\n xn = (x / np.sqrt(n))[x > 0.5/n]\n vals_cdf = stats.kstwo.cdf(xn, n)\n cond = (0 < vals_cdf) & (vals_cdf < 1.0)\n vals = stats.kstwo.ppf(vals_cdf, n)\n assert_allclose(vals[cond], xn[cond])\n\n def test_isf_of_sf_sqrtn(self):\n x = np.linspace(0, 1, 11)\n for n in [1, 2, 3, 10, 100, 1000]:\n xn = (x / np.sqrt(n))[x > 0.5/n]\n vals_sf = stats.kstwo.sf(xn, n)\n # SFs close to 1 are better dealt with using the CDF\n cond = (0 < vals_sf) & (vals_sf < 0.95)\n vals = stats.kstwo.isf(vals_sf, n)\n assert_allclose(vals[cond], xn[cond])\n\n def test_ppf(self):\n probs = np.linspace(0, 1, 11)[1:]\n for n in [1, 2, 3, 10, 100, 1000]:\n xn = stats.kstwo.ppf(probs, n)\n vals_cdf = stats.kstwo.cdf(xn, n)\n assert_allclose(vals_cdf, probs)\n\n def test_simard_lecuyer_table1(self):\n # Compute the cdf for values near the mean of the distribution.\n # The mean u ~ log(2)*sqrt(pi/(2n))\n # Compute for x in [u/4, u/3, u/2, u, 2u, 3u]\n # This is the computation of Table 1 of Simard, R., L'Ecuyer, P. 
(2011)\n # \"Computing the Two-Sided Kolmogorov-Smirnov Distribution\".\n # Except that the values below are not from the published table, but\n # were generated using an independent SageMath implementation of\n # Durbin's algorithm (with the exponentiation and scaling of\n # Marsaglia/Tsang/Wang's version) using 500 bit arithmetic.\n # Some of the values in the published table have relative\n # errors greater than 1e-4.\n ns = [10, 50, 100, 200, 500, 1000]\n ratios = np.array([1.0/4, 1.0/3, 1.0/2, 1, 2, 3])\n expected = np.array([\n [1.92155292e-08, 5.72933228e-05, 2.15233226e-02, 6.31566589e-01,\n 9.97685592e-01, 9.99999942e-01],\n [2.28096224e-09, 1.99142563e-05, 1.42617934e-02, 5.95345542e-01,\n 9.96177701e-01, 9.99998662e-01],\n [1.00201886e-09, 1.32673079e-05, 1.24608594e-02, 5.86163220e-01,\n 9.95866877e-01, 9.99998240e-01],\n [4.93313022e-10, 9.52658029e-06, 1.12123138e-02, 5.79486872e-01,\n 9.95661824e-01, 9.99997964e-01],\n [2.37049293e-10, 6.85002458e-06, 1.01309221e-02, 5.73427224e-01,\n 9.95491207e-01, 9.99997750e-01],\n [1.56990874e-10, 5.71738276e-06, 9.59725430e-03, 5.70322692e-01,\n 9.95409545e-01, 9.99997657e-01]\n ])\n for idx, n in enumerate(ns):\n x = ratios * np.log(2) * np.sqrt(np.pi/2/n)\n vals_cdf = stats.kstwo.cdf(x, n)\n assert_allclose(vals_cdf, expected[idx], rtol=1e-5)\n\n\nclass TestZipf(object):\n def setup_method(self):\n np.random.seed(1234)\n\n def test_rvs(self):\n vals = stats.zipf.rvs(1.5, size=(2, 50))\n assert_(numpy.all(vals >= 1))\n assert_(numpy.shape(vals) == (2, 50))\n assert_(vals.dtype.char in typecodes['AllInteger'])\n val = stats.zipf.rvs(1.5)\n assert_(isinstance(val, int))\n val = stats.zipf(1.5).rvs(3)\n assert_(isinstance(val, numpy.ndarray))\n assert_(val.dtype.char in typecodes['AllInteger'])\n\n def test_moments(self):\n # n-th moment is finite iff a > n + 1\n m, v = stats.zipf.stats(a=2.8)\n assert_(np.isfinite(m))\n assert_equal(v, np.inf)\n\n s, k = stats.zipf.stats(a=4.8, moments='sk')\n assert_(not np.isfinite([s, k]).all())\n\n\nclass TestDLaplace(object):\n def setup_method(self):\n np.random.seed(1234)\n\n def test_rvs(self):\n vals = stats.dlaplace.rvs(1.5, size=(2, 50))\n assert_(numpy.shape(vals) == (2, 50))\n assert_(vals.dtype.char in typecodes['AllInteger'])\n val = stats.dlaplace.rvs(1.5)\n assert_(isinstance(val, int))\n val = stats.dlaplace(1.5).rvs(3)\n assert_(isinstance(val, numpy.ndarray))\n assert_(val.dtype.char in typecodes['AllInteger'])\n assert_(stats.dlaplace.rvs(0.8) is not None)\n\n def test_stats(self):\n # compare the explicit formulas w/ direct summation using pmf\n a = 1.\n dl = stats.dlaplace(a)\n m, v, s, k = dl.stats('mvsk')\n\n N = 37\n xx = np.arange(-N, N+1)\n pp = dl.pmf(xx)\n m2, m4 = np.sum(pp*xx**2), np.sum(pp*xx**4)\n assert_equal((m, s), (0, 0))\n assert_allclose((v, k), (m2, m4/m2**2 - 3.), atol=1e-14, rtol=1e-8)\n\n def test_stats2(self):\n a = np.log(2.)\n dl = stats.dlaplace(a)\n m, v, s, k = dl.stats('mvsk')\n assert_equal((m, s), (0., 0.))\n assert_allclose((v, k), (4., 3.25))\n\n\nclass TestInvgauss(object):\n def setup_method(self):\n np.random.seed(1234)\n\n @pytest.mark.parametrize(\"rvs_mu,rvs_loc,rvs_scale\",\n [(2, 0, 1), (np.random.rand(3)*10)])\n def test_fit(self, rvs_mu, rvs_loc, rvs_scale):\n data = stats.invgauss.rvs(size=100, mu=rvs_mu,\n loc=rvs_loc, scale=rvs_scale)\n # Analytical MLEs are calculated with formula when `floc` is fixed\n mu, loc, scale = stats.invgauss.fit(data, floc=rvs_loc)\n\n data = data - rvs_loc\n mu_temp = np.mean(data)\n scale_mle = len(data) / 
(np.sum(data**(-1) - mu_temp**(-1)))\n mu_mle = mu_temp/scale_mle\n\n # `mu` and `scale` match analytical formula\n assert_allclose(mu_mle, mu, atol=1e-15, rtol=1e-15)\n assert_allclose(scale_mle, scale, atol=1e-15, rtol=1e-15)\n assert_equal(loc, rvs_loc)\n data = stats.invgauss.rvs(size=100, mu=rvs_mu,\n loc=rvs_loc, scale=rvs_scale)\n # fixed parameters are returned\n mu, loc, scale = stats.invgauss.fit(data, floc=rvs_loc - 1,\n fscale=rvs_scale + 1)\n assert_equal(rvs_scale + 1, scale)\n assert_equal(rvs_loc - 1, loc)\n\n # shape can still be fixed with multiple names\n shape_mle1 = stats.invgauss.fit(data, fmu=1.04)[0]\n shape_mle2 = stats.invgauss.fit(data, fix_mu=1.04)[0]\n shape_mle3 = stats.invgauss.fit(data, f0=1.04)[0]\n assert shape_mle1 == shape_mle2 == shape_mle3 == 1.04\n\n @pytest.mark.parametrize(\"rvs_mu,rvs_loc,rvs_scale\",\n [(2, 0, 1), (np.random.rand(3)*10)])\n def test_fit_MLE_comp_optimzer(self, rvs_mu, rvs_loc, rvs_scale):\n data = stats.invgauss.rvs(size=100, mu=rvs_mu,\n loc=rvs_loc, scale=rvs_scale)\n\n super_fit = super(type(stats.invgauss), stats.invgauss).fit\n # fitting without `floc` uses superclass fit method\n super_fitted = super_fit(data)\n invgauss_fit = stats.invgauss.fit(data)\n assert_equal(super_fitted, invgauss_fit)\n\n # fitting with `fmu` is uses superclass fit method\n super_fitted = super_fit(data, floc=0, fmu=2)\n invgauss_fit = stats.invgauss.fit(data, floc=0, fmu=2)\n assert_equal(super_fitted, invgauss_fit)\n\n # obtain log-likelihood objective function to compare results\n args = [data, (stats.invgauss._fitstart(data), )]\n func = stats.invgauss._reduce_func(args, {})[1]\n\n # fixed `floc` uses analytical formula and provides better fit than\n # super method\n _assert_lessthan_loglike(stats.invgauss, data, func, floc=rvs_loc)\n\n # fixed `floc` not resulting in invalid data < 0 uses analytical\n # formulas and provides a better fit than the super method\n assert np.all((data - (rvs_loc - 1)) > 0)\n _assert_lessthan_loglike(stats.invgauss, data, func, floc=rvs_loc - 1)\n\n # fixed `floc` to an arbitrary number, 0, still provides a better fit\n # than the super method\n _assert_lessthan_loglike(stats.invgauss, data, func, floc=0)\n\n # fixed `fscale` to an arbitrary number still provides a better fit\n # than the super method\n _assert_lessthan_loglike(stats.invgauss, data, func, floc=rvs_loc,\n fscale=np.random.rand(1)[0])\n\n def test_fit_raise_errors(self):\n assert_fit_warnings(stats.invgauss)\n # FitDataError is raised when negative invalid data\n with pytest.raises(FitDataError):\n stats.invgauss.fit([1, 2, 3], floc=2)\n\n\nclass TestLaplace(object):\n @pytest.mark.parametrize(\"rvs_loc\", [-5, 0, 1, 2])\n @pytest.mark.parametrize(\"rvs_scale\", [1, 2, 3, 10])\n def test_fit(self, rvs_loc, rvs_scale):\n # tests that various inputs follow expected behavior\n # for a variety of `loc` and `scale`.\n data = stats.laplace.rvs(size=100, loc=rvs_loc, scale=rvs_scale)\n\n # MLE estimates are given by\n loc_mle = np.median(data)\n scale_mle = np.sum(np.abs(data - loc_mle)) / len(data)\n\n # standard outputs should match analytical MLE formulas\n loc, scale = stats.laplace.fit(data)\n assert_allclose(loc, loc_mle, atol=1e-15, rtol=1e-15)\n assert_allclose(scale, scale_mle, atol=1e-15, rtol=1e-15)\n\n # fixed parameter should use analytical formula for other\n loc, scale = stats.laplace.fit(data, floc=loc_mle)\n assert_allclose(scale, scale_mle, atol=1e-15, rtol=1e-15)\n loc, scale = stats.laplace.fit(data, fscale=scale_mle)\n 
assert_allclose(loc, loc_mle)\n\n # test with non-mle fixed parameter\n # create scale with non-median loc\n loc = rvs_loc * 2\n scale_mle = np.sum(np.abs(data - loc)) / len(data)\n\n # fixed loc to non median, scale should match\n # scale calculation with modified loc\n loc, scale = stats.laplace.fit(data, floc=loc)\n assert_equal(scale_mle, scale)\n\n # fixed scale created with non median loc,\n # loc output should still be the data median.\n loc, scale = stats.laplace.fit(data, fscale=scale_mle)\n assert_equal(loc_mle, loc)\n\n # error raised when both `floc` and `fscale` are fixed\n assert_raises(RuntimeError, stats.laplace.fit, data, floc=loc_mle,\n fscale=scale_mle)\n\n # error is raised with non-finite values\n assert_raises(RuntimeError, stats.laplace.fit, [np.nan])\n assert_raises(RuntimeError, stats.laplace.fit, [np.inf])\n\n @pytest.mark.parametrize(\"rvs_scale,rvs_loc\", [(10, -5),\n (5, 10),\n (.2, .5)])\n def test_fit_MLE_comp_optimzer(self, rvs_loc, rvs_scale):\n data = stats.laplace.rvs(size=1000, loc=rvs_loc, scale=rvs_scale)\n\n # the log-likelihood function for laplace is given by\n def ll(loc, scale, data):\n return -1 * (- (len(data)) * np.log(2*scale) -\n (1/scale)*np.sum(np.abs(data - loc)))\n\n # test that the objective function result of the analytical MLEs is\n # less than or equal to that of the numerically optimized estimate\n loc, scale = stats.laplace.fit(data)\n loc_opt, scale_opt = super(type(stats.laplace),\n stats.laplace).fit(data)\n ll_mle = ll(loc, scale, data)\n ll_opt = ll(loc_opt, scale_opt, data)\n assert ll_mle < ll_opt or np.allclose(ll_mle, ll_opt,\n atol=1e-15, rtol=1e-15)\n\n def test_fit_simple_non_random_data(self):\n data = np.array([1.0, 1.0, 3.0, 5.0, 8.0, 14.0])\n # with `floc` fixed to 6, scale should be 4.\n loc, scale = stats.laplace.fit(data, floc=6)\n assert_allclose(scale, 4, atol=1e-15, rtol=1e-15)\n # with `fscale` fixed to 6, loc should be 4.\n loc, scale = stats.laplace.fit(data, fscale=6)\n assert_allclose(loc, 4, atol=1e-15, rtol=1e-15)\n\n\nclass TestInvGamma(object):\n def test_invgamma_inf_gh_1866(self):\n # invgamma's moments are only finite for a>n\n # specific numbers checked w/ boost 1.54\n with warnings.catch_warnings():\n warnings.simplefilter('error', RuntimeWarning)\n mvsk = stats.invgamma.stats(a=19.31, moments='mvsk')\n expected = [0.05461496450, 0.0001723162534, 1.020362676,\n 2.055616582]\n assert_allclose(mvsk, expected)\n\n a = [1.1, 3.1, 5.6]\n mvsk = stats.invgamma.stats(a=a, moments='mvsk')\n expected = ([10., 0.476190476, 0.2173913043], # mmm\n [np.inf, 0.2061430632, 0.01312749422], # vvv\n [np.nan, 41.95235392, 2.919025532], # sss\n [np.nan, np.nan, 24.51923076]) # kkk\n for x, y in zip(mvsk, expected):\n assert_almost_equal(x, y)\n\n def test_cdf_ppf(self):\n # gh-6245\n x = np.logspace(-2.6, 0)\n y = stats.invgamma.cdf(x, 1)\n xx = stats.invgamma.ppf(y, 1)\n assert_allclose(x, xx)\n\n def test_sf_isf(self):\n # gh-6245\n if sys.maxsize > 2**32:\n x = np.logspace(2, 100)\n else:\n # Invgamme roundtrip on 32-bit systems has relative accuracy\n # ~1e-15 until x=1e+15, and becomes inf above x=1e+18\n x = np.logspace(2, 18)\n\n y = stats.invgamma.sf(x, 1)\n xx = stats.invgamma.isf(y, 1)\n assert_allclose(x, xx, rtol=1.0)\n\n\nclass TestF(object):\n def test_endpoints(self):\n # Compute the pdf at the left endpoint dst.a.\n data = [[stats.f, (2, 1), 1.0]]\n for _f, _args, _correct in data:\n ans = _f.pdf(_f.a, *_args)\n\n ans = [_f.pdf(_f.a, *_args) for _f, _args, _ in data]\n correct = [_correct_ for _f, 
_args, _correct_ in data]\n assert_array_almost_equal(ans, correct)\n\n def test_f_moments(self):\n # n-th moment of F distributions is only finite for n < dfd / 2\n m, v, s, k = stats.f.stats(11, 6.5, moments='mvsk')\n assert_(np.isfinite(m))\n assert_(np.isfinite(v))\n assert_(np.isfinite(s))\n assert_(not np.isfinite(k))\n\n def test_moments_warnings(self):\n # no warnings should be generated for dfd = 2, 4, 6, 8 (div by zero)\n with warnings.catch_warnings():\n warnings.simplefilter('error', RuntimeWarning)\n stats.f.stats(dfn=[11]*4, dfd=[2, 4, 6, 8], moments='mvsk')\n\n def test_stats_broadcast(self):\n dfn = np.array([[3], [11]])\n dfd = np.array([11, 12])\n m, v, s, k = stats.f.stats(dfn=dfn, dfd=dfd, moments='mvsk')\n m2 = [dfd / (dfd - 2)]*2\n assert_allclose(m, m2)\n v2 = 2 * dfd**2 * (dfn + dfd - 2) / dfn / (dfd - 2)**2 / (dfd - 4)\n assert_allclose(v, v2)\n s2 = ((2*dfn + dfd - 2) * np.sqrt(8*(dfd - 4)) /\n ((dfd - 6) * np.sqrt(dfn*(dfn + dfd - 2))))\n assert_allclose(s, s2)\n k2num = 12 * (dfn * (5*dfd - 22) * (dfn + dfd - 2) +\n (dfd - 4) * (dfd - 2)**2)\n k2den = dfn * (dfd - 6) * (dfd - 8) * (dfn + dfd - 2)\n k2 = k2num / k2den\n assert_allclose(k, k2)\n\n\ndef test_rvgeneric_std():\n # Regression test for #1191\n assert_array_almost_equal(stats.t.std([5, 6]), [1.29099445, 1.22474487])\n\n\ndef test_moments_t():\n # regression test for #8786\n assert_equal(stats.t.stats(df=1, moments='mvsk'),\n (np.inf, np.nan, np.nan, np.nan))\n assert_equal(stats.t.stats(df=1.01, moments='mvsk'),\n (0.0, np.inf, np.nan, np.nan))\n assert_equal(stats.t.stats(df=2, moments='mvsk'),\n (0.0, np.inf, np.nan, np.nan))\n assert_equal(stats.t.stats(df=2.01, moments='mvsk'),\n (0.0, 2.01/(2.01-2.0), np.nan, np.inf))\n assert_equal(stats.t.stats(df=3, moments='sk'), (np.nan, np.inf))\n assert_equal(stats.t.stats(df=3.01, moments='sk'), (0.0, np.inf))\n assert_equal(stats.t.stats(df=4, moments='sk'), (0.0, np.inf))\n assert_equal(stats.t.stats(df=4.01, moments='sk'), (0.0, 6.0/(4.01 - 4.0)))\n\n\nclass TestRvDiscrete(object):\n def setup_method(self):\n np.random.seed(1234)\n\n def test_rvs(self):\n states = [-1, 0, 1, 2, 3, 4]\n probability = [0.0, 0.3, 0.4, 0.0, 0.3, 0.0]\n samples = 1000\n r = stats.rv_discrete(name='sample', values=(states, probability))\n x = r.rvs(size=samples)\n assert_(isinstance(x, numpy.ndarray))\n\n for s, p in zip(states, probability):\n assert_(abs(sum(x == s)/float(samples) - p) < 0.05)\n\n x = r.rvs()\n assert_(isinstance(x, int))\n\n def test_entropy(self):\n # Basic tests of entropy.\n pvals = np.array([0.25, 0.45, 0.3])\n p = stats.rv_discrete(values=([0, 1, 2], pvals))\n expected_h = -sum(xlogy(pvals, pvals))\n h = p.entropy()\n assert_allclose(h, expected_h)\n\n p = stats.rv_discrete(values=([0, 1, 2], [1.0, 0, 0]))\n h = p.entropy()\n assert_equal(h, 0.0)\n\n def test_pmf(self):\n xk = [1, 2, 4]\n pk = [0.5, 0.3, 0.2]\n rv = stats.rv_discrete(values=(xk, pk))\n\n x = [[1., 4.],\n [3., 2]]\n assert_allclose(rv.pmf(x),\n [[0.5, 0.2],\n [0., 0.3]], atol=1e-14)\n\n def test_cdf(self):\n xk = [1, 2, 4]\n pk = [0.5, 0.3, 0.2]\n rv = stats.rv_discrete(values=(xk, pk))\n\n x_values = [-2, 1., 1.1, 1.5, 2.0, 3.0, 4, 5]\n expected = [0, 0.5, 0.5, 0.5, 0.8, 0.8, 1, 1]\n assert_allclose(rv.cdf(x_values), expected, atol=1e-14)\n\n # also check scalar arguments\n assert_allclose([rv.cdf(xx) for xx in x_values],\n expected, atol=1e-14)\n\n def test_ppf(self):\n xk = [1, 2, 4]\n pk = [0.5, 0.3, 0.2]\n rv = stats.rv_discrete(values=(xk, pk))\n\n q_values = [0.1, 0.5, 0.6, 0.8, 
0.9, 1.]\n expected = [1, 1, 2, 2, 4, 4]\n assert_allclose(rv.ppf(q_values), expected, atol=1e-14)\n\n # also check scalar arguments\n assert_allclose([rv.ppf(q) for q in q_values],\n expected, atol=1e-14)\n\n def test_cdf_ppf_next(self):\n # copied and special cased from test_discrete_basic\n vals = ([1, 2, 4, 7, 8], [0.1, 0.2, 0.3, 0.3, 0.1])\n rv = stats.rv_discrete(values=vals)\n\n assert_array_equal(rv.ppf(rv.cdf(rv.xk[:-1]) + 1e-8),\n rv.xk[1:])\n\n def test_multidimension(self):\n xk = np.arange(12).reshape((3, 4))\n pk = np.array([[0.1, 0.1, 0.15, 0.05],\n [0.1, 0.1, 0.05, 0.05],\n [0.1, 0.1, 0.05, 0.05]])\n rv = stats.rv_discrete(values=(xk, pk))\n\n assert_allclose(rv.expect(), np.sum(rv.xk * rv.pk), atol=1e-14)\n\n def test_bad_input(self):\n xk = [1, 2, 3]\n pk = [0.5, 0.5]\n assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))\n\n pk = [1, 2, 3]\n assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))\n\n xk = [1, 2, 3]\n pk = [0.5, 1.2, -0.7]\n assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))\n\n xk = [1, 2, 3, 4, 5]\n pk = [0.3, 0.3, 0.3, 0.3, -0.2]\n assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))\n\n def test_shape_rv_sample(self):\n # tests added for gh-9565\n\n # mismatch of 2d inputs\n xk, pk = np.arange(4).reshape((2, 2)), np.full((2, 3), 1/6)\n assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))\n\n # same number of elements, but shapes not compatible\n xk, pk = np.arange(6).reshape((3, 2)), np.full((2, 3), 1/6)\n assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))\n\n # same shapes => no error\n xk, pk = np.arange(6).reshape((3, 2)), np.full((3, 2), 1/6)\n assert_equal(stats.rv_discrete(values=(xk, pk)).pmf(0), 1/6)\n\n def test_expect1(self):\n xk = [1, 2, 4, 6, 7, 11]\n pk = [0.1, 0.2, 0.2, 0.2, 0.2, 0.1]\n rv = stats.rv_discrete(values=(xk, pk))\n\n assert_allclose(rv.expect(), np.sum(rv.xk * rv.pk), atol=1e-14)\n\n def test_expect2(self):\n # rv_sample should override _expect. 
Bug report from\n # https://stackoverflow.com/questions/63199792\n y = [200.0, 300.0, 400.0, 500.0, 600.0, 700.0, 800.0, 900.0, 1000.0,\n 1100.0, 1200.0, 1300.0, 1400.0, 1500.0, 1600.0, 1700.0, 1800.0,\n 1900.0, 2000.0, 2100.0, 2200.0, 2300.0, 2400.0, 2500.0, 2600.0,\n 2700.0, 2800.0, 2900.0, 3000.0, 3100.0, 3200.0, 3300.0, 3400.0,\n 3500.0, 3600.0, 3700.0, 3800.0, 3900.0, 4000.0, 4100.0, 4200.0,\n 4300.0, 4400.0, 4500.0, 4600.0, 4700.0, 4800.0]\n\n py = [0.0004, 0.0, 0.0033, 0.006500000000000001, 0.0, 0.0,\n 0.004399999999999999, 0.6862, 0.0, 0.0, 0.0,\n 0.00019999999999997797, 0.0006000000000000449,\n 0.024499999999999966, 0.006400000000000072,\n 0.0043999999999999595, 0.019499999999999962,\n 0.03770000000000007, 0.01759999999999995, 0.015199999999999991,\n 0.018100000000000005, 0.04500000000000004, 0.0025999999999999357,\n 0.0, 0.0041000000000001036, 0.005999999999999894,\n 0.0042000000000000925, 0.0050000000000000044,\n 0.0041999999999999815, 0.0004999999999999449,\n 0.009199999999999986, 0.008200000000000096,\n 0.0, 0.0, 0.0046999999999999265, 0.0019000000000000128,\n 0.0006000000000000449, 0.02510000000000001, 0.0,\n 0.007199999999999984, 0.0, 0.012699999999999934, 0.0, 0.0,\n 0.008199999999999985, 0.005600000000000049, 0.0]\n\n rv = stats.rv_discrete(values=(y, py))\n\n # check the mean\n assert_allclose(rv.expect(), rv.mean(), atol=1e-14)\n assert_allclose(rv.expect(),\n sum(v * w for v, w in zip(y, py)), atol=1e-14)\n\n # also check the second moment\n assert_allclose(rv.expect(lambda x: x**2),\n sum(v**2 * w for v, w in zip(y, py)), atol=1e-14)\n\n\nclass TestSkewNorm(object):\n def setup_method(self):\n self.rng = check_random_state(1234)\n\n def test_normal(self):\n # When the skewness is 0 the distribution is normal\n x = np.linspace(-5, 5, 100)\n assert_array_almost_equal(stats.skewnorm.pdf(x, a=0),\n stats.norm.pdf(x))\n\n def test_rvs(self):\n shape = (3, 4, 5)\n x = stats.skewnorm.rvs(a=0.75, size=shape, random_state=self.rng)\n assert_equal(shape, x.shape)\n\n x = stats.skewnorm.rvs(a=-3, size=shape, random_state=self.rng)\n assert_equal(shape, x.shape)\n\n def test_moments(self):\n X = stats.skewnorm.rvs(a=4, size=int(1e6), loc=5, scale=2,\n random_state=self.rng)\n expected = [np.mean(X), np.var(X), stats.skew(X), stats.kurtosis(X)]\n computed = stats.skewnorm.stats(a=4, loc=5, scale=2, moments='mvsk')\n assert_array_almost_equal(computed, expected, decimal=2)\n\n X = stats.skewnorm.rvs(a=-4, size=int(1e6), loc=5, scale=2,\n random_state=self.rng)\n expected = [np.mean(X), np.var(X), stats.skew(X), stats.kurtosis(X)]\n computed = stats.skewnorm.stats(a=-4, loc=5, scale=2, moments='mvsk')\n assert_array_almost_equal(computed, expected, decimal=2)\n\n def test_cdf_large_x(self):\n # Regression test for gh-7746.\n # The x values are large enough that the closest 64 bit floating\n # point representation of the exact CDF is 1.0.\n p = stats.skewnorm.cdf([10, 20, 30], -1)\n assert_allclose(p, np.ones(3), rtol=1e-14)\n p = stats.skewnorm.cdf(25, 2.5)\n assert_allclose(p, 1.0, rtol=1e-14)\n\n def test_cdf_sf_small_values(self):\n # Triples are [x, a, cdf(x, a)]. 
These values were computed\n # using CDF[SkewNormDistribution[0, 1, a], x] in Wolfram Alpha.\n cdfvals = [\n [-8, 1, 3.870035046664392611e-31],\n [-4, 2, 8.1298399188811398e-21],\n [-2, 5, 1.55326826787106273e-26],\n [-9, -1, 2.257176811907681295e-19],\n [-10, -4, 1.523970604832105213e-23],\n ]\n for x, a, cdfval in cdfvals:\n p = stats.skewnorm.cdf(x, a)\n assert_allclose(p, cdfval, rtol=1e-8)\n # For the skew normal distribution, sf(-x, -a) = cdf(x, a).\n p = stats.skewnorm.sf(-x, -a)\n assert_allclose(p, cdfval, rtol=1e-8)\n\n\nclass TestExpon(object):\n def test_zero(self):\n assert_equal(stats.expon.pdf(0), 1)\n\n def test_tail(self): # Regression test for ticket 807\n assert_equal(stats.expon.cdf(1e-18), 1e-18)\n assert_equal(stats.expon.isf(stats.expon.sf(40)), 40)\n\n def test_nan_raises_error(self):\n # see gh-issue 10300\n x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.nan])\n assert_raises(RuntimeError, stats.expon.fit, x)\n\n def test_inf_raises_error(self):\n # see gh-issue 10300\n x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.inf])\n assert_raises(RuntimeError, stats.expon.fit, x)\n\n\nclass TestNorm(object):\n def test_nan_raises_error(self):\n # see gh-issue 10300\n x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.nan])\n assert_raises(RuntimeError, stats.norm.fit, x)\n\n def test_inf_raises_error(self):\n # see gh-issue 10300\n x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.inf])\n assert_raises(RuntimeError, stats.norm.fit, x)\n\n def test_bad_keyword_arg(self):\n x = [1, 2, 3]\n assert_raises(TypeError, stats.norm.fit, x, plate=\"shrimp\")\n\n\nclass TestUniform(object):\n \"\"\"gh-10300\"\"\"\n def test_nan_raises_error(self):\n # see gh-issue 10300\n x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.nan])\n assert_raises(RuntimeError, stats.uniform.fit, x)\n\n def test_inf_raises_error(self):\n # see gh-issue 10300\n x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.inf])\n assert_raises(RuntimeError, stats.uniform.fit, x)\n\n\nclass TestExponNorm(object):\n def test_moments(self):\n # Some moment test cases based on non-loc/scaled formula\n def get_moms(lam, sig, mu):\n # See wikipedia for these formulae\n # where it is listed as an exponentially modified gaussian\n opK2 = 1.0 + 1 / (lam*sig)**2\n exp_skew = 2 / (lam * sig)**3 * opK2**(-1.5)\n exp_kurt = 6.0 * (1 + (lam * sig)**2)**(-2)\n return [mu + 1/lam, sig*sig + 1.0/(lam*lam), exp_skew, exp_kurt]\n\n mu, sig, lam = 0, 1, 1\n K = 1.0 / (lam * sig)\n sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')\n assert_almost_equal(sts, get_moms(lam, sig, mu))\n mu, sig, lam = -3, 2, 0.1\n K = 1.0 / (lam * sig)\n sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')\n assert_almost_equal(sts, get_moms(lam, sig, mu))\n mu, sig, lam = 0, 3, 1\n K = 1.0 / (lam * sig)\n sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')\n assert_almost_equal(sts, get_moms(lam, sig, mu))\n mu, sig, lam = -5, 11, 3.5\n K = 1.0 / (lam * sig)\n sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')\n assert_almost_equal(sts, get_moms(lam, sig, mu))\n\n def test_nan_raises_error(self):\n # see gh-issue 10300\n x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.nan])\n assert_raises(RuntimeError, stats.exponnorm.fit, x, floc=0, fscale=1)\n\n def test_inf_raises_error(self):\n # see gh-issue 10300\n x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.inf])\n assert_raises(RuntimeError, stats.exponnorm.fit, x, floc=0, fscale=1)\n\n def 
test_extremes_x(self):\n # Test for extreme values against overflows\n assert_almost_equal(stats.exponnorm.pdf(-900, 1), 0.0)\n assert_almost_equal(stats.exponnorm.pdf(+900, 1), 0.0)\n assert_almost_equal(stats.exponnorm.pdf(-900, 0.01), 0.0)\n assert_almost_equal(stats.exponnorm.pdf(+900, 0.01), 0.0)\n\n # Expected values for the PDF were computed with mpmath, with\n # the following function, and with mpmath.mp.dps = 50.\n #\n # def exponnorm_stdpdf(x, K):\n # x = mpmath.mpf(x)\n # K = mpmath.mpf(K)\n # t1 = mpmath.exp(1/(2*K**2) - x/K)\n # erfcarg = -(x - 1/K)/mpmath.sqrt(2)\n # t2 = mpmath.erfc(erfcarg)\n # return t1 * t2 / (2*K)\n #\n @pytest.mark.parametrize('x, K, expected',\n [(20, 0.01, 6.90010764753618e-88),\n (1, 0.01, 0.24438994313247364),\n (-1, 0.01, 0.23955149623472075),\n (-20, 0.01, 4.6004708690125477e-88),\n (10, 1, 7.48518298877006e-05),\n (10, 10000, 9.990005048283775e-05)])\n def test_std_pdf(self, x, K, expected):\n assert_allclose(stats.exponnorm.pdf(x, K), expected, rtol=1e-12)\n\n\nclass TestGenExpon(object):\n def test_pdf_unity_area(self):\n from scipy.integrate import simps\n # PDF should integrate to one\n p = stats.genexpon.pdf(numpy.arange(0, 10, 0.01), 0.5, 0.5, 2.0)\n assert_almost_equal(simps(p, dx=0.01), 1, 1)\n\n def test_cdf_bounds(self):\n # CDF should always be positive\n cdf = stats.genexpon.cdf(numpy.arange(0, 10, 0.01), 0.5, 0.5, 2.0)\n assert_(numpy.all((0 <= cdf) & (cdf <= 1)))\n\n\nclass TestExponpow(object):\n def test_tail(self):\n assert_almost_equal(stats.exponpow.cdf(1e-10, 2.), 1e-20)\n assert_almost_equal(stats.exponpow.isf(stats.exponpow.sf(5, .8), .8),\n 5)\n\n\nclass TestSkellam(object):\n def test_pmf(self):\n # comparison to R\n k = numpy.arange(-10, 15)\n mu1, mu2 = 10, 5\n skpmfR = numpy.array(\n [4.2254582961926893e-005, 1.1404838449648488e-004,\n 2.8979625801752660e-004, 6.9177078182101231e-004,\n 1.5480716105844708e-003, 3.2412274963433889e-003,\n 6.3373707175123292e-003, 1.1552351566696643e-002,\n 1.9606152375042644e-002, 3.0947164083410337e-002,\n 4.5401737566767360e-002, 6.1894328166820688e-002,\n 7.8424609500170578e-002, 9.2418812533573133e-002,\n 1.0139793148019728e-001, 1.0371927988298846e-001,\n 9.9076583077406091e-002, 8.8546660073089561e-002,\n 7.4187842052486810e-002, 5.8392772862200251e-002,\n 4.3268692953013159e-002, 3.0248159818374226e-002,\n 1.9991434305603021e-002, 1.2516877303301180e-002,\n 7.4389876226229707e-003])\n\n assert_almost_equal(stats.skellam.pmf(k, mu1, mu2), skpmfR, decimal=15)\n\n def test_cdf(self):\n # comparison to R, only 5 decimals\n k = numpy.arange(-10, 15)\n mu1, mu2 = 10, 5\n skcdfR = numpy.array(\n [6.4061475386192104e-005, 1.7810985988267694e-004,\n 4.6790611790020336e-004, 1.1596768997212152e-003,\n 2.7077485103056847e-003, 5.9489760066490718e-003,\n 1.2286346724161398e-002, 2.3838698290858034e-002,\n 4.3444850665900668e-002, 7.4392014749310995e-002,\n 1.1979375231607835e-001, 1.8168808048289900e-001,\n 2.6011268998306952e-001, 3.5253150251664261e-001,\n 4.5392943399683988e-001, 5.5764871387982828e-001,\n 6.5672529695723436e-001, 7.4527195703032389e-001,\n 8.1945979908281064e-001, 8.7785257194501087e-001,\n 9.2112126489802404e-001, 9.5136942471639818e-001,\n 9.7136085902200120e-001, 9.8387773632530240e-001,\n 9.9131672394792536e-001])\n\n assert_almost_equal(stats.skellam.cdf(k, mu1, mu2), skcdfR, decimal=5)\n\n\nclass TestLognorm(object):\n def test_pdf(self):\n # Regression test for Ticket #1471: avoid nan with 0/0 situation\n # Also make sure there are no warnings at x=0, cf gh-5202\n 
with warnings.catch_warnings():\n warnings.simplefilter('error', RuntimeWarning)\n pdf = stats.lognorm.pdf([0, 0.5, 1], 1)\n assert_array_almost_equal(pdf, [0.0, 0.62749608, 0.39894228])\n\n def test_logcdf(self):\n # Regression test for gh-5940: sf et al would underflow too early\n x2, mu, sigma = 201.68, 195, 0.149\n assert_allclose(stats.lognorm.sf(x2-mu, s=sigma),\n stats.norm.sf(np.log(x2-mu)/sigma))\n assert_allclose(stats.lognorm.logsf(x2-mu, s=sigma),\n stats.norm.logsf(np.log(x2-mu)/sigma))\n\n\nclass TestBeta(object):\n def test_logpdf(self):\n # Regression test for Ticket #1326: avoid nan with 0*log(0) situation\n logpdf = stats.beta.logpdf(0, 1, 0.5)\n assert_almost_equal(logpdf, -0.69314718056)\n logpdf = stats.beta.logpdf(0, 0.5, 1)\n assert_almost_equal(logpdf, np.inf)\n\n def test_logpdf_ticket_1866(self):\n alpha, beta = 267, 1472\n x = np.array([0.2, 0.5, 0.6])\n b = stats.beta(alpha, beta)\n assert_allclose(b.logpdf(x).sum(), -1201.699061824062)\n assert_allclose(b.pdf(x), np.exp(b.logpdf(x)))\n\n def test_fit_bad_keyword_args(self):\n x = [0.1, 0.5, 0.6]\n assert_raises(TypeError, stats.beta.fit, x, floc=0, fscale=1,\n plate=\"shrimp\")\n\n def test_fit_duplicated_fixed_parameter(self):\n # At most one of 'f0', 'fa' or 'fix_a' can be given to the fit method.\n # More than one raises a ValueError.\n x = [0.1, 0.5, 0.6]\n assert_raises(ValueError, stats.beta.fit, x, fa=0.5, fix_a=0.5)\n\n\nclass TestBetaPrime(object):\n def test_logpdf(self):\n alpha, beta = 267, 1472\n x = np.array([0.2, 0.5, 0.6])\n b = stats.betaprime(alpha, beta)\n assert_(np.isfinite(b.logpdf(x)).all())\n assert_allclose(b.pdf(x), np.exp(b.logpdf(x)))\n\n def test_cdf(self):\n # regression test for gh-4030: Implementation of\n # scipy.stats.betaprime.cdf()\n x = stats.betaprime.cdf(0, 0.2, 0.3)\n assert_equal(x, 0.0)\n\n alpha, beta = 267, 1472\n x = np.array([0.2, 0.5, 0.6])\n cdfs = stats.betaprime.cdf(x, alpha, beta)\n assert_(np.isfinite(cdfs).all())\n\n # check the new cdf implementation vs generic one:\n gen_cdf = stats.rv_continuous._cdf_single\n cdfs_g = [gen_cdf(stats.betaprime, val, alpha, beta) for val in x]\n assert_allclose(cdfs, cdfs_g, atol=0, rtol=2e-12)\n\n\nclass TestGamma(object):\n def test_pdf(self):\n # a few test cases to compare with R\n pdf = stats.gamma.pdf(90, 394, scale=1./5)\n assert_almost_equal(pdf, 0.002312341)\n\n pdf = stats.gamma.pdf(3, 10, scale=1./5)\n assert_almost_equal(pdf, 0.1620358)\n\n def test_logpdf(self):\n # Regression test for Ticket #1326: cornercase avoid nan with 0*log(0)\n # situation\n logpdf = stats.gamma.logpdf(0, 1)\n assert_almost_equal(logpdf, 0)\n\n def test_fit_bad_keyword_args(self):\n x = [0.1, 0.5, 0.6]\n assert_raises(TypeError, stats.gamma.fit, x, floc=0, plate=\"shrimp\")\n\n\nclass TestChi2(object):\n # regression tests after precision improvements, ticket:1041, not verified\n def test_precision(self):\n assert_almost_equal(stats.chi2.pdf(1000, 1000), 8.919133934753128e-003,\n decimal=14)\n assert_almost_equal(stats.chi2.pdf(100, 100), 0.028162503162596778,\n decimal=14)\n\n def test_ppf(self):\n # Expected values computed with mpmath.\n df = 4.8\n x = stats.chi2.ppf(2e-47, df)\n assert_allclose(x, 1.098472479575179840604902808e-19, rtol=1e-10)\n x = stats.chi2.ppf(0.5, df)\n assert_allclose(x, 4.15231407598589358660093156, rtol=1e-10)\n\n df = 13\n x = stats.chi2.ppf(2e-77, df)\n assert_allclose(x, 1.0106330688195199050507943e-11, rtol=1e-10)\n x = stats.chi2.ppf(0.1, df)\n assert_allclose(x, 7.041504580095461859307179763, 
rtol=1e-10)\n\n\nclass TestGumbelL(object):\n # gh-6228\n def test_cdf_ppf(self):\n x = np.linspace(-100, -4)\n y = stats.gumbel_l.cdf(x)\n xx = stats.gumbel_l.ppf(y)\n assert_allclose(x, xx)\n\n def test_logcdf_logsf(self):\n x = np.linspace(-100, -4)\n y = stats.gumbel_l.logcdf(x)\n z = stats.gumbel_l.logsf(x)\n u = np.exp(y)\n v = -special.expm1(z)\n assert_allclose(u, v)\n\n def test_sf_isf(self):\n x = np.linspace(-20, 5)\n y = stats.gumbel_l.sf(x)\n xx = stats.gumbel_l.isf(y)\n assert_allclose(x, xx)\n\n\nclass TestLevyStable(object):\n\n def test_fit(self):\n # construct data to have percentiles that match\n # example in McCulloch 1986.\n x = [-.05413, -.05413,\n 0., 0., 0., 0.,\n .00533, .00533, .00533, .00533, .00533,\n .03354, .03354, .03354, .03354, .03354,\n .05309, .05309, .05309, .05309, .05309]\n alpha1, beta1, loc1, scale1 = stats.levy_stable._fitstart(x)\n assert_allclose(alpha1, 1.48, rtol=0, atol=0.01)\n assert_almost_equal(beta1, -.22, 2)\n assert_almost_equal(scale1, 0.01717, 4)\n # to 2 dps due to rounding error in McCulloch86\n assert_almost_equal(loc1, 0.00233, 2)\n\n # cover alpha=2 scenario\n x2 = x + [.05309, .05309, .05309, .05309, .05309]\n alpha2, beta2, loc2, scale2 = stats.levy_stable._fitstart(x2)\n assert_equal(alpha2, 2)\n assert_equal(beta2, -1)\n assert_almost_equal(scale2, .02503, 4)\n assert_almost_equal(loc2, .03354, 4)\n\n @pytest.mark.slow\n def test_pdf_nolan_samples(self):\n \"\"\" Test pdf values against Nolan's stablec.exe output\n see - http://fs2.american.edu/jpnolan/www/stable/stable.html\n\n There's a known limitation of Nolan's executable for alpha < 0.2.\n\n Repeat following with beta = -1, -.5, 0, .5 and 1\n stablec.exe <<\n 1 # pdf\n 1 # Nolan S equivalent to S0 in scipy\n .25,2,.25 # alpha\n -1,-1,0 # beta\n -10,10,1 # x\n 1,0 # gamma, delta\n 2 # output file\n \"\"\"\n fn = os.path.abspath(os.path.join(os.path.dirname(__file__),\n 'data/stable-pdf-sample-data.npy'))\n data = np.load(fn)\n\n data = np.core.records.fromarrays(data.T, names='x,p,alpha,beta')\n\n # support numpy 1.8.2 for travis\n npisin = np.isin if hasattr(np, \"isin\") else np.in1d\n\n tests = [\n # best selects\n ['best', None, 8, None],\n\n # quadrature is accurate for most alpha except 0.25; perhaps\n # limitation of Nolan stablec?\n # we reduce size of x to speed up computation as numerical\n # integration slow.\n ['quadrature', None, 8,\n lambda r: ((r['alpha'] > 0.25) &\n (npisin(r['x'], [-10, -5, 0, 5, 10])))],\n\n # zolatarev is accurate except at alpha==1, beta != 0\n ['zolotarev', None, 8, lambda r: r['alpha'] != 1],\n ['zolotarev', None, 8,\n lambda r: (r['alpha'] == 1) & (r['beta'] == 0)],\n ['zolotarev', None, 1,\n lambda r: (r['alpha'] == 1) & (r['beta'] != 0)],\n\n # fft accuracy reduces as alpha decreases, fails at low values of\n # alpha and x=0\n ['fft', 0, 4, lambda r: r['alpha'] > 1],\n ['fft', 0, 3, lambda r: (r['alpha'] < 1) & (r['alpha'] > 0.25)],\n # not useful here\n ['fft', 0, 1, lambda r: (r['alpha'] == 0.25) & (r['x'] != 0)],\n ]\n for ix, (default_method, fft_min_points,\n decimal_places, filter_func) in enumerate(tests):\n stats.levy_stable.pdf_default_method = default_method\n stats.levy_stable.pdf_fft_min_points_threshold = fft_min_points\n subdata = (data[filter_func(data)] if filter_func is not None else\n data)\n with suppress_warnings() as sup:\n sup.record(RuntimeWarning,\n \"Density calculation unstable for alpha=1 \"\n \"and beta!=0.*\")\n sup.record(RuntimeWarning,\n \"Density calculations experimental for FFT \"\n 
\"method.*\")\n p = stats.levy_stable.pdf(subdata['x'], subdata['alpha'],\n subdata['beta'], scale=1, loc=0)\n subdata2 = rec_append_fields(subdata, 'calc', p)\n padiff = np.abs(p-subdata['p'])\n failures = subdata2[(padiff >= 1.5*10.**(-decimal_places)) |\n np.isnan(p)]\n assert_almost_equal(p, subdata['p'], decimal_places,\n (\"pdf test %s failed with method '%s'\\n%s\"\n % (ix, default_method, failures)),\n verbose=False)\n\n @pytest.mark.slow\n def test_cdf_nolan_samples(self):\n \"\"\" Test cdf values against Nolan's stablec.exe output\n see - http://fs2.american.edu/jpnolan/www/stable/stable.html\n\n There's a known limitation of Nolan's executable for alpha < 0.2.\n\n Repeat following with beta = -1, -.5, 0, .5 and 1\n stablec.exe <<\n 2 # cdf\n 1 # Nolan S equivalent to S0 in scipy\n .25,2,.25 # alpha\n -1,-1,0 # beta\n -10,10,1 # x\n 1,0 # gamma, delta\n 2 # output file\n \"\"\"\n fn = os.path.abspath(os.path.join(os.path.dirname(__file__),\n 'data/stable-cdf-sample-data.npy'))\n data = np.load(fn)\n\n data = np.core.records.fromarrays(data.T, names='x,p,alpha,beta')\n\n tests = [\n # zolatarev is accurate for all values\n ['zolotarev', None, 8, None],\n\n # fft accuracy poor, very poor alpha < 1\n ['fft', 0, 2, lambda r: r['alpha'] > 1],\n ]\n for ix, (default_method, fft_min_points, decimal_places,\n filter_func) in enumerate(tests):\n stats.levy_stable.pdf_default_method = default_method\n stats.levy_stable.pdf_fft_min_points_threshold = fft_min_points\n subdata = (data[filter_func(data)] if filter_func is not None else\n data)\n with suppress_warnings() as sup:\n sup.record(RuntimeWarning, 'FFT method is considered ' +\n 'experimental for cumulative distribution ' +\n 'function evaluations.*')\n p = stats.levy_stable.cdf(subdata['x'], subdata['alpha'],\n subdata['beta'], scale=1, loc=0)\n subdata2 = rec_append_fields(subdata, 'calc', p)\n padiff = np.abs(p - subdata['p'])\n failures = subdata2[(padiff >= 1.5*10.**(-decimal_places)) |\n np.isnan(p)]\n assert_almost_equal(p, subdata['p'], decimal_places,\n (\"cdf test %s failed with method '%s'\\n%s\"\n % (ix, default_method, failures)),\n verbose=False)\n\n def test_pdf_alpha_equals_one_beta_non_zero(self):\n \"\"\"\n sample points extracted from Tables and Graphs of Stable Probability\n Density Functions - Donald R Holt - 1973 - p 187.\n \"\"\"\n xs = np.array([0, 0, 0, 0,\n 1, 1, 1, 1,\n 2, 2, 2, 2,\n 3, 3, 3, 3,\n 4, 4, 4, 4])\n density = np.array([.3183, .3096, .2925, .2622,\n .1591, .1587, .1599, .1635,\n .0637, .0729, .0812, .0955,\n .0318, .0390, .0458, .0586,\n .0187, .0236, .0285, .0384])\n betas = np.array([0, .25, .5, 1,\n 0, .25, .5, 1,\n 0, .25, .5, 1,\n 0, .25, .5, 1,\n 0, .25, .5, 1])\n\n tests = [\n ['quadrature', None, 4],\n ['zolotarev', None, 1],\n ]\n\n with np.errstate(all='ignore'), suppress_warnings() as sup:\n sup.filter(category=RuntimeWarning,\n message=\"Density calculation unstable.*\")\n for default_method, fft_min_points, decimal_places in tests:\n stats.levy_stable.pdf_default_method = default_method\n stats.levy_stable.pdf_fft_min_points_threshold = fft_min_points\n pdf = stats.levy_stable.pdf(xs, 1, betas, scale=1, loc=0)\n assert_almost_equal(pdf, density, decimal_places,\n default_method)\n\n def test_stats(self):\n param_sets = [\n [(1.48, -.22, 0, 1), (0, np.inf, np.NaN, np.NaN)],\n [(2, .9, 10, 1.5), (10, 4.5, 0, 0)]\n ]\n for args, exp_stats in param_sets:\n calc_stats = stats.levy_stable.stats(args[0], args[1],\n loc=args[2], scale=args[3],\n moments='mvsk')\n 
assert_almost_equal(calc_stats, exp_stats)\n\n\nclass TestArrayArgument(object): # test for ticket:992\n def setup_method(self):\n np.random.seed(1234)\n\n def test_noexception(self):\n rvs = stats.norm.rvs(loc=(np.arange(5)), scale=np.ones(5),\n size=(10, 5))\n assert_equal(rvs.shape, (10, 5))\n\n\nclass TestDocstring(object):\n def test_docstrings(self):\n # See ticket #761\n if stats.rayleigh.__doc__ is not None:\n assert_(\"rayleigh\" in stats.rayleigh.__doc__.lower())\n if stats.bernoulli.__doc__ is not None:\n assert_(\"bernoulli\" in stats.bernoulli.__doc__.lower())\n\n def test_no_name_arg(self):\n # If name is not given, construction shouldn't fail. See #1508.\n stats.rv_continuous()\n stats.rv_discrete()\n\n\nclass TestEntropy(object):\n def test_entropy_positive(self):\n # See ticket #497\n pk = [0.5, 0.2, 0.3]\n qk = [0.1, 0.25, 0.65]\n eself = stats.entropy(pk, pk)\n edouble = stats.entropy(pk, qk)\n assert_(0.0 == eself)\n assert_(edouble >= 0.0)\n\n def test_entropy_base(self):\n pk = np.ones(16, float)\n S = stats.entropy(pk, base=2.)\n assert_(abs(S - 4.) < 1.e-5)\n\n qk = np.ones(16, float)\n qk[:8] = 2.\n S = stats.entropy(pk, qk)\n S2 = stats.entropy(pk, qk, base=2.)\n assert_(abs(S/S2 - np.log(2.)) < 1.e-5)\n\n def test_entropy_zero(self):\n # Test for PR-479\n assert_almost_equal(stats.entropy([0, 1, 2]), 0.63651416829481278,\n decimal=12)\n\n def test_entropy_2d(self):\n pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]\n qk = [[0.2, 0.1], [0.3, 0.6], [0.5, 0.3]]\n assert_array_almost_equal(stats.entropy(pk, qk),\n [0.1933259, 0.18609809])\n\n def test_entropy_2d_zero(self):\n pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]\n qk = [[0.0, 0.1], [0.3, 0.6], [0.5, 0.3]]\n assert_array_almost_equal(stats.entropy(pk, qk),\n [np.inf, 0.18609809])\n\n pk[0][0] = 0.0\n assert_array_almost_equal(stats.entropy(pk, qk),\n [0.17403988, 0.18609809])\n\n def test_entropy_base_2d_nondefault_axis(self):\n pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]\n assert_array_almost_equal(stats.entropy(pk, axis=1),\n [0.63651417, 0.63651417, 0.66156324])\n\n def test_entropy_2d_nondefault_axis(self):\n pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]\n qk = [[0.2, 0.1], [0.3, 0.6], [0.5, 0.3]]\n assert_array_almost_equal(stats.entropy(pk, qk, axis=1),\n [0.231049, 0.231049, 0.127706])\n\n def test_entropy_raises_value_error(self):\n pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]\n qk = [[0.1, 0.2], [0.6, 0.3]]\n assert_raises(ValueError, stats.entropy, pk, qk)\n\n def test_base_entropy_with_axis_0_is_equal_to_default(self):\n pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]\n assert_array_almost_equal(stats.entropy(pk, axis=0),\n stats.entropy(pk))\n\n def test_entropy_with_axis_0_is_equal_to_default(self):\n pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]\n qk = [[0.2, 0.1], [0.3, 0.6], [0.5, 0.3]]\n assert_array_almost_equal(stats.entropy(pk, qk, axis=0),\n stats.entropy(pk, qk))\n\n def test_base_entropy_transposed(self):\n pk = np.array([[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]])\n assert_array_almost_equal(stats.entropy(pk.T).T,\n stats.entropy(pk, axis=1))\n\n def test_entropy_transposed(self):\n pk = np.array([[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]])\n qk = np.array([[0.2, 0.1], [0.3, 0.6], [0.5, 0.3]])\n assert_array_almost_equal(stats.entropy(pk.T, qk.T).T,\n stats.entropy(pk, qk, axis=1))\n\n\ndef TestArgsreduce():\n a = array([1, 3, 2, 1, 2, 3, 3])\n b, c = argsreduce(a > 1, a, 2)\n\n assert_array_equal(b, [3, 2, 2, 3, 3])\n assert_array_equal(c, [2, 2, 2, 2, 2])\n\n b, c = argsreduce(2 > 1, a, 2)\n assert_array_equal(b, a[0])\n 
assert_array_equal(c, [2])\n\n b, c = argsreduce(a > 0, a, 2)\n assert_array_equal(b, a)\n assert_array_equal(c, [2] * numpy.size(a))\n\n\nclass TestFitMethod(object):\n skip = ['ncf', 'ksone', 'kstwo']\n\n def setup_method(self):\n np.random.seed(1234)\n\n # skip these b/c deprecated, or only loc and scale arguments\n fitSkipNonFinite = ['expon', 'norm', 'uniform']\n\n @pytest.mark.parametrize('dist,args', distcont)\n def test_fit_w_non_finite_data_values(self, dist, args):\n \"\"\"gh-10300\"\"\"\n if dist in self.fitSkipNonFinite:\n pytest.skip(\"%s fit known to fail or deprecated\" % dist)\n x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.nan])\n y = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.inf])\n distfunc = getattr(stats, dist)\n assert_raises(RuntimeError, distfunc.fit, x, floc=0, fscale=1)\n assert_raises(RuntimeError, distfunc.fit, y, floc=0, fscale=1)\n\n def test_fix_fit_2args_lognorm(self):\n # Regression test for #1551.\n np.random.seed(12345)\n with np.errstate(all='ignore'):\n x = stats.lognorm.rvs(0.25, 0., 20.0, size=20)\n expected_shape = np.sqrt(((np.log(x) - np.log(20))**2).mean())\n assert_allclose(np.array(stats.lognorm.fit(x, floc=0, fscale=20)),\n [expected_shape, 0, 20], atol=1e-8)\n\n def test_fix_fit_norm(self):\n x = np.arange(1, 6)\n\n loc, scale = stats.norm.fit(x)\n assert_almost_equal(loc, 3)\n assert_almost_equal(scale, np.sqrt(2))\n\n loc, scale = stats.norm.fit(x, floc=2)\n assert_equal(loc, 2)\n assert_equal(scale, np.sqrt(3))\n\n loc, scale = stats.norm.fit(x, fscale=2)\n assert_almost_equal(loc, 3)\n assert_equal(scale, 2)\n\n def test_fix_fit_gamma(self):\n x = np.arange(1, 6)\n meanlog = np.log(x).mean()\n\n # A basic test of gamma.fit with floc=0.\n floc = 0\n a, loc, scale = stats.gamma.fit(x, floc=floc)\n s = np.log(x.mean()) - meanlog\n assert_almost_equal(np.log(a) - special.digamma(a), s, decimal=5)\n assert_equal(loc, floc)\n assert_almost_equal(scale, x.mean()/a, decimal=8)\n\n # Regression tests for gh-2514.\n # The problem was that if `floc=0` was given, any other fixed\n # parameters were ignored.\n f0 = 1\n floc = 0\n a, loc, scale = stats.gamma.fit(x, f0=f0, floc=floc)\n assert_equal(a, f0)\n assert_equal(loc, floc)\n assert_almost_equal(scale, x.mean()/a, decimal=8)\n\n f0 = 2\n floc = 0\n a, loc, scale = stats.gamma.fit(x, f0=f0, floc=floc)\n assert_equal(a, f0)\n assert_equal(loc, floc)\n assert_almost_equal(scale, x.mean()/a, decimal=8)\n\n # loc and scale fixed.\n floc = 0\n fscale = 2\n a, loc, scale = stats.gamma.fit(x, floc=floc, fscale=fscale)\n assert_equal(loc, floc)\n assert_equal(scale, fscale)\n c = meanlog - np.log(fscale)\n assert_almost_equal(special.digamma(a), c)\n\n def test_fix_fit_beta(self):\n # Test beta.fit when both floc and fscale are given.\n\n def mlefunc(a, b, x):\n # Zeros of this function are critical points of\n # the maximum likelihood function.\n n = len(x)\n s1 = np.log(x).sum()\n s2 = np.log(1-x).sum()\n psiab = special.psi(a + b)\n func = [s1 - n * (-psiab + special.psi(a)),\n s2 - n * (-psiab + special.psi(b))]\n return func\n\n # Basic test with floc and fscale given.\n x = np.array([0.125, 0.25, 0.5])\n a, b, loc, scale = stats.beta.fit(x, floc=0, fscale=1)\n assert_equal(loc, 0)\n assert_equal(scale, 1)\n assert_allclose(mlefunc(a, b, x), [0, 0], atol=1e-6)\n\n # Basic test with f0, floc and fscale given.\n # This is also a regression test for gh-2514.\n x = np.array([0.125, 0.25, 0.5])\n a, b, loc, scale = stats.beta.fit(x, f0=2, floc=0, fscale=1)\n assert_equal(a, 2)\n 
assert_equal(loc, 0)\n assert_equal(scale, 1)\n da, db = mlefunc(a, b, x)\n assert_allclose(db, 0, atol=1e-5)\n\n # Same floc and fscale values as above, but reverse the data\n # and fix b (f1).\n x2 = 1 - x\n a2, b2, loc2, scale2 = stats.beta.fit(x2, f1=2, floc=0, fscale=1)\n assert_equal(b2, 2)\n assert_equal(loc2, 0)\n assert_equal(scale2, 1)\n da, db = mlefunc(a2, b2, x2)\n assert_allclose(da, 0, atol=1e-5)\n # a2 of this test should equal b from above.\n assert_almost_equal(a2, b)\n\n # Check for detection of data out of bounds when floc and fscale\n # are given.\n assert_raises(ValueError, stats.beta.fit, x, floc=0.5, fscale=1)\n y = np.array([0, .5, 1])\n assert_raises(ValueError, stats.beta.fit, y, floc=0, fscale=1)\n assert_raises(ValueError, stats.beta.fit, y, floc=0, fscale=1, f0=2)\n assert_raises(ValueError, stats.beta.fit, y, floc=0, fscale=1, f1=2)\n\n # Check that attempting to fix all the parameters raises a ValueError.\n assert_raises(ValueError, stats.beta.fit, y, f0=0, f1=1,\n floc=2, fscale=3)\n\n def test_expon_fit(self):\n x = np.array([2, 2, 4, 4, 4, 4, 4, 8])\n\n loc, scale = stats.expon.fit(x)\n assert_equal(loc, 2) # x.min()\n assert_equal(scale, 2) # x.mean() - x.min()\n\n loc, scale = stats.expon.fit(x, fscale=3)\n assert_equal(loc, 2) # x.min()\n assert_equal(scale, 3) # fscale\n\n loc, scale = stats.expon.fit(x, floc=0)\n assert_equal(loc, 0) # floc\n assert_equal(scale, 4) # x.mean() - loc\n\n def test_lognorm_fit(self):\n x = np.array([1.5, 3, 10, 15, 23, 59])\n lnxm1 = np.log(x - 1)\n\n shape, loc, scale = stats.lognorm.fit(x, floc=1)\n assert_allclose(shape, lnxm1.std(), rtol=1e-12)\n assert_equal(loc, 1)\n assert_allclose(scale, np.exp(lnxm1.mean()), rtol=1e-12)\n\n shape, loc, scale = stats.lognorm.fit(x, floc=1, fscale=6)\n assert_allclose(shape, np.sqrt(((lnxm1 - np.log(6))**2).mean()),\n rtol=1e-12)\n assert_equal(loc, 1)\n assert_equal(scale, 6)\n\n shape, loc, scale = stats.lognorm.fit(x, floc=1, fix_s=0.75)\n assert_equal(shape, 0.75)\n assert_equal(loc, 1)\n assert_allclose(scale, np.exp(lnxm1.mean()), rtol=1e-12)\n\n def test_uniform_fit(self):\n x = np.array([1.0, 1.1, 1.2, 9.0])\n\n loc, scale = stats.uniform.fit(x)\n assert_equal(loc, x.min())\n assert_equal(scale, x.ptp())\n\n loc, scale = stats.uniform.fit(x, floc=0)\n assert_equal(loc, 0)\n assert_equal(scale, x.max())\n\n loc, scale = stats.uniform.fit(x, fscale=10)\n assert_equal(loc, 0)\n assert_equal(scale, 10)\n\n assert_raises(ValueError, stats.uniform.fit, x, floc=2.0)\n assert_raises(ValueError, stats.uniform.fit, x, fscale=5.0)\n\n def test_fshapes(self):\n # take a beta distribution, with shapes='a, b', and make sure that\n # fa is equivalent to f0, and fb is equivalent to f1\n a, b = 3., 4.\n x = stats.beta.rvs(a, b, size=100, random_state=1234)\n res_1 = stats.beta.fit(x, f0=3.)\n res_2 = stats.beta.fit(x, fa=3.)\n assert_allclose(res_1, res_2, atol=1e-12, rtol=1e-12)\n\n res_2 = stats.beta.fit(x, fix_a=3.)\n assert_allclose(res_1, res_2, atol=1e-12, rtol=1e-12)\n\n res_3 = stats.beta.fit(x, f1=4.)\n res_4 = stats.beta.fit(x, fb=4.)\n assert_allclose(res_3, res_4, atol=1e-12, rtol=1e-12)\n\n res_4 = stats.beta.fit(x, fix_b=4.)\n assert_allclose(res_3, res_4, atol=1e-12, rtol=1e-12)\n\n # cannot specify both positional and named args at the same time\n assert_raises(ValueError, stats.beta.fit, x, fa=1, f0=2)\n\n # check that attempting to fix all parameters raises a ValueError\n assert_raises(ValueError, stats.beta.fit, x, fa=0, f1=1,\n floc=2, fscale=3)\n\n # check that 
specifying floc, fscale and fshapes works for\n # beta and gamma which override the generic fit method\n res_5 = stats.beta.fit(x, fa=3., floc=0, fscale=1)\n aa, bb, ll, ss = res_5\n assert_equal([aa, ll, ss], [3., 0, 1])\n\n # gamma distribution\n a = 3.\n data = stats.gamma.rvs(a, size=100)\n aa, ll, ss = stats.gamma.fit(data, fa=a)\n assert_equal(aa, a)\n\n def test_extra_params(self):\n # unknown parameters should raise rather than be silently ignored\n dist = stats.exponnorm\n data = dist.rvs(K=2, size=100)\n dct = dict(enikibeniki=-101)\n assert_raises(TypeError, dist.fit, data, **dct)\n\n\nclass TestFrozen(object):\n def setup_method(self):\n np.random.seed(1234)\n\n # Test that a frozen distribution gives the same results as the original\n # object.\n #\n # Only tested for the normal distribution (with loc and scale specified)\n # and for the gamma distribution (with a shape parameter specified).\n def test_norm(self):\n dist = stats.norm\n frozen = stats.norm(loc=10.0, scale=3.0)\n\n result_f = frozen.pdf(20.0)\n result = dist.pdf(20.0, loc=10.0, scale=3.0)\n assert_equal(result_f, result)\n\n result_f = frozen.cdf(20.0)\n result = dist.cdf(20.0, loc=10.0, scale=3.0)\n assert_equal(result_f, result)\n\n result_f = frozen.ppf(0.25)\n result = dist.ppf(0.25, loc=10.0, scale=3.0)\n assert_equal(result_f, result)\n\n result_f = frozen.isf(0.25)\n result = dist.isf(0.25, loc=10.0, scale=3.0)\n assert_equal(result_f, result)\n\n result_f = frozen.sf(10.0)\n result = dist.sf(10.0, loc=10.0, scale=3.0)\n assert_equal(result_f, result)\n\n result_f = frozen.median()\n result = dist.median(loc=10.0, scale=3.0)\n assert_equal(result_f, result)\n\n result_f = frozen.mean()\n result = dist.mean(loc=10.0, scale=3.0)\n assert_equal(result_f, result)\n\n result_f = frozen.var()\n result = dist.var(loc=10.0, scale=3.0)\n assert_equal(result_f, result)\n\n result_f = frozen.std()\n result = dist.std(loc=10.0, scale=3.0)\n assert_equal(result_f, result)\n\n result_f = frozen.entropy()\n result = dist.entropy(loc=10.0, scale=3.0)\n assert_equal(result_f, result)\n\n result_f = frozen.moment(2)\n result = dist.moment(2, loc=10.0, scale=3.0)\n assert_equal(result_f, result)\n\n assert_equal(frozen.a, dist.a)\n assert_equal(frozen.b, dist.b)\n\n def test_gamma(self):\n a = 2.0\n dist = stats.gamma\n frozen = stats.gamma(a)\n\n result_f = frozen.pdf(20.0)\n result = dist.pdf(20.0, a)\n assert_equal(result_f, result)\n\n result_f = frozen.cdf(20.0)\n result = dist.cdf(20.0, a)\n assert_equal(result_f, result)\n\n result_f = frozen.ppf(0.25)\n result = dist.ppf(0.25, a)\n assert_equal(result_f, result)\n\n result_f = frozen.isf(0.25)\n result = dist.isf(0.25, a)\n assert_equal(result_f, result)\n\n result_f = frozen.sf(10.0)\n result = dist.sf(10.0, a)\n assert_equal(result_f, result)\n\n result_f = frozen.median()\n result = dist.median(a)\n assert_equal(result_f, result)\n\n result_f = frozen.mean()\n result = dist.mean(a)\n assert_equal(result_f, result)\n\n result_f = frozen.var()\n result = dist.var(a)\n assert_equal(result_f, result)\n\n result_f = frozen.std()\n result = dist.std(a)\n assert_equal(result_f, result)\n\n result_f = frozen.entropy()\n result = dist.entropy(a)\n assert_equal(result_f, result)\n\n result_f = frozen.moment(2)\n result = dist.moment(2, a)\n assert_equal(result_f, result)\n\n assert_equal(frozen.a, frozen.dist.a)\n assert_equal(frozen.b, frozen.dist.b)\n\n def test_regression_ticket_1293(self):\n # Create a frozen distribution.\n frozen = stats.lognorm(1)\n # Call one of 
its methods that does not take any keyword arguments.\n m1 = frozen.moment(2)\n # Now call a method that takes a keyword argument.\n frozen.stats(moments='mvsk')\n # Call moment(2) again.\n # After calling stats(), the following was raising an exception.\n # So this test passes if the following does not raise an exception.\n m2 = frozen.moment(2)\n # The following should also be true, of course. But it is not\n # the focus of this test.\n assert_equal(m1, m2)\n\n def test_ab(self):\n # test that the support of a frozen distribution\n # (i) remains frozen even if it changes for the original one\n # (ii) is actually correct if the shape parameters are such that\n # the values of [a, b] are not the default [0, inf]\n # take a genpareto as an example where the support\n # depends on the value of the shape parameter:\n # for c > 0: a, b = 0, inf\n # for c < 0: a, b = 0, -1/c\n\n c = -0.1\n rv = stats.genpareto(c=c)\n a, b = rv.dist._get_support(c)\n assert_equal([a, b], [0., 10.])\n\n c = 0.1\n stats.genpareto.pdf(0, c=c)\n assert_equal(rv.dist._get_support(c), [0, np.inf])\n\n c = -0.1\n rv = stats.genpareto(c=c)\n a, b = rv.dist._get_support(c)\n assert_equal([a, b], [0., 10.])\n\n c = 0.1\n stats.genpareto.pdf(0, c) # this should NOT change genpareto.b\n assert_equal((rv.dist.a, rv.dist.b), stats.genpareto._get_support(c))\n\n rv1 = stats.genpareto(c=0.1)\n assert_(rv1.dist is not rv.dist)\n\n # c >= 0: a, b = [0, inf]\n for c in [1., 0.]:\n c = np.asarray(c)\n rv = stats.genpareto(c=c)\n a, b = rv.a, rv.b\n assert_equal(a, 0.)\n assert_(np.isposinf(b))\n\n # c < 0: a=0, b=1/|c|\n c = np.asarray(-2.)\n a, b = stats.genpareto._get_support(c)\n assert_allclose([a, b], [0., 0.5])\n\n def test_rv_frozen_in_namespace(self):\n # Regression test for gh-3522\n assert_(hasattr(stats.distributions, 'rv_frozen'))\n\n def test_random_state(self):\n # only check that the random_state attribute exists,\n frozen = stats.norm()\n assert_(hasattr(frozen, 'random_state'))\n\n # ... that it can be set,\n frozen.random_state = 42\n assert_equal(frozen.random_state.get_state(),\n np.random.RandomState(42).get_state())\n\n # ... 
and that .rvs method accepts it as an argument\n rndm = np.random.RandomState(1234)\n frozen.rvs(size=8, random_state=rndm)\n\n def test_pickling(self):\n # test that a frozen instance pickles and unpickles\n # (this method is a clone of common_tests.check_pickling)\n beta = stats.beta(2.3098496451481823, 0.62687954300963677)\n poiss = stats.poisson(3.)\n sample = stats.rv_discrete(values=([0, 1, 2, 3],\n [0.1, 0.2, 0.3, 0.4]))\n\n for distfn in [beta, poiss, sample]:\n distfn.random_state = 1234\n distfn.rvs(size=8)\n s = pickle.dumps(distfn)\n r0 = distfn.rvs(size=8)\n\n unpickled = pickle.loads(s)\n r1 = unpickled.rvs(size=8)\n assert_equal(r0, r1)\n\n # also smoke test some methods\n medians = [distfn.ppf(0.5), unpickled.ppf(0.5)]\n assert_equal(medians[0], medians[1])\n assert_equal(distfn.cdf(medians[0]),\n unpickled.cdf(medians[1]))\n\n def test_expect(self):\n # smoke test the expect method of the frozen distribution\n # only take a gamma w/loc and scale and poisson with loc specified\n def func(x):\n return x\n\n gm = stats.gamma(a=2, loc=3, scale=4)\n gm_val = gm.expect(func, lb=1, ub=2, conditional=True)\n gamma_val = stats.gamma.expect(func, args=(2,), loc=3, scale=4,\n lb=1, ub=2, conditional=True)\n assert_allclose(gm_val, gamma_val)\n\n p = stats.poisson(3, loc=4)\n p_val = p.expect(func)\n poisson_val = stats.poisson.expect(func, args=(3,), loc=4)\n assert_allclose(p_val, poisson_val)\n\n\nclass TestExpect(object):\n # Test for expect method.\n #\n # Uses normal distribution and beta distribution for finite bounds, and\n # hypergeom for discrete distribution with finite support\n def test_norm(self):\n v = stats.norm.expect(lambda x: (x-5)*(x-5), loc=5, scale=2)\n assert_almost_equal(v, 4, decimal=14)\n\n m = stats.norm.expect(lambda x: (x), loc=5, scale=2)\n assert_almost_equal(m, 5, decimal=14)\n\n lb = stats.norm.ppf(0.05, loc=5, scale=2)\n ub = stats.norm.ppf(0.95, loc=5, scale=2)\n prob90 = stats.norm.expect(lambda x: 1, loc=5, scale=2, lb=lb, ub=ub)\n assert_almost_equal(prob90, 0.9, decimal=14)\n\n prob90c = stats.norm.expect(lambda x: 1, loc=5, scale=2, lb=lb, ub=ub,\n conditional=True)\n assert_almost_equal(prob90c, 1., decimal=14)\n\n def test_beta(self):\n # case with finite support interval\n v = stats.beta.expect(lambda x: (x-19/3.)*(x-19/3.), args=(10, 5),\n loc=5, scale=2)\n assert_almost_equal(v, 1./18., decimal=13)\n\n m = stats.beta.expect(lambda x: x, args=(10, 5), loc=5., scale=2.)\n assert_almost_equal(m, 19/3., decimal=13)\n\n ub = stats.beta.ppf(0.95, 10, 10, loc=5, scale=2)\n lb = stats.beta.ppf(0.05, 10, 10, loc=5, scale=2)\n prob90 = stats.beta.expect(lambda x: 1., args=(10, 10), loc=5.,\n scale=2., lb=lb, ub=ub, conditional=False)\n assert_almost_equal(prob90, 0.9, decimal=13)\n\n prob90c = stats.beta.expect(lambda x: 1, args=(10, 10), loc=5,\n scale=2, lb=lb, ub=ub, conditional=True)\n assert_almost_equal(prob90c, 1., decimal=13)\n\n def test_hypergeom(self):\n # test case with finite bounds\n\n # without specifying bounds\n m_true, v_true = stats.hypergeom.stats(20, 10, 8, loc=5.)\n m = stats.hypergeom.expect(lambda x: x, args=(20, 10, 8), loc=5.)\n assert_almost_equal(m, m_true, decimal=13)\n\n v = stats.hypergeom.expect(lambda x: (x-9.)**2, args=(20, 10, 8),\n loc=5.)\n assert_almost_equal(v, v_true, decimal=14)\n\n # with bounds, bounds equal to shifted support\n v_bounds = stats.hypergeom.expect(lambda x: (x-9.)**2,\n args=(20, 10, 8),\n loc=5., lb=5, ub=13)\n assert_almost_equal(v_bounds, v_true, decimal=14)\n\n # drop boundary points\n 
prob_true = 1-stats.hypergeom.pmf([5, 13], 20, 10, 8, loc=5).sum()\n prob_bounds = stats.hypergeom.expect(lambda x: 1, args=(20, 10, 8),\n loc=5., lb=6, ub=12)\n assert_almost_equal(prob_bounds, prob_true, decimal=13)\n\n # conditional\n prob_bc = stats.hypergeom.expect(lambda x: 1, args=(20, 10, 8), loc=5.,\n lb=6, ub=12, conditional=True)\n assert_almost_equal(prob_bc, 1, decimal=14)\n\n # check simple integral\n prob_b = stats.hypergeom.expect(lambda x: 1, args=(20, 10, 8),\n lb=0, ub=8)\n assert_almost_equal(prob_b, 1, decimal=13)\n\n def test_poisson(self):\n # poisson, use lower bound only\n prob_bounds = stats.poisson.expect(lambda x: 1, args=(2,), lb=3,\n conditional=False)\n prob_b_true = 1-stats.poisson.cdf(2, 2)\n assert_almost_equal(prob_bounds, prob_b_true, decimal=14)\n\n prob_lb = stats.poisson.expect(lambda x: 1, args=(2,), lb=2,\n conditional=True)\n assert_almost_equal(prob_lb, 1, decimal=14)\n\n def test_genhalflogistic(self):\n # genhalflogistic, changes upper bound of support in _argcheck\n # regression test for gh-2622\n halflog = stats.genhalflogistic\n # check consistency when calling expect twice with the same input\n res1 = halflog.expect(args=(1.5,))\n halflog.expect(args=(0.5,))\n res2 = halflog.expect(args=(1.5,))\n assert_almost_equal(res1, res2, decimal=14)\n\n def test_rice_overflow(self):\n # rice.pdf(999, 0.74) was inf since special.i0 silentyly overflows\n # check that using i0e fixes it\n assert_(np.isfinite(stats.rice.pdf(999, 0.74)))\n\n assert_(np.isfinite(stats.rice.expect(lambda x: 1, args=(0.74,))))\n assert_(np.isfinite(stats.rice.expect(lambda x: 2, args=(0.74,))))\n assert_(np.isfinite(stats.rice.expect(lambda x: 3, args=(0.74,))))\n\n def test_logser(self):\n # test a discrete distribution with infinite support and loc\n p, loc = 0.3, 3\n res_0 = stats.logser.expect(lambda k: k, args=(p,))\n # check against the correct answer (sum of a geom series)\n assert_allclose(res_0,\n p / (p - 1.) / np.log(1. - p), atol=1e-15)\n\n # now check it with `loc`\n res_l = stats.logser.expect(lambda k: k, args=(p,), loc=loc)\n assert_allclose(res_l, res_0 + loc, atol=1e-15)\n\n def test_skellam(self):\n # Use a discrete distribution w/ bi-infinite support. 
Compute two first\n # moments and compare to known values (cf skellam.stats)\n p1, p2 = 18, 22\n m1 = stats.skellam.expect(lambda x: x, args=(p1, p2))\n m2 = stats.skellam.expect(lambda x: x**2, args=(p1, p2))\n assert_allclose(m1, p1 - p2, atol=1e-12)\n assert_allclose(m2 - m1**2, p1 + p2, atol=1e-12)\n\n def test_randint(self):\n # Use a discrete distribution w/ parameter-dependent support, which\n # is larger than the default chunksize\n lo, hi = 0, 113\n res = stats.randint.expect(lambda x: x, (lo, hi))\n assert_allclose(res,\n sum(_ for _ in range(lo, hi)) / (hi - lo), atol=1e-15)\n\n def test_zipf(self):\n # Test that there is no infinite loop even if the sum diverges\n assert_warns(RuntimeWarning, stats.zipf.expect,\n lambda x: x**2, (2,))\n\n def test_discrete_kwds(self):\n # check that discrete expect accepts keywords to control the summation\n n0 = stats.poisson.expect(lambda x: 1, args=(2,))\n n1 = stats.poisson.expect(lambda x: 1, args=(2,),\n maxcount=1001, chunksize=32, tolerance=1e-8)\n assert_almost_equal(n0, n1, decimal=14)\n\n def test_moment(self):\n # test the .moment() method: compute a higher moment and compare to\n # a known value\n def poiss_moment5(mu):\n return mu**5 + 10*mu**4 + 25*mu**3 + 15*mu**2 + mu\n\n for mu in [5, 7]:\n m5 = stats.poisson.moment(5, mu)\n assert_allclose(m5, poiss_moment5(mu), rtol=1e-10)\n\n\nclass TestNct(object):\n def test_nc_parameter(self):\n # Parameter values c<=0 were not enabled (gh-2402).\n # For negative values c and for c=0 results of rv.cdf(0) below were nan\n rv = stats.nct(5, 0)\n assert_equal(rv.cdf(0), 0.5)\n rv = stats.nct(5, -1)\n assert_almost_equal(rv.cdf(0), 0.841344746069, decimal=10)\n\n def test_broadcasting(self):\n res = stats.nct.pdf(5, np.arange(4, 7)[:, None],\n np.linspace(0.1, 1, 4))\n expected = array([[0.00321886, 0.00557466, 0.00918418, 0.01442997],\n [0.00217142, 0.00395366, 0.00683888, 0.01126276],\n [0.00153078, 0.00291093, 0.00525206, 0.00900815]])\n assert_allclose(res, expected, rtol=1e-5)\n\n def test_variance_gh_issue_2401(self):\n # Computation of the variance of a non-central t-distribution resulted\n # in a TypeError: ufunc 'isinf' not supported for the input types,\n # and the inputs could not be safely coerced to any supported types\n # according to the casting rule 'safe'\n rv = stats.nct(4, 0)\n assert_equal(rv.var(), 2.0)\n\n def test_nct_inf_moments(self):\n # n-th moment of nct only exists for df > n\n m, v, s, k = stats.nct.stats(df=1.9, nc=0.3, moments='mvsk')\n assert_(np.isfinite(m))\n assert_equal([v, s, k], [np.inf, np.nan, np.nan])\n\n m, v, s, k = stats.nct.stats(df=3.1, nc=0.3, moments='mvsk')\n assert_(np.isfinite([m, v, s]).all())\n assert_equal(k, np.nan)\n\n\nclass TestRice(object):\n def test_rice_zero_b(self):\n # rice distribution should work with b=0, cf gh-2164\n x = [0.2, 1., 5.]\n assert_(np.isfinite(stats.rice.pdf(x, b=0.)).all())\n assert_(np.isfinite(stats.rice.logpdf(x, b=0.)).all())\n assert_(np.isfinite(stats.rice.cdf(x, b=0.)).all())\n assert_(np.isfinite(stats.rice.logcdf(x, b=0.)).all())\n\n q = [0.1, 0.1, 0.5, 0.9]\n assert_(np.isfinite(stats.rice.ppf(q, b=0.)).all())\n\n mvsk = stats.rice.stats(0, moments='mvsk')\n assert_(np.isfinite(mvsk).all())\n\n # furthermore, pdf is continuous as b\\to 0\n # rice.pdf(x, b\\to 0) = x exp(-x^2/2) + O(b^2)\n # see e.g. 
Abramovich & Stegun 9.6.7 & 9.6.10\n b = 1e-8\n assert_allclose(stats.rice.pdf(x, 0), stats.rice.pdf(x, b),\n atol=b, rtol=0)\n\n def test_rice_rvs(self):\n rvs = stats.rice.rvs\n assert_equal(rvs(b=3.).size, 1)\n assert_equal(rvs(b=3., size=(3, 5)).shape, (3, 5))\n\n\nclass TestErlang(object):\n def setup_method(self):\n np.random.seed(1234)\n\n def test_erlang_runtimewarning(self):\n # erlang should generate a RuntimeWarning if a non-integer\n # shape parameter is used.\n with warnings.catch_warnings():\n warnings.simplefilter(\"error\", RuntimeWarning)\n\n # The non-integer shape parameter 1.3 should trigger a\n # RuntimeWarning\n assert_raises(RuntimeWarning,\n stats.erlang.rvs, 1.3, loc=0, scale=1, size=4)\n\n # Calling the fit method with `f0` set to an integer should\n # *not* trigger a RuntimeWarning. It should return the same\n # values as gamma.fit(...).\n data = [0.5, 1.0, 2.0, 4.0]\n result_erlang = stats.erlang.fit(data, f0=1)\n result_gamma = stats.gamma.fit(data, f0=1)\n assert_allclose(result_erlang, result_gamma, rtol=1e-3)\n\n def test_gh_pr_10949_argcheck(self):\n assert_equal(stats.erlang.pdf(0.5, a=[1, -1]),\n stats.gamma.pdf(0.5, a=[1, -1]))\n\n\nclass TestRayleigh(object):\n # gh-6227\n def test_logpdf(self):\n y = stats.rayleigh.logpdf(50)\n assert_allclose(y, -1246.0879769945718)\n\n def test_logsf(self):\n y = stats.rayleigh.logsf(50)\n assert_allclose(y, -1250)\n\n\nclass TestExponWeib(object):\n\n def test_pdf_logpdf(self):\n # Regression test for gh-3508.\n x = 0.1\n a = 1.0\n c = 100.0\n p = stats.exponweib.pdf(x, a, c)\n logp = stats.exponweib.logpdf(x, a, c)\n # Expected values were computed with mpmath.\n assert_allclose([p, logp],\n [1.0000000000000054e-97, -223.35075402042244])\n\n def test_a_is_1(self):\n # For issue gh-3508.\n # Check that when a=1, the pdf and logpdf methods of exponweib are the\n # same as those of weibull_min.\n x = np.logspace(-4, -1, 4)\n a = 1\n c = 100\n\n p = stats.exponweib.pdf(x, a, c)\n expected = stats.weibull_min.pdf(x, c)\n assert_allclose(p, expected)\n\n logp = stats.exponweib.logpdf(x, a, c)\n expected = stats.weibull_min.logpdf(x, c)\n assert_allclose(logp, expected)\n\n def test_a_is_1_c_is_1(self):\n # When a = 1 and c = 1, the distribution is exponential.\n x = np.logspace(-8, 1, 10)\n a = 1\n c = 1\n\n p = stats.exponweib.pdf(x, a, c)\n expected = stats.expon.pdf(x)\n assert_allclose(p, expected)\n\n logp = stats.exponweib.logpdf(x, a, c)\n expected = stats.expon.logpdf(x)\n assert_allclose(logp, expected)\n\n\nclass TestWeibull(object):\n\n def test_logpdf(self):\n # gh-6217\n y = stats.weibull_min.logpdf(0, 1)\n assert_equal(y, 0)\n\n def test_with_maxima_distrib(self):\n # Tests for weibull_min and weibull_max.\n # The expected values were computed using the symbolic algebra\n # program 'maxima' with the package 'distrib', which has\n # 'pdf_weibull' and 'cdf_weibull'. 
The mapping between the\n # scipy and maxima functions is as follows:\n # -----------------------------------------------------------------\n # scipy maxima\n # --------------------------------- ------------------------------\n # weibull_min.pdf(x, a, scale=b) pdf_weibull(x, a, b)\n # weibull_min.logpdf(x, a, scale=b) log(pdf_weibull(x, a, b))\n # weibull_min.cdf(x, a, scale=b) cdf_weibull(x, a, b)\n # weibull_min.logcdf(x, a, scale=b) log(cdf_weibull(x, a, b))\n # weibull_min.sf(x, a, scale=b) 1 - cdf_weibull(x, a, b)\n # weibull_min.logsf(x, a, scale=b) log(1 - cdf_weibull(x, a, b))\n #\n # weibull_max.pdf(x, a, scale=b) pdf_weibull(-x, a, b)\n # weibull_max.logpdf(x, a, scale=b) log(pdf_weibull(-x, a, b))\n # weibull_max.cdf(x, a, scale=b) 1 - cdf_weibull(-x, a, b)\n # weibull_max.logcdf(x, a, scale=b) log(1 - cdf_weibull(-x, a, b))\n # weibull_max.sf(x, a, scale=b) cdf_weibull(-x, a, b)\n # weibull_max.logsf(x, a, scale=b) log(cdf_weibull(-x, a, b))\n # -----------------------------------------------------------------\n x = 1.5\n a = 2.0\n b = 3.0\n\n # weibull_min\n\n p = stats.weibull_min.pdf(x, a, scale=b)\n assert_allclose(p, np.exp(-0.25)/3)\n\n lp = stats.weibull_min.logpdf(x, a, scale=b)\n assert_allclose(lp, -0.25 - np.log(3))\n\n c = stats.weibull_min.cdf(x, a, scale=b)\n assert_allclose(c, -special.expm1(-0.25))\n\n lc = stats.weibull_min.logcdf(x, a, scale=b)\n assert_allclose(lc, np.log(-special.expm1(-0.25)))\n\n s = stats.weibull_min.sf(x, a, scale=b)\n assert_allclose(s, np.exp(-0.25))\n\n ls = stats.weibull_min.logsf(x, a, scale=b)\n assert_allclose(ls, -0.25)\n\n # Also test using a large value x, for which computing the survival\n # function using the CDF would result in 0.\n s = stats.weibull_min.sf(30, 2, scale=3)\n assert_allclose(s, np.exp(-100))\n\n ls = stats.weibull_min.logsf(30, 2, scale=3)\n assert_allclose(ls, -100)\n\n # weibull_max\n x = -1.5\n\n p = stats.weibull_max.pdf(x, a, scale=b)\n assert_allclose(p, np.exp(-0.25)/3)\n\n lp = stats.weibull_max.logpdf(x, a, scale=b)\n assert_allclose(lp, -0.25 - np.log(3))\n\n c = stats.weibull_max.cdf(x, a, scale=b)\n assert_allclose(c, np.exp(-0.25))\n\n lc = stats.weibull_max.logcdf(x, a, scale=b)\n assert_allclose(lc, -0.25)\n\n s = stats.weibull_max.sf(x, a, scale=b)\n assert_allclose(s, -special.expm1(-0.25))\n\n ls = stats.weibull_max.logsf(x, a, scale=b)\n assert_allclose(ls, np.log(-special.expm1(-0.25)))\n\n # Also test using a value of x close to 0, for which computing the\n # survival function using the CDF would result in 0.\n s = stats.weibull_max.sf(-1e-9, 2, scale=3)\n assert_allclose(s, -special.expm1(-1/9000000000000000000))\n\n ls = stats.weibull_max.logsf(-1e-9, 2, scale=3)\n assert_allclose(ls, np.log(-special.expm1(-1/9000000000000000000)))\n\n\nclass TestRdist(object):\n def test_rdist_cdf_gh1285(self):\n # check workaround in rdist._cdf for issue gh-1285.\n distfn = stats.rdist\n values = [0.001, 0.5, 0.999]\n assert_almost_equal(distfn.cdf(distfn.ppf(values, 541.0), 541.0),\n values, decimal=5)\n\n def test_rdist_beta(self):\n # rdist is a special case of stats.beta\n x = np.linspace(-0.99, 0.99, 10)\n c = 2.7\n assert_almost_equal(0.5*stats.beta(c/2, c/2).pdf((x + 1)/2),\n stats.rdist(c).pdf(x))\n\n\nclass TestTrapz(object):\n def test_reduces_to_triang(self):\n modes = [0, 0.3, 0.5, 1]\n for mode in modes:\n x = [0, mode, 1]\n assert_almost_equal(stats.trapz.pdf(x, mode, mode),\n stats.triang.pdf(x, mode))\n assert_almost_equal(stats.trapz.cdf(x, mode, mode),\n stats.triang.cdf(x, 
mode))\n\n def test_reduces_to_uniform(self):\n x = np.linspace(0, 1, 10)\n assert_almost_equal(stats.trapz.pdf(x, 0, 1), stats.uniform.pdf(x))\n assert_almost_equal(stats.trapz.cdf(x, 0, 1), stats.uniform.cdf(x))\n\n def test_cases(self):\n # edge cases\n assert_almost_equal(stats.trapz.pdf(0, 0, 0), 2)\n assert_almost_equal(stats.trapz.pdf(1, 1, 1), 2)\n assert_almost_equal(stats.trapz.pdf(0.5, 0, 0.8),\n 1.11111111111111111)\n assert_almost_equal(stats.trapz.pdf(0.5, 0.2, 1.0),\n 1.11111111111111111)\n\n # straightforward case\n assert_almost_equal(stats.trapz.pdf(0.1, 0.2, 0.8), 0.625)\n assert_almost_equal(stats.trapz.pdf(0.5, 0.2, 0.8), 1.25)\n assert_almost_equal(stats.trapz.pdf(0.9, 0.2, 0.8), 0.625)\n\n assert_almost_equal(stats.trapz.cdf(0.1, 0.2, 0.8), 0.03125)\n assert_almost_equal(stats.trapz.cdf(0.2, 0.2, 0.8), 0.125)\n assert_almost_equal(stats.trapz.cdf(0.5, 0.2, 0.8), 0.5)\n assert_almost_equal(stats.trapz.cdf(0.9, 0.2, 0.8), 0.96875)\n assert_almost_equal(stats.trapz.cdf(1.0, 0.2, 0.8), 1.0)\n\n def test_moments_and_entropy(self):\n # issue #11795: improve precision of trapz stats\n # Apply formulas from Wikipedia for the following parameters:\n a, b, c, d = -3, -1, 2, 3 # => 1/3, 5/6, -3, 6\n p1, p2, loc, scale = (b-a) / (d-a), (c-a) / (d-a), a, d-a\n h = 2 / (d+c-b-a)\n\n def moment(n):\n return (h * ((d**(n+2) - c**(n+2)) / (d-c)\n - (b**(n+2) - a**(n+2)) / (b-a)) /\n (n+1) / (n+2))\n\n mean = moment(1)\n var = moment(2) - mean**2\n entropy = 0.5 * (d-c+b-a) / (d+c-b-a) + np.log(0.5 * (d+c-b-a))\n assert_almost_equal(stats.trapz.mean(p1, p2, loc, scale),\n mean, decimal=13)\n assert_almost_equal(stats.trapz.var(p1, p2, loc, scale),\n var, decimal=13)\n assert_almost_equal(stats.trapz.entropy(p1, p2, loc, scale),\n entropy, decimal=13)\n\n # Check boundary cases where scipy d=0 or d=1.\n assert_almost_equal(stats.trapz.mean(0, 0, -3, 6), -1, decimal=13)\n assert_almost_equal(stats.trapz.mean(0, 1, -3, 6), 0, decimal=13)\n assert_almost_equal(stats.trapz.var(0, 1, -3, 6), 3, decimal=13)\n\n def test_trapz_vect(self):\n # test that array-valued shapes and arguments are handled\n c = np.array([0.1, 0.2, 0.3])\n d = np.array([0.5, 0.6])[:, None]\n x = np.array([0.15, 0.25, 0.9])\n v = stats.trapz.pdf(x, c, d)\n\n cc, dd, xx = np.broadcast_arrays(c, d, x)\n\n res = np.empty(xx.size, dtype=xx.dtype)\n ind = np.arange(xx.size)\n for i, x1, c1, d1 in zip(ind, xx.ravel(), cc.ravel(), dd.ravel()):\n res[i] = stats.trapz.pdf(x1, c1, d1)\n\n assert_allclose(v, res.reshape(v.shape), atol=1e-15)\n\n # Check that the stats() method supports vector arguments.\n v = np.asarray(stats.trapz.stats(c, d, moments=\"mvsk\"))\n cc, dd = np.broadcast_arrays(c, d)\n res = np.empty((cc.size, 4)) # 4 stats returned per value\n ind = np.arange(cc.size)\n for i, c1, d1 in zip(ind, cc.ravel(), dd.ravel()):\n res[i] = stats.trapz.stats(c1, d1, moments=\"mvsk\")\n\n assert_allclose(v, res.T.reshape(v.shape), atol=1e-15)\n\n\nclass TestTriang(object):\n def test_edge_cases(self):\n with np.errstate(all='raise'):\n assert_equal(stats.triang.pdf(0, 0), 2.)\n assert_equal(stats.triang.pdf(0.5, 0), 1.)\n assert_equal(stats.triang.pdf(1, 0), 0.)\n\n assert_equal(stats.triang.pdf(0, 1), 0)\n assert_equal(stats.triang.pdf(0.5, 1), 1.)\n assert_equal(stats.triang.pdf(1, 1), 2)\n\n assert_equal(stats.triang.cdf(0., 0.), 0.)\n assert_equal(stats.triang.cdf(0.5, 0.), 0.75)\n assert_equal(stats.triang.cdf(1.0, 0.), 1.0)\n\n assert_equal(stats.triang.cdf(0., 1.), 0.)\n assert_equal(stats.triang.cdf(0.5, 1.), 0.25)\n 
assert_equal(stats.triang.cdf(1., 1.), 1)\n\n\nclass TestMielke(object):\n def test_moments(self):\n k, s = 4.642, 0.597\n # n-th moment exists only if n < s\n assert_equal(stats.mielke(k, s).moment(1), np.inf)\n assert_equal(stats.mielke(k, 1.0).moment(1), np.inf)\n assert_(np.isfinite(stats.mielke(k, 1.01).moment(1)))\n\n def test_burr_equivalence(self):\n x = np.linspace(0.01, 100, 50)\n k, s = 2.45, 5.32\n assert_allclose(stats.burr.pdf(x, s, k/s), stats.mielke.pdf(x, k, s))\n\n\nclass TestBurr(object):\n def test_endpoints_7491(self):\n # gh-7491\n # Compute the pdf at the left endpoint dst.a.\n data = [\n [stats.fisk, (1,), 1],\n [stats.burr, (0.5, 2), 1],\n [stats.burr, (1, 1), 1],\n [stats.burr, (2, 0.5), 1],\n [stats.burr12, (1, 0.5), 0.5],\n [stats.burr12, (1, 1), 1.0],\n [stats.burr12, (1, 2), 2.0]]\n\n ans = [_f.pdf(_f.a, *_args) for _f, _args, _ in data]\n correct = [_correct_ for _f, _args, _correct_ in data]\n assert_array_almost_equal(ans, correct)\n\n ans = [_f.logpdf(_f.a, *_args) for _f, _args, _ in data]\n correct = [np.log(_correct_) for _f, _args, _correct_ in data]\n assert_array_almost_equal(ans, correct)\n\n def test_burr_stats_9544(self):\n # gh-9544. Test from gh-9978\n c, d = 5.0, 3\n mean, variance = stats.burr(c, d).stats()\n # mean = sc.beta(3 + 1/5, 1. - 1/5) * 3 = 1.4110263...\n # var = sc.beta(3 + 2 / 5, 1. - 2 / 5) * 3 -\n # (sc.beta(3 + 1 / 5, 1. - 1 / 5) * 3) ** 2\n mean_hc, variance_hc = 1.4110263183925857, 0.22879948026191643\n assert_allclose(mean, mean_hc)\n assert_allclose(variance, variance_hc)\n\n def test_burr_nan_mean_var_9544(self):\n # gh-9544. Test from gh-9978\n c, d = 0.5, 3\n mean, variance = stats.burr(c, d).stats()\n assert_(np.isnan(mean))\n assert_(np.isnan(variance))\n c, d = 1.5, 3\n mean, variance = stats.burr(c, d).stats()\n assert_(np.isfinite(mean))\n assert_(np.isnan(variance))\n\n c, d = 0.5, 3\n e1, e2, e3, e4 = stats.burr._munp(np.array([1, 2, 3, 4]), c, d)\n assert_(np.isnan(e1))\n assert_(np.isnan(e2))\n assert_(np.isnan(e3))\n assert_(np.isnan(e4))\n c, d = 1.5, 3\n e1, e2, e3, e4 = stats.burr._munp([1, 2, 3, 4], c, d)\n assert_(np.isfinite(e1))\n assert_(np.isnan(e2))\n assert_(np.isnan(e3))\n assert_(np.isnan(e4))\n c, d = 2.5, 3\n e1, e2, e3, e4 = stats.burr._munp([1, 2, 3, 4], c, d)\n assert_(np.isfinite(e1))\n assert_(np.isfinite(e2))\n assert_(np.isnan(e3))\n assert_(np.isnan(e4))\n c, d = 3.5, 3\n e1, e2, e3, e4 = stats.burr._munp([1, 2, 3, 4], c, d)\n assert_(np.isfinite(e1))\n assert_(np.isfinite(e2))\n assert_(np.isfinite(e3))\n assert_(np.isnan(e4))\n c, d = 4.5, 3\n e1, e2, e3, e4 = stats.burr._munp([1, 2, 3, 4], c, d)\n assert_(np.isfinite(e1))\n assert_(np.isfinite(e2))\n assert_(np.isfinite(e3))\n assert_(np.isfinite(e4))\n\n\ndef test_540_567():\n # test for nan returned in tickets 540, 567\n assert_almost_equal(stats.norm.cdf(-1.7624320982), 0.03899815971089126,\n decimal=10, err_msg='test_540_567')\n assert_almost_equal(stats.norm.cdf(-1.7624320983), 0.038998159702449846,\n decimal=10, err_msg='test_540_567')\n assert_almost_equal(stats.norm.cdf(1.38629436112, loc=0.950273420309,\n scale=0.204423758009),\n 0.98353464004309321,\n decimal=10, err_msg='test_540_567')\n\n\ndef test_regression_ticket_1316():\n # The following was raising an exception, because _construct_default_doc()\n # did not handle the default keyword extradoc=None. 
See ticket #1316.\n stats._continuous_distns.gamma_gen(name='gamma')\n\n\ndef test_regression_ticket_1326():\n # adjust to avoid nan with 0*log(0)\n assert_almost_equal(stats.chi2.pdf(0.0, 2), 0.5, 14)\n\n\ndef test_regression_tukey_lambda():\n # Make sure that Tukey-Lambda distribution correctly handles\n # non-positive lambdas.\n x = np.linspace(-5.0, 5.0, 101)\n\n with np.errstate(divide='ignore'):\n for lam in [0.0, -1.0, -2.0, np.array([[-1.0], [0.0], [-2.0]])]:\n p = stats.tukeylambda.pdf(x, lam)\n assert_((p != 0.0).all())\n assert_(~np.isnan(p).all())\n\n lam = np.array([[-1.0], [0.0], [2.0]])\n p = stats.tukeylambda.pdf(x, lam)\n\n assert_(~np.isnan(p).all())\n assert_((p[0] != 0.0).all())\n assert_((p[1] != 0.0).all())\n assert_((p[2] != 0.0).any())\n assert_((p[2] == 0.0).any())\n\n\n@pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason=\"docstrings stripped\")\ndef test_regression_ticket_1421():\n assert_('pdf(x, mu, loc=0, scale=1)' not in stats.poisson.__doc__)\n assert_('pmf(x,' in stats.poisson.__doc__)\n\n\ndef test_nan_arguments_gh_issue_1362():\n with np.errstate(invalid='ignore'):\n assert_(np.isnan(stats.t.logcdf(1, np.nan)))\n assert_(np.isnan(stats.t.cdf(1, np.nan)))\n assert_(np.isnan(stats.t.logsf(1, np.nan)))\n assert_(np.isnan(stats.t.sf(1, np.nan)))\n assert_(np.isnan(stats.t.pdf(1, np.nan)))\n assert_(np.isnan(stats.t.logpdf(1, np.nan)))\n assert_(np.isnan(stats.t.ppf(1, np.nan)))\n assert_(np.isnan(stats.t.isf(1, np.nan)))\n\n assert_(np.isnan(stats.bernoulli.logcdf(np.nan, 0.5)))\n assert_(np.isnan(stats.bernoulli.cdf(np.nan, 0.5)))\n assert_(np.isnan(stats.bernoulli.logsf(np.nan, 0.5)))\n assert_(np.isnan(stats.bernoulli.sf(np.nan, 0.5)))\n assert_(np.isnan(stats.bernoulli.pmf(np.nan, 0.5)))\n assert_(np.isnan(stats.bernoulli.logpmf(np.nan, 0.5)))\n assert_(np.isnan(stats.bernoulli.ppf(np.nan, 0.5)))\n assert_(np.isnan(stats.bernoulli.isf(np.nan, 0.5)))\n\n\ndef test_frozen_fit_ticket_1536():\n np.random.seed(5678)\n true = np.array([0.25, 0., 0.5])\n x = stats.lognorm.rvs(true[0], true[1], true[2], size=100)\n\n with np.errstate(divide='ignore'):\n params = np.array(stats.lognorm.fit(x, floc=0.))\n\n assert_almost_equal(params, true, decimal=2)\n\n params = np.array(stats.lognorm.fit(x, fscale=0.5, loc=0))\n assert_almost_equal(params, true, decimal=2)\n\n params = np.array(stats.lognorm.fit(x, f0=0.25, loc=0))\n assert_almost_equal(params, true, decimal=2)\n\n params = np.array(stats.lognorm.fit(x, f0=0.25, floc=0))\n assert_almost_equal(params, true, decimal=2)\n\n np.random.seed(5678)\n loc = 1\n floc = 0.9\n x = stats.norm.rvs(loc, 2., size=100)\n params = np.array(stats.norm.fit(x, floc=floc))\n expected = np.array([floc, np.sqrt(((x-floc)**2).mean())])\n assert_almost_equal(params, expected, decimal=4)\n\n\ndef test_regression_ticket_1530():\n # Check the starting value works for Cauchy distribution fit.\n np.random.seed(654321)\n rvs = stats.cauchy.rvs(size=100)\n params = stats.cauchy.fit(rvs)\n expected = (0.045, 1.142)\n assert_almost_equal(params, expected, decimal=1)\n\n\ndef test_gh_pr_4806():\n # Check starting values for Cauchy distribution fit.\n np.random.seed(1234)\n x = np.random.randn(42)\n for offset in 10000.0, 1222333444.0:\n loc, scale = stats.cauchy.fit(x + offset)\n assert_allclose(loc, offset, atol=1.0)\n assert_allclose(scale, 0.6, atol=1.0)\n\n\ndef test_tukeylambda_stats_ticket_1545():\n # Some test for the variance and kurtosis of the Tukey Lambda distr.\n # See test_tukeylamdba_stats.py for more tests.\n\n mv = 
stats.tukeylambda.stats(0, moments='mvsk')\n # Known exact values:\n expected = [0, np.pi**2/3, 0, 1.2]\n assert_almost_equal(mv, expected, decimal=10)\n\n mv = stats.tukeylambda.stats(3.13, moments='mvsk')\n # 'expected' computed with mpmath.\n expected = [0, 0.0269220858861465102, 0, -0.898062386219224104]\n assert_almost_equal(mv, expected, decimal=10)\n\n mv = stats.tukeylambda.stats(0.14, moments='mvsk')\n # 'expected' computed with mpmath.\n expected = [0, 2.11029702221450250, 0, -0.02708377353223019456]\n assert_almost_equal(mv, expected, decimal=10)\n\n\ndef test_poisson_logpmf_ticket_1436():\n assert_(np.isfinite(stats.poisson.logpmf(1500, 200)))\n\n\ndef test_powerlaw_stats():\n \"\"\"Test the powerlaw stats function.\n\n This unit test is also a regression test for ticket 1548.\n\n The exact values are:\n mean:\n mu = a / (a + 1)\n variance:\n sigma**2 = a / ((a + 2) * (a + 1) ** 2)\n skewness:\n One formula (see https://en.wikipedia.org/wiki/Skewness) is\n gamma_1 = (E[X**3] - 3*mu*E[X**2] + 2*mu**3) / sigma**3\n A short calculation shows that E[X**k] is a / (a + k), so gamma_1\n can be implemented as\n n = a/(a+3) - 3*(a/(a+1))*a/(a+2) + 2*(a/(a+1))**3\n d = sqrt(a/((a+2)*(a+1)**2)) ** 3\n gamma_1 = n/d\n Either by simplifying, or by a direct calculation of mu_3 / sigma**3,\n one gets the more concise formula:\n gamma_1 = -2.0 * ((a - 1) / (a + 3)) * sqrt((a + 2) / a)\n kurtosis: (See https://en.wikipedia.org/wiki/Kurtosis)\n The excess kurtosis is\n gamma_2 = mu_4 / sigma**4 - 3\n A bit of calculus and algebra (sympy helps) shows that\n mu_4 = 3*a*(3*a**2 - a + 2) / ((a+1)**4 * (a+2) * (a+3) * (a+4))\n so\n gamma_2 = 3*(3*a**2 - a + 2) * (a+2) / (a*(a+3)*(a+4)) - 3\n which can be rearranged to\n gamma_2 = 6 * (a**3 - a**2 - 6*a + 2) / (a*(a+3)*(a+4))\n \"\"\"\n cases = [(1.0, (0.5, 1./12, 0.0, -1.2)),\n (2.0, (2./3, 2./36, -0.56568542494924734, -0.6))]\n for a, exact_mvsk in cases:\n mvsk = stats.powerlaw.stats(a, moments=\"mvsk\")\n assert_array_almost_equal(mvsk, exact_mvsk)\n\n\ndef test_powerlaw_edge():\n # Regression test for gh-3986.\n p = stats.powerlaw.logpdf(0, 1)\n assert_equal(p, 0.0)\n\n\ndef test_exponpow_edge():\n # Regression test for gh-3982.\n p = stats.exponpow.logpdf(0, 1)\n assert_equal(p, 0.0)\n\n # Check pdf and logpdf at x = 0 for other values of b.\n p = stats.exponpow.pdf(0, [0.25, 1.0, 1.5])\n assert_equal(p, [np.inf, 1.0, 0.0])\n p = stats.exponpow.logpdf(0, [0.25, 1.0, 1.5])\n assert_equal(p, [np.inf, 0.0, -np.inf])\n\n\ndef test_gengamma_edge():\n # Regression test for gh-3985.\n p = stats.gengamma.pdf(0, 1, 1)\n assert_equal(p, 1.0)\n\n # Regression tests for gh-4724.\n p = stats.gengamma._munp(-2, 200, 1.)\n assert_almost_equal(p, 1./199/198)\n\n p = stats.gengamma._munp(-2, 10, 1.)\n assert_almost_equal(p, 1./9/8)\n\n\ndef test_ksone_fit_freeze():\n # Regression test for ticket #1638.\n d = np.array(\n [-0.18879233, 0.15734249, 0.18695107, 0.27908787, -0.248649,\n -0.2171497, 0.12233512, 0.15126419, 0.03119282, 0.4365294,\n 0.08930393, -0.23509903, 0.28231224, -0.09974875, -0.25196048,\n 0.11102028, 0.1427649, 0.10176452, 0.18754054, 0.25826724,\n 0.05988819, 0.0531668, 0.21906056, 0.32106729, 0.2117662,\n 0.10886442, 0.09375789, 0.24583286, -0.22968366, -0.07842391,\n -0.31195432, -0.21271196, 0.1114243, -0.13293002, 0.01331725,\n -0.04330977, -0.09485776, -0.28434547, 0.22245721, -0.18518199,\n -0.10943985, -0.35243174, 0.06897665, -0.03553363, -0.0701746,\n -0.06037974, 0.37670779, -0.21684405])\n\n with np.errstate(invalid='ignore'):\n with 
suppress_warnings() as sup:\n sup.filter(IntegrationWarning,\n \"The maximum number of subdivisions .50. has been \"\n \"achieved.\")\n sup.filter(RuntimeWarning,\n \"floating point number truncated to an integer\")\n stats.ksone.fit(d)\n\n\ndef test_norm_logcdf():\n # Test precision of the logcdf of the normal distribution.\n # This precision was enhanced in ticket 1614.\n x = -np.asarray(list(range(0, 120, 4)))\n # Values from R\n expected = [-0.69314718, -10.36010149, -35.01343716, -75.41067300,\n -131.69539607, -203.91715537, -292.09872100, -396.25241451,\n -516.38564863, -652.50322759, -804.60844201, -972.70364403,\n -1156.79057310, -1356.87055173, -1572.94460885, -1805.01356068,\n -2053.07806561, -2317.13866238, -2597.19579746, -2893.24984493,\n -3205.30112136, -3533.34989701, -3877.39640444, -4237.44084522,\n -4613.48339520, -5005.52420869, -5413.56342187, -5837.60115548,\n -6277.63751711, -6733.67260303]\n\n assert_allclose(stats.norm().logcdf(x), expected, atol=1e-8)\n\n # also test the complex-valued code path\n assert_allclose(stats.norm().logcdf(x + 1e-14j).real, expected, atol=1e-8)\n\n # test the accuracy: d(logcdf)/dx = pdf / cdf \\equiv exp(logpdf - logcdf)\n deriv = (stats.norm.logcdf(x + 1e-10j)/1e-10).imag\n deriv_expected = np.exp(stats.norm.logpdf(x) - stats.norm.logcdf(x))\n assert_allclose(deriv, deriv_expected, atol=1e-10)\n\n\ndef test_levy_cdf_ppf():\n # Test levy.cdf, including small arguments.\n x = np.array([1000, 1.0, 0.5, 0.1, 0.01, 0.001])\n\n # Expected values were calculated separately with mpmath.\n # E.g.\n # >>> mpmath.mp.dps = 100\n # >>> x = mpmath.mp.mpf('0.01')\n # >>> cdf = mpmath.erfc(mpmath.sqrt(1/(2*x)))\n expected = np.array([0.9747728793699604,\n 0.3173105078629141,\n 0.1572992070502851,\n 0.0015654022580025495,\n 1.523970604832105e-23,\n 1.795832784800726e-219])\n\n y = stats.levy.cdf(x)\n assert_allclose(y, expected, rtol=1e-10)\n\n # ppf(expected) should get us back to x.\n xx = stats.levy.ppf(expected)\n assert_allclose(xx, x, rtol=1e-13)\n\n\ndef test_levy_sf():\n # Large values, far into the tail of the distribution.\n x = np.array([1e15, 1e25, 1e35, 1e50])\n # Expected values were calculated with mpmath.\n expected = np.array([2.5231325220201597e-08,\n 2.52313252202016e-13,\n 2.52313252202016e-18,\n 7.978845608028653e-26])\n y = stats.levy.sf(x)\n assert_allclose(y, expected, rtol=1e-14)\n\n\ndef test_levy_l_sf():\n # Test levy_l.sf for small arguments.\n x = np.array([-0.016, -0.01, -0.005, -0.0015])\n # Expected values were calculated with mpmath.\n expected = np.array([2.6644463892359302e-15,\n 1.523970604832107e-23,\n 2.0884875837625492e-45,\n 5.302850374626878e-147])\n y = stats.levy_l.sf(x)\n assert_allclose(y, expected, rtol=1e-13)\n\n\ndef test_levy_l_isf():\n # Test roundtrip sf(isf(p)), including a small input value.\n p = np.array([3.0e-15, 0.25, 0.99])\n x = stats.levy_l.isf(p)\n q = stats.levy_l.sf(x)\n assert_allclose(q, p, rtol=5e-14)\n\n\ndef test_hypergeom_interval_1802():\n # these two had endless loops\n assert_equal(stats.hypergeom.interval(.95, 187601, 43192, 757),\n (152.0, 197.0))\n assert_equal(stats.hypergeom.interval(.945, 187601, 43192, 757),\n (152.0, 197.0))\n # this was working also before\n assert_equal(stats.hypergeom.interval(.94, 187601, 43192, 757),\n (153.0, 196.0))\n\n # degenerate case .a == .b\n assert_equal(stats.hypergeom.ppf(0.02, 100, 100, 8), 8)\n assert_equal(stats.hypergeom.ppf(1, 100, 100, 8), 8)\n\n\ndef test_distribution_too_many_args():\n np.random.seed(1234)\n\n # Check that a TypeError 
is raised when too many args are given to a method\n # Regression test for ticket 1815.\n x = np.linspace(0.1, 0.7, num=5)\n assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, loc=1.0)\n assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, 4, loc=1.0)\n assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, 4, 5)\n assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, loc=1.0, scale=0.5)\n assert_raises(TypeError, stats.gamma.rvs, 2., 3, loc=1.0, scale=0.5)\n assert_raises(TypeError, stats.gamma.cdf, x, 2., 3, loc=1.0, scale=0.5)\n assert_raises(TypeError, stats.gamma.ppf, x, 2., 3, loc=1.0, scale=0.5)\n assert_raises(TypeError, stats.gamma.stats, 2., 3, loc=1.0, scale=0.5)\n assert_raises(TypeError, stats.gamma.entropy, 2., 3, loc=1.0, scale=0.5)\n assert_raises(TypeError, stats.gamma.fit, x, 2., 3, loc=1.0, scale=0.5)\n\n # These should not give errors\n stats.gamma.pdf(x, 2, 3) # loc=3\n stats.gamma.pdf(x, 2, 3, 4) # loc=3, scale=4\n stats.gamma.stats(2., 3)\n stats.gamma.stats(2., 3, 4)\n stats.gamma.stats(2., 3, 4, 'mv')\n stats.gamma.rvs(2., 3, 4, 5)\n stats.gamma.fit(stats.gamma.rvs(2., size=7), 2.)\n\n # Also for a discrete distribution\n stats.geom.pmf(x, 2, loc=3) # no error, loc=3\n assert_raises(TypeError, stats.geom.pmf, x, 2, 3, 4)\n assert_raises(TypeError, stats.geom.pmf, x, 2, 3, loc=4)\n\n # And for distributions with 0, 2 and 3 args respectively\n assert_raises(TypeError, stats.expon.pdf, x, 3, loc=1.0)\n assert_raises(TypeError, stats.exponweib.pdf, x, 3, 4, 5, loc=1.0)\n assert_raises(TypeError, stats.exponweib.pdf, x, 3, 4, 5, 0.1, 0.1)\n assert_raises(TypeError, stats.ncf.pdf, x, 3, 4, 5, 6, loc=1.0)\n assert_raises(TypeError, stats.ncf.pdf, x, 3, 4, 5, 6, 1.0, scale=0.5)\n stats.ncf.pdf(x, 3, 4, 5, 6, 1.0) # 3 args, plus loc/scale\n\n\ndef test_ncx2_tails_ticket_955():\n # Trac #955 -- check that the cdf computed by special functions\n # matches the integrated pdf\n a = stats.ncx2.cdf(np.arange(20, 25, 0.2), 2, 1.07458615e+02)\n b = stats.ncx2._cdfvec(np.arange(20, 25, 0.2), 2, 1.07458615e+02)\n assert_allclose(a, b, rtol=1e-3, atol=0)\n\n\ndef test_ncx2_tails_pdf():\n # ncx2.pdf does not return nans in extreme tails(example from gh-1577)\n # NB: this is to check that nan_to_num is not needed in ncx2.pdf\n with warnings.catch_warnings():\n warnings.simplefilter('error', RuntimeWarning)\n assert_equal(stats.ncx2.pdf(1, np.arange(340, 350), 2), 0)\n logval = stats.ncx2.logpdf(1, np.arange(340, 350), 2)\n\n assert_(np.isneginf(logval).all())\n\n # Verify logpdf has extended precision when pdf underflows to 0\n with warnings.catch_warnings():\n warnings.simplefilter('error', RuntimeWarning)\n assert_equal(stats.ncx2.pdf(10000, 3, 12), 0)\n assert_allclose(stats.ncx2.logpdf(10000, 3, 12), -4662.444377524883)\n\n\n@pytest.mark.parametrize('method, expected', [\n ('cdf', np.array([2.497951336e-09, 3.437288941e-10])),\n ('pdf', np.array([1.238579980e-07, 1.710041145e-08])),\n ('logpdf', np.array([-15.90413011, -17.88416331])),\n ('ppf', np.array([4.865182052, 7.017182271]))\n])\ndef test_ncx2_zero_nc(method, expected):\n # gh-5441\n # ncx2 with nc=0 is identical to chi2\n # Comparison to R (v3.5.1)\n # > options(digits=10)\n # > pchisq(0.1, df=10, ncp=c(0,4))\n # > dchisq(0.1, df=10, ncp=c(0,4))\n # > dchisq(0.1, df=10, ncp=c(0,4), log=TRUE)\n # > qchisq(0.1, df=10, ncp=c(0,4))\n\n result = getattr(stats.ncx2, method)(0.1, nc=[0, 4], df=10)\n assert_allclose(result, expected, atol=1e-15)\n\n\ndef test_ncx2_zero_nc_rvs():\n # gh-5441\n # ncx2 with nc=0 is identical to chi2\n result = 
stats.ncx2.rvs(df=10, nc=0, random_state=1)\n expected = stats.chi2.rvs(df=10, random_state=1)\n assert_allclose(result, expected, atol=1e-15)\n\n\ndef test_foldnorm_zero():\n # Parameter value c=0 was not enabled, see gh-2399.\n rv = stats.foldnorm(0, scale=1)\n assert_equal(rv.cdf(0), 0) # rv.cdf(0) previously resulted in: nan\n\n\ndef test_stats_shapes_argcheck():\n # stats method was failing for vector shapes if some of the values\n # were outside of the allowed range, see gh-2678\n mv3 = stats.invgamma.stats([0.0, 0.5, 1.0], 1, 0.5) # 0 is not a legal `a`\n mv2 = stats.invgamma.stats([0.5, 1.0], 1, 0.5)\n mv2_augmented = tuple(np.r_[np.nan, _] for _ in mv2)\n assert_equal(mv2_augmented, mv3)\n\n # -1 is not a legal shape parameter\n mv3 = stats.lognorm.stats([2, 2.4, -1])\n mv2 = stats.lognorm.stats([2, 2.4])\n mv2_augmented = tuple(np.r_[_, np.nan] for _ in mv2)\n assert_equal(mv2_augmented, mv3)\n\n # FIXME: this is only a quick-and-dirty test of a quick-and-dirty bugfix.\n # stats method with multiple shape parameters is not properly vectorized\n # anyway, so some distributions may or may not fail.\n\n\n# Test subclassing distributions w/ explicit shapes\n\nclass _distr_gen(stats.rv_continuous):\n def _pdf(self, x, a):\n return 42\n\n\nclass _distr2_gen(stats.rv_continuous):\n def _cdf(self, x, a):\n return 42 * a + x\n\n\nclass _distr3_gen(stats.rv_continuous):\n def _pdf(self, x, a, b):\n return a + b\n\n def _cdf(self, x, a):\n # Different # of shape params from _pdf, to be able to check that\n # inspection catches the inconsistency.\"\"\"\n return 42 * a + x\n\n\nclass _distr6_gen(stats.rv_continuous):\n # Two shape parameters (both _pdf and _cdf defined, consistent shapes.)\n def _pdf(self, x, a, b):\n return a*x + b\n\n def _cdf(self, x, a, b):\n return 42 * a + x\n\n\nclass TestSubclassingExplicitShapes(object):\n # Construct a distribution w/ explicit shapes parameter and test it.\n\n def test_correct_shapes(self):\n dummy_distr = _distr_gen(name='dummy', shapes='a')\n assert_equal(dummy_distr.pdf(1, a=1), 42)\n\n def test_wrong_shapes_1(self):\n dummy_distr = _distr_gen(name='dummy', shapes='A')\n assert_raises(TypeError, dummy_distr.pdf, 1, **dict(a=1))\n\n def test_wrong_shapes_2(self):\n dummy_distr = _distr_gen(name='dummy', shapes='a, b, c')\n dct = dict(a=1, b=2, c=3)\n assert_raises(TypeError, dummy_distr.pdf, 1, **dct)\n\n def test_shapes_string(self):\n # shapes must be a string\n dct = dict(name='dummy', shapes=42)\n assert_raises(TypeError, _distr_gen, **dct)\n\n def test_shapes_identifiers_1(self):\n # shapes must be a comma-separated list of valid python identifiers\n dct = dict(name='dummy', shapes='(!)')\n assert_raises(SyntaxError, _distr_gen, **dct)\n\n def test_shapes_identifiers_2(self):\n dct = dict(name='dummy', shapes='4chan')\n assert_raises(SyntaxError, _distr_gen, **dct)\n\n def test_shapes_identifiers_3(self):\n dct = dict(name='dummy', shapes='m(fti)')\n assert_raises(SyntaxError, _distr_gen, **dct)\n\n def test_shapes_identifiers_nodefaults(self):\n dct = dict(name='dummy', shapes='a=2')\n assert_raises(SyntaxError, _distr_gen, **dct)\n\n def test_shapes_args(self):\n dct = dict(name='dummy', shapes='*args')\n assert_raises(SyntaxError, _distr_gen, **dct)\n\n def test_shapes_kwargs(self):\n dct = dict(name='dummy', shapes='**kwargs')\n assert_raises(SyntaxError, _distr_gen, **dct)\n\n def test_shapes_keywords(self):\n # python keywords cannot be used for shape parameters\n dct = dict(name='dummy', shapes='a, b, c, lambda')\n 
assert_raises(SyntaxError, _distr_gen, **dct)\n\n def test_shapes_signature(self):\n # test explicit shapes which agree w/ the signature of _pdf\n class _dist_gen(stats.rv_continuous):\n def _pdf(self, x, a):\n return stats.norm._pdf(x) * a\n\n dist = _dist_gen(shapes='a')\n assert_equal(dist.pdf(0.5, a=2), stats.norm.pdf(0.5)*2)\n\n def test_shapes_signature_inconsistent(self):\n # test explicit shapes which do not agree w/ the signature of _pdf\n class _dist_gen(stats.rv_continuous):\n def _pdf(self, x, a):\n return stats.norm._pdf(x) * a\n\n dist = _dist_gen(shapes='a, b')\n assert_raises(TypeError, dist.pdf, 0.5, **dict(a=1, b=2))\n\n def test_star_args(self):\n # test _pdf with only starargs\n # NB: **kwargs of pdf will never reach _pdf\n class _dist_gen(stats.rv_continuous):\n def _pdf(self, x, *args):\n extra_kwarg = args[0]\n return stats.norm._pdf(x) * extra_kwarg\n\n dist = _dist_gen(shapes='extra_kwarg')\n assert_equal(dist.pdf(0.5, extra_kwarg=33), stats.norm.pdf(0.5)*33)\n assert_equal(dist.pdf(0.5, 33), stats.norm.pdf(0.5)*33)\n assert_raises(TypeError, dist.pdf, 0.5, **dict(xxx=33))\n\n def test_star_args_2(self):\n # test _pdf with named & starargs\n # NB: **kwargs of pdf will never reach _pdf\n class _dist_gen(stats.rv_continuous):\n def _pdf(self, x, offset, *args):\n extra_kwarg = args[0]\n return stats.norm._pdf(x) * extra_kwarg + offset\n\n dist = _dist_gen(shapes='offset, extra_kwarg')\n assert_equal(dist.pdf(0.5, offset=111, extra_kwarg=33),\n stats.norm.pdf(0.5)*33 + 111)\n assert_equal(dist.pdf(0.5, 111, 33),\n stats.norm.pdf(0.5)*33 + 111)\n\n def test_extra_kwarg(self):\n # **kwargs to _pdf are ignored.\n # this is a limitation of the framework (_pdf(x, *goodargs))\n class _distr_gen(stats.rv_continuous):\n def _pdf(self, x, *args, **kwargs):\n # _pdf should handle *args, **kwargs itself. 
Here \"handling\"\n # is ignoring *args and looking for ``extra_kwarg`` and using\n # that.\n extra_kwarg = kwargs.pop('extra_kwarg', 1)\n return stats.norm._pdf(x) * extra_kwarg\n\n dist = _distr_gen(shapes='extra_kwarg')\n assert_equal(dist.pdf(1, extra_kwarg=3), stats.norm.pdf(1))\n\n def shapes_empty_string(self):\n # shapes='' is equivalent to shapes=None\n class _dist_gen(stats.rv_continuous):\n def _pdf(self, x):\n return stats.norm.pdf(x)\n\n dist = _dist_gen(shapes='')\n assert_equal(dist.pdf(0.5), stats.norm.pdf(0.5))\n\n\nclass TestSubclassingNoShapes(object):\n # Construct a distribution w/o explicit shapes parameter and test it.\n\n def test_only__pdf(self):\n dummy_distr = _distr_gen(name='dummy')\n assert_equal(dummy_distr.pdf(1, a=1), 42)\n\n def test_only__cdf(self):\n # _pdf is determined from _cdf by taking numerical derivative\n dummy_distr = _distr2_gen(name='dummy')\n assert_almost_equal(dummy_distr.pdf(1, a=1), 1)\n\n @pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason=\"docstring stripped\")\n def test_signature_inspection(self):\n # check that _pdf signature inspection works correctly, and is used in\n # the class docstring\n dummy_distr = _distr_gen(name='dummy')\n assert_equal(dummy_distr.numargs, 1)\n assert_equal(dummy_distr.shapes, 'a')\n res = re.findall(r'logpdf\\(x, a, loc=0, scale=1\\)',\n dummy_distr.__doc__)\n assert_(len(res) == 1)\n\n @pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason=\"docstring stripped\")\n def test_signature_inspection_2args(self):\n # same for 2 shape params and both _pdf and _cdf defined\n dummy_distr = _distr6_gen(name='dummy')\n assert_equal(dummy_distr.numargs, 2)\n assert_equal(dummy_distr.shapes, 'a, b')\n res = re.findall(r'logpdf\\(x, a, b, loc=0, scale=1\\)',\n dummy_distr.__doc__)\n assert_(len(res) == 1)\n\n def test_signature_inspection_2args_incorrect_shapes(self):\n # both _pdf and _cdf defined, but shapes are inconsistent: raises\n assert_raises(TypeError, _distr3_gen, name='dummy')\n\n def test_defaults_raise(self):\n # default arguments should raise\n class _dist_gen(stats.rv_continuous):\n def _pdf(self, x, a=42):\n return 42\n assert_raises(TypeError, _dist_gen, **dict(name='dummy'))\n\n def test_starargs_raise(self):\n # without explicit shapes, *args are not allowed\n class _dist_gen(stats.rv_continuous):\n def _pdf(self, x, a, *args):\n return 42\n assert_raises(TypeError, _dist_gen, **dict(name='dummy'))\n\n def test_kwargs_raise(self):\n # without explicit shapes, **kwargs are not allowed\n class _dist_gen(stats.rv_continuous):\n def _pdf(self, x, a, **kwargs):\n return 42\n assert_raises(TypeError, _dist_gen, **dict(name='dummy'))\n\n\n@pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason=\"docstring stripped\")\ndef test_docstrings():\n badones = [r',\\s*,', r'\\(\\s*,', r'^\\s*:']\n for distname in stats.__all__:\n dist = getattr(stats, distname)\n if isinstance(dist, (stats.rv_discrete, stats.rv_continuous)):\n for regex in badones:\n assert_(re.search(regex, dist.__doc__) is None)\n\n\ndef test_infinite_input():\n assert_almost_equal(stats.skellam.sf(np.inf, 10, 11), 0)\n assert_almost_equal(stats.ncx2._cdf(np.inf, 8, 0.1), 1)\n\n\ndef test_lomax_accuracy():\n # regression test for gh-4033\n p = stats.lomax.ppf(stats.lomax.cdf(1e-100, 1), 1)\n assert_allclose(p, 1e-100)\n\n\ndef test_gompertz_accuracy():\n # Regression test for gh-4031\n p = stats.gompertz.ppf(stats.gompertz.cdf(1e-100, 1), 1)\n assert_allclose(p, 1e-100)\n\n\ndef test_truncexpon_accuracy():\n # regression test for gh-4035\n p = 
stats.truncexpon.ppf(stats.truncexpon.cdf(1e-100, 1), 1)\n assert_allclose(p, 1e-100)\n\n\ndef test_rayleigh_accuracy():\n # regression test for gh-4034\n p = stats.rayleigh.isf(stats.rayleigh.sf(9, 1), 1)\n assert_almost_equal(p, 9.0, decimal=15)\n\n\ndef test_genextreme_give_no_warnings():\n \"\"\"regression test for gh-6219\"\"\"\n\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\")\n\n stats.genextreme.cdf(.5, 0)\n stats.genextreme.pdf(.5, 0)\n stats.genextreme.ppf(.5, 0)\n stats.genextreme.logpdf(-np.inf, 0.0)\n number_of_warnings_thrown = len(w)\n assert_equal(number_of_warnings_thrown, 0)\n\n\ndef test_genextreme_entropy():\n # regression test for gh-5181\n euler_gamma = 0.5772156649015329\n\n h = stats.genextreme.entropy(-1.0)\n assert_allclose(h, 2*euler_gamma + 1, rtol=1e-14)\n\n h = stats.genextreme.entropy(0)\n assert_allclose(h, euler_gamma + 1, rtol=1e-14)\n\n h = stats.genextreme.entropy(1.0)\n assert_equal(h, 1)\n\n h = stats.genextreme.entropy(-2.0, scale=10)\n assert_allclose(h, euler_gamma*3 + np.log(10) + 1, rtol=1e-14)\n\n h = stats.genextreme.entropy(10)\n assert_allclose(h, -9*euler_gamma + 1, rtol=1e-14)\n\n h = stats.genextreme.entropy(-10)\n assert_allclose(h, 11*euler_gamma + 1, rtol=1e-14)\n\n\ndef test_genextreme_sf_isf():\n # Expected values were computed using mpmath:\n #\n # import mpmath\n #\n # def mp_genextreme_sf(x, xi, mu=0, sigma=1):\n # # Formula from wikipedia, which has a sign convention for xi that\n # # is the opposite of scipy's shape parameter.\n # if xi != 0:\n # t = mpmath.power(1 + ((x - mu)/sigma)*xi, -1/xi)\n # else:\n # t = mpmath.exp(-(x - mu)/sigma)\n # return 1 - mpmath.exp(-t)\n #\n # >>> mpmath.mp.dps = 1000\n # >>> s = mp_genextreme_sf(mpmath.mp.mpf(\"1e8\"), mpmath.mp.mpf(\"0.125\"))\n # >>> float(s)\n # 1.6777205262585625e-57\n # >>> s = mp_genextreme_sf(mpmath.mp.mpf(\"7.98\"), mpmath.mp.mpf(\"-0.125\"))\n # >>> float(s)\n # 1.52587890625e-21\n # >>> s = mp_genextreme_sf(mpmath.mp.mpf(\"7.98\"), mpmath.mp.mpf(\"0\"))\n # >>> float(s)\n # 0.00034218086528426593\n\n x = 1e8\n s = stats.genextreme.sf(x, -0.125)\n assert_allclose(s, 1.6777205262585625e-57)\n x2 = stats.genextreme.isf(s, -0.125)\n assert_allclose(x2, x)\n\n x = 7.98\n s = stats.genextreme.sf(x, 0.125)\n assert_allclose(s, 1.52587890625e-21)\n x2 = stats.genextreme.isf(s, 0.125)\n assert_allclose(x2, x)\n\n x = 7.98\n s = stats.genextreme.sf(x, 0)\n assert_allclose(s, 0.00034218086528426593)\n x2 = stats.genextreme.isf(s, 0)\n assert_allclose(x2, x)\n\n\ndef test_burr12_ppf_small_arg():\n prob = 1e-16\n quantile = stats.burr12.ppf(prob, 2, 3)\n # The expected quantile was computed using mpmath:\n # >>> import mpmath\n # >>> mpmath.mp.dps = 100\n # >>> prob = mpmath.mpf('1e-16')\n # >>> c = mpmath.mpf(2)\n # >>> d = mpmath.mpf(3)\n # >>> float(((1-prob)**(-1/d) - 1)**(1/c))\n # 5.7735026918962575e-09\n assert_allclose(quantile, 5.7735026918962575e-09)\n\n\ndef test_crystalball_function():\n \"\"\"\n All values are calculated using the independent implementation of the\n ROOT framework (see https://root.cern.ch/).\n Corresponding ROOT code is given in the comments.\n \"\"\"\n X = np.linspace(-5.0, 5.0, 21)[:-1]\n\n # for(float x = -5.0; x < 5.0; x+=0.5)\n # std::cout << ROOT::Math::crystalball_pdf(x, 1.0, 2.0, 1.0) << \", \";\n calculated = stats.crystalball.pdf(X, beta=1.0, m=2.0)\n expected = np.array([0.0202867, 0.0241428, 0.0292128, 0.0360652, 0.045645,\n 0.059618, 0.0811467, 0.116851, 0.18258, 0.265652,\n 0.301023, 0.265652, 0.18258, 
0.097728, 0.0407391,\n 0.013226, 0.00334407, 0.000658486, 0.000100982,\n 1.20606e-05])\n assert_allclose(expected, calculated, rtol=0.001)\n\n # for(float x = -5.0; x < 5.0; x+=0.5)\n # std::cout << ROOT::Math::crystalball_pdf(x, 2.0, 3.0, 1.0) << \", \";\n calculated = stats.crystalball.pdf(X, beta=2.0, m=3.0)\n expected = np.array([0.0019648, 0.00279754, 0.00417592, 0.00663121,\n 0.0114587, 0.0223803, 0.0530497, 0.12726, 0.237752,\n 0.345928, 0.391987, 0.345928, 0.237752, 0.12726,\n 0.0530497, 0.0172227, 0.00435458, 0.000857469,\n 0.000131497, 1.57051e-05])\n assert_allclose(expected, calculated, rtol=0.001)\n\n # for(float x = -5.0; x < 5.0; x+=0.5) {\n # std::cout << ROOT::Math::crystalball_pdf(x, 2.0, 3.0, 2.0, 0.5);\n # std::cout << \", \";\n # }\n calculated = stats.crystalball.pdf(X, beta=2.0, m=3.0, loc=0.5, scale=2.0)\n expected = np.array([0.00785921, 0.0111902, 0.0167037, 0.0265249,\n 0.0423866, 0.0636298, 0.0897324, 0.118876, 0.147944,\n 0.172964, 0.189964, 0.195994, 0.189964, 0.172964,\n 0.147944, 0.118876, 0.0897324, 0.0636298, 0.0423866,\n 0.0265249])\n assert_allclose(expected, calculated, rtol=0.001)\n\n # for(float x = -5.0; x < 5.0; x+=0.5)\n # std::cout << ROOT::Math::crystalball_cdf(x, 1.0, 2.0, 1.0) << \", \";\n calculated = stats.crystalball.cdf(X, beta=1.0, m=2.0)\n expected = np.array([0.12172, 0.132785, 0.146064, 0.162293, 0.18258,\n 0.208663, 0.24344, 0.292128, 0.36516, 0.478254,\n 0.622723, 0.767192, 0.880286, 0.94959, 0.982834,\n 0.995314, 0.998981, 0.999824, 0.999976, 0.999997])\n assert_allclose(expected, calculated, rtol=0.001)\n\n # for(float x = -5.0; x < 5.0; x+=0.5)\n # std::cout << ROOT::Math::crystalball_cdf(x, 2.0, 3.0, 1.0) << \", \";\n calculated = stats.crystalball.cdf(X, beta=2.0, m=3.0)\n expected = np.array([0.00442081, 0.00559509, 0.00730787, 0.00994682,\n 0.0143234, 0.0223803, 0.0397873, 0.0830763, 0.173323,\n 0.320592, 0.508717, 0.696841, 0.844111, 0.934357,\n 0.977646, 0.993899, 0.998674, 0.999771, 0.999969,\n 0.999997])\n assert_allclose(expected, calculated, rtol=0.001)\n\n # for(float x = -5.0; x < 5.0; x+=0.5) {\n # std::cout << ROOT::Math::crystalball_cdf(x, 2.0, 3.0, 2.0, 0.5);\n # std::cout << \", \";\n # }\n calculated = stats.crystalball.cdf(X, beta=2.0, m=3.0, loc=0.5, scale=2.0)\n expected = np.array([0.0176832, 0.0223803, 0.0292315, 0.0397873, 0.0567945,\n 0.0830763, 0.121242, 0.173323, 0.24011, 0.320592,\n 0.411731, 0.508717, 0.605702, 0.696841, 0.777324,\n 0.844111, 0.896192, 0.934357, 0.960639, 0.977646])\n assert_allclose(expected, calculated, rtol=0.001)\n\n\ndef test_crystalball_function_moments():\n \"\"\"\n All values are calculated using the pdf formula and the integrate function\n of Mathematica\n \"\"\"\n # The Last two (alpha, n) pairs test the special case n == alpha**2\n beta = np.array([2.0, 1.0, 3.0, 2.0, 3.0])\n m = np.array([3.0, 3.0, 2.0, 4.0, 9.0])\n\n # The distribution should be correctly normalised\n expected_0th_moment = np.array([1.0, 1.0, 1.0, 1.0, 1.0])\n calculated_0th_moment = stats.crystalball._munp(0, beta, m)\n assert_allclose(expected_0th_moment, calculated_0th_moment, rtol=0.001)\n\n # calculated using wolframalpha.com\n # e.g. 
for beta = 2 and m = 3 we calculate the norm like this:\n # integrate exp(-x^2/2) from -2 to infinity +\n # integrate (3/2)^3*exp(-2^2/2)*(3/2-2-x)^(-3) from -infinity to -2\n norm = np.array([2.5511, 3.01873, 2.51065, 2.53983, 2.507410455])\n\n a = np.array([-0.21992, -3.03265, np.inf, -0.135335, -0.003174])\n expected_1th_moment = a / norm\n calculated_1th_moment = stats.crystalball._munp(1, beta, m)\n assert_allclose(expected_1th_moment, calculated_1th_moment, rtol=0.001)\n\n a = np.array([np.inf, np.inf, np.inf, 3.2616, 2.519908])\n expected_2th_moment = a / norm\n calculated_2th_moment = stats.crystalball._munp(2, beta, m)\n assert_allclose(expected_2th_moment, calculated_2th_moment, rtol=0.001)\n\n a = np.array([np.inf, np.inf, np.inf, np.inf, -0.0577668])\n expected_3th_moment = a / norm\n calculated_3th_moment = stats.crystalball._munp(3, beta, m)\n assert_allclose(expected_3th_moment, calculated_3th_moment, rtol=0.001)\n\n a = np.array([np.inf, np.inf, np.inf, np.inf, 7.78468])\n expected_4th_moment = a / norm\n calculated_4th_moment = stats.crystalball._munp(4, beta, m)\n assert_allclose(expected_4th_moment, calculated_4th_moment, rtol=0.001)\n\n a = np.array([np.inf, np.inf, np.inf, np.inf, -1.31086])\n expected_5th_moment = a / norm\n calculated_5th_moment = stats.crystalball._munp(5, beta, m)\n assert_allclose(expected_5th_moment, calculated_5th_moment, rtol=0.001)\n\n\n@pytest.mark.parametrize(\n 'df1,df2,x',\n [(2, 2, [-0.5, 0.2, 1.0, 2.3]),\n (4, 11, [-0.5, 0.2, 1.0, 2.3]),\n (7, 17, [1, 2, 3, 4, 5])]\n)\ndef test_ncf_edge_case(df1, df2, x):\n # Test for edge case described in gh-11660.\n # Non-central Fisher distribution when nc = 0\n # should be the same as Fisher distribution.\n nc = 0\n expected_cdf = stats.f.cdf(x, df1, df2)\n calculated_cdf = stats.ncf.cdf(x, df1, df2, nc)\n assert_allclose(expected_cdf, calculated_cdf, rtol=1e-14)\n\n # when ncf_gen._skip_pdf will be used instead of generic pdf,\n # this additional test will be useful.\n expected_pdf = stats.f.pdf(x, df1, df2)\n calculated_pdf = stats.ncf.pdf(x, df1, df2, nc)\n assert_allclose(expected_pdf, calculated_pdf, rtol=1e-6)\n\n\ndef test_ncf_variance():\n # Regression test for gh-10658 (incorrect variance formula for ncf).\n # The correct value of ncf.var(2, 6, 4), 42.75, can be verified with, for\n # example, Wolfram Alpha with the expression\n # Variance[NoncentralFRatioDistribution[2, 6, 4]]\n # or with the implementation of the noncentral F distribution in the C++\n # library Boost.\n v = stats.ncf.var(2, 6, 4)\n assert_allclose(v, 42.75, rtol=1e-14)\n\n\nclass TestHistogram(object):\n def setup_method(self):\n np.random.seed(1234)\n\n # We have 8 bins\n # [1,2), [2,3), [3,4), [4,5), [5,6), [6,7), [7,8), [8,9)\n # But actually np.histogram will put the last 9 also in the [8,9) bin!\n # Therefore there is a slight difference below for the last bin, from\n # what you might have expected.\n histogram = np.histogram([1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 5,\n 6, 6, 6, 6, 7, 7, 7, 8, 8, 9], bins=8)\n self.template = stats.rv_histogram(histogram)\n\n data = stats.norm.rvs(loc=1.0, scale=2.5, size=10000, random_state=123)\n norm_histogram = np.histogram(data, bins=50)\n self.norm_template = stats.rv_histogram(norm_histogram)\n\n def test_pdf(self):\n values = np.array([0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5,\n 5.0, 5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5])\n pdf_values = np.asarray([0.0/25.0, 0.0/25.0, 1.0/25.0, 1.0/25.0,\n 2.0/25.0, 2.0/25.0, 3.0/25.0, 3.0/25.0,\n 4.0/25.0, 4.0/25.0, 
5.0/25.0, 5.0/25.0,\n 4.0/25.0, 4.0/25.0, 3.0/25.0, 3.0/25.0,\n 3.0/25.0, 3.0/25.0, 0.0/25.0, 0.0/25.0])\n assert_allclose(self.template.pdf(values), pdf_values)\n\n # Test explicitly the corner cases:\n # As stated above the pdf in the bin [8,9) is greater than\n # one would naively expect because np.histogram putted the 9\n # into the [8,9) bin.\n assert_almost_equal(self.template.pdf(8.0), 3.0/25.0)\n assert_almost_equal(self.template.pdf(8.5), 3.0/25.0)\n # 9 is outside our defined bins [8,9) hence the pdf is already 0\n # for a continuous distribution this is fine, because a single value\n # does not have a finite probability!\n assert_almost_equal(self.template.pdf(9.0), 0.0/25.0)\n assert_almost_equal(self.template.pdf(10.0), 0.0/25.0)\n\n x = np.linspace(-2, 2, 10)\n assert_allclose(self.norm_template.pdf(x),\n stats.norm.pdf(x, loc=1.0, scale=2.5), rtol=0.1)\n\n def test_cdf_ppf(self):\n values = np.array([0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5,\n 5.0, 5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5])\n cdf_values = np.asarray([0.0/25.0, 0.0/25.0, 0.0/25.0, 0.5/25.0,\n 1.0/25.0, 2.0/25.0, 3.0/25.0, 4.5/25.0,\n 6.0/25.0, 8.0/25.0, 10.0/25.0, 12.5/25.0,\n 15.0/25.0, 17.0/25.0, 19.0/25.0, 20.5/25.0,\n 22.0/25.0, 23.5/25.0, 25.0/25.0, 25.0/25.0])\n assert_allclose(self.template.cdf(values), cdf_values)\n # First three and last two values in cdf_value are not unique\n assert_allclose(self.template.ppf(cdf_values[2:-1]), values[2:-1])\n\n # Test of cdf and ppf are inverse functions\n x = np.linspace(1.0, 9.0, 100)\n assert_allclose(self.template.ppf(self.template.cdf(x)), x)\n x = np.linspace(0.0, 1.0, 100)\n assert_allclose(self.template.cdf(self.template.ppf(x)), x)\n\n x = np.linspace(-2, 2, 10)\n assert_allclose(self.norm_template.cdf(x),\n stats.norm.cdf(x, loc=1.0, scale=2.5), rtol=0.1)\n\n def test_rvs(self):\n N = 10000\n sample = self.template.rvs(size=N, random_state=123)\n assert_equal(np.sum(sample < 1.0), 0.0)\n assert_allclose(np.sum(sample <= 2.0), 1.0/25.0 * N, rtol=0.2)\n assert_allclose(np.sum(sample <= 2.5), 2.0/25.0 * N, rtol=0.2)\n assert_allclose(np.sum(sample <= 3.0), 3.0/25.0 * N, rtol=0.1)\n assert_allclose(np.sum(sample <= 3.5), 4.5/25.0 * N, rtol=0.1)\n assert_allclose(np.sum(sample <= 4.0), 6.0/25.0 * N, rtol=0.1)\n assert_allclose(np.sum(sample <= 4.5), 8.0/25.0 * N, rtol=0.1)\n assert_allclose(np.sum(sample <= 5.0), 10.0/25.0 * N, rtol=0.05)\n assert_allclose(np.sum(sample <= 5.5), 12.5/25.0 * N, rtol=0.05)\n assert_allclose(np.sum(sample <= 6.0), 15.0/25.0 * N, rtol=0.05)\n assert_allclose(np.sum(sample <= 6.5), 17.0/25.0 * N, rtol=0.05)\n assert_allclose(np.sum(sample <= 7.0), 19.0/25.0 * N, rtol=0.05)\n assert_allclose(np.sum(sample <= 7.5), 20.5/25.0 * N, rtol=0.05)\n assert_allclose(np.sum(sample <= 8.0), 22.0/25.0 * N, rtol=0.05)\n assert_allclose(np.sum(sample <= 8.5), 23.5/25.0 * N, rtol=0.05)\n assert_allclose(np.sum(sample <= 9.0), 25.0/25.0 * N, rtol=0.05)\n assert_allclose(np.sum(sample <= 9.0), 25.0/25.0 * N, rtol=0.05)\n assert_equal(np.sum(sample > 9.0), 0.0)\n\n def test_munp(self):\n for n in range(4):\n assert_allclose(self.norm_template._munp(n),\n stats.norm(1.0, 2.5).moment(n), rtol=0.05)\n\n def test_entropy(self):\n assert_allclose(self.norm_template.entropy(),\n stats.norm.entropy(loc=1.0, scale=2.5), rtol=0.05)\n\n\ndef test_loguniform():\n # This test makes sure the alias of \"loguniform\" is log-uniform\n rv = stats.loguniform(10 ** -3, 10 ** 0)\n rvs = rv.rvs(size=10000, random_state=42)\n vals, _ = 
np.histogram(np.log10(rvs), bins=10)\n assert 900 <= vals.min() <= vals.max() <= 1100\n assert np.abs(np.median(vals) - 1000) <= 10\n\n\nclass TestArgus(object):\n def test_argus_rvs_large_chi(self):\n # test that the algorithm can handle large values of chi\n x = stats.argus.rvs(50, size=500, random_state=325)\n assert_almost_equal(stats.argus(50).mean(), x.mean(), decimal=4)\n\n def test_argus_rvs_ratio_uniforms(self):\n # test that the ratio of uniforms algorithms works for chi > 2.611\n x = stats.argus.rvs(3.5, size=1500, random_state=1535)\n assert_almost_equal(stats.argus(3.5).mean(), x.mean(), decimal=3)\n assert_almost_equal(stats.argus(3.5).std(), x.std(), decimal=3)\n\n\ndef test_rvs_no_size_warning():\n class rvs_no_size_gen(stats.rv_continuous):\n def _rvs(self):\n return 1\n\n rvs_no_size = rvs_no_size_gen(name='rvs_no_size')\n\n with assert_warns(np.VisibleDeprecationWarning):\n rvs_no_size.rvs()\n"
] |
[
[
"scipy.stats._continuous_distns.gamma_gen",
"scipy.stats.tukeylambda.pdf",
"scipy.stats.beta",
"scipy.stats.binom.rvs",
"scipy.stats.triang.pdf",
"scipy.stats.geom.cdf",
"scipy.stats.gamma",
"scipy.stats.loggamma.stats",
"scipy.stats.gamma.stats",
"scipy.stats.triang.cdf",
"scipy.stats.t.cdf",
"scipy.stats.cauchy.rvs",
"scipy.stats.uniform.fit",
"scipy.stats.laplace.fit",
"scipy.stats.pearson3.rvs",
"scipy.stats.poisson.logpmf",
"scipy.stats.invgauss._reduce_func",
"scipy.stats.cauchy.fit",
"scipy.stats.geom.logsf",
"numpy.isposinf",
"numpy.lib.recfunctions.rec_append_fields",
"scipy.stats.bernoulli",
"numpy.mean",
"scipy.stats.genextreme.sf",
"scipy.stats.burr.pdf",
"scipy.stats.t.ppf",
"scipy.stats.uniform.ppf",
"scipy.stats.hypergeom.pmf",
"scipy.stats.bernoulli.cdf",
"scipy.stats.gumbel_l.cdf",
"scipy.stats.bradford.ppf",
"scipy.stats.skewnorm.cdf",
"numpy.array",
"scipy.stats.invgamma.sf",
"scipy.stats.genpareto.cdf",
"scipy.stats.weibull_max.pdf",
"numpy.testing.assert_",
"scipy.stats.dlaplace.rvs",
"scipy.stats.rice.cdf",
"scipy.stats.weibull_min.logcdf",
"scipy.stats.weibull_min.pdf",
"scipy.stats.zipf.rvs",
"scipy.stats.levy_stable.pdf",
"scipy.stats.trapz.mean",
"numpy.histogram",
"scipy.stats.randint.expect",
"scipy.stats.norminvgauss.stats",
"scipy.stats.ncx2.rvs",
"scipy.integrate.simps",
"scipy.stats.genpareto.stats",
"scipy.stats.levy.cdf",
"scipy.stats.norm.ppf",
"scipy.stats.logser.expect",
"scipy.stats.pareto.rvs",
"numpy.linspace",
"scipy.stats.beta.expect",
"scipy.stats.norm.expect",
"scipy.stats.truncnorm.cdf",
"scipy.stats.trapz.pdf",
"scipy.stats.rice.ppf",
"scipy.stats.laplace.rvs",
"scipy.stats.logistic.cdf",
"numpy.testing.assert_warns",
"scipy.stats.logser.pmf",
"scipy.stats.lognorm",
"scipy.stats.kappa4.cdf",
"numpy.sqrt",
"scipy.stats.bradford.cdf",
"numpy.testing.assert_equal",
"scipy.stats.crystalball._munp",
"scipy.stats.chi2.pdf",
"scipy.stats.invgauss.rvs",
"scipy.stats.norminvgauss.ppf",
"scipy.stats.hypergeom.expect",
"scipy.stats.powerlaw.stats",
"scipy.stats.binom.pmf",
"scipy.stats.ksone.fit",
"scipy.stats.geom.logpmf",
"scipy.stats.skew",
"numpy.sign",
"scipy.stats.weibull_min.logsf",
"scipy.stats.powerlaw.logpdf",
"scipy.stats.lomax.cdf",
"scipy.stats.f.pdf",
"scipy.stats.rice.pdf",
"scipy.stats.exponnorm.stats",
"scipy.stats.chi2.ppf",
"scipy.stats.rayleigh.__doc__.lower",
"scipy.stats.gengamma._munp",
"scipy.stats.bernoulli.rvs",
"scipy.stats.bernoulli.logpmf",
"scipy.stats.rv_continuous",
"numpy.random.RandomState",
"numpy.testing.assert_array_equal",
"scipy.stats.hypergeom.ppf",
"scipy.stats.crystalball.pdf",
"numpy.testing.assert_allclose",
"scipy.stats.trapz.var",
"scipy.stats.poisson.rvs",
"scipy.stats.genpareto._get_support",
"scipy.stats.binom",
"scipy.stats.skewnorm.stats",
"scipy.stats.genextreme.pdf",
"scipy.stats.pareto._fitstart",
"numpy.random.randn",
"scipy.stats.norm",
"numpy.random.seed",
"scipy.stats.t.isf",
"scipy.stats.trapz.entropy",
"scipy.stats.weibull_max.logsf",
"scipy.stats.kstest",
"scipy.stats.logistic.sf",
"scipy.stats.geom.ppf",
"scipy.stats.bernoulli.pmf",
"scipy.stats.vonmises",
"scipy.stats.t.std",
"scipy.stats.weibull_min.logpdf",
"scipy.stats.crystalball.cdf",
"scipy.stats.truncexpon.cdf",
"scipy.stats.pearson3",
"scipy.stats.invgamma.stats",
"scipy.stats.logser.rvs",
"scipy.stats.kappa4",
"scipy.stats.poisson.pmf",
"scipy.stats.gamma.fit",
"scipy.stats.bernoulli.ppf",
"scipy.stats.invgauss._fitstart",
"scipy.stats.beta.logpdf",
"numpy.asarray",
"scipy.stats.t.pdf",
"numpy.testing.assert_array_less",
"scipy.stats.planck.sf",
"scipy.stats.argus.rvs",
"scipy.stats.hypergeom.interval",
"scipy.stats.expon.cdf",
"numpy.size",
"scipy.stats.rice.stats",
"scipy.stats.nbinom.pmf",
"scipy.stats.ncf.cdf",
"numpy.testing.assert_array_almost_equal",
"scipy.stats.pearson3.cdf",
"scipy.stats.halfnorm.pdf",
"scipy.stats.levy_stable.stats",
"scipy.stats.uniform.sf",
"scipy.stats.norm.entropy",
"scipy.stats.norm.cdf",
"scipy.stats.lognorm.stats",
"scipy.stats.geom.pmf",
"scipy.stats.logistic.isf",
"scipy.stats.randint",
"scipy.stats.expon.pdf",
"scipy.stats.geninvgauss.cdf",
"scipy.stats.gompertz.cdf",
"scipy.stats.poisson.expect",
"scipy.stats.truncnorm.stats",
"scipy.stats.geninvgauss",
"scipy.stats.randint.pmf",
"scipy.stats._distn_infrastructure.argsreduce",
"scipy.stats.tukeylambda.stats",
"numpy.random.rand",
"numpy.median",
"scipy.stats.loguniform",
"scipy.stats.burr",
"numpy.select",
"scipy.stats.betaprime.cdf",
"numpy.logspace",
"numpy.empty",
"numpy.log",
"scipy.stats.rice.logcdf",
"scipy.stats.exponpow.cdf",
"numpy.log1p",
"scipy.stats.gamma.expect",
"scipy.stats.skellam.pmf",
"scipy.stats.genextreme.cdf",
"scipy.stats.gumbel_l.sf",
"scipy.stats.exponpow.logpdf",
"scipy.stats.t.logpdf",
"scipy.stats.levy.sf",
"scipy.stats.norminvgauss.cdf",
"scipy.stats.rice.logpdf",
"numpy.errstate",
"scipy.stats.genextreme.logpdf",
"numpy.isneginf",
"scipy.stats.geom.rvs",
"scipy.stats.geom.sf",
"scipy.stats.nct.stats",
"scipy.stats.lognorm.sf",
"scipy.stats.weibull_min.sf",
"scipy.stats.invgamma.cdf",
"scipy.stats.gumbel_l.logcdf",
"scipy.stats.betaprime",
"scipy.stats.exponnorm.pdf",
"numpy.arange",
"scipy.stats.rdist",
"scipy.stats.foldnorm",
"scipy.stats.lognorm.fit",
"scipy.stats.kurtosis",
"scipy.stats.hypergeom.logsf",
"numpy.broadcast_arrays",
"scipy.stats.exponweib.logpdf",
"scipy.stats.gamma.pdf",
"scipy.stats.norm.logcdf",
"scipy.stats.truncnorm.rvs",
"scipy.stats.norm.pdf",
"scipy.stats.mielke.pdf",
"scipy.stats.weibull_min.cdf",
"scipy.stats.geom.logcdf",
"scipy.stats.logser.mean",
"scipy.stats.genextreme.isf",
"scipy.stats.t.logcdf",
"scipy.stats.exponpow.pdf",
"scipy.stats.nbinom.logpmf",
"scipy.stats.ncx2._cdf",
"scipy.stats.invgauss.fit",
"scipy.stats.pareto.sf",
"scipy.stats.laplace.pdf",
"scipy.stats.levy_stable._fitstart",
"numpy.sum",
"scipy.stats.t.logsf",
"scipy.stats.ncf.pdf",
"scipy.stats.expon.logpdf",
"scipy.stats.skewnorm.pdf",
"numpy.abs",
"scipy.stats.invgamma.isf",
"numpy.exp",
"numpy.testing.suppress_warnings",
"scipy.stats.geninvgauss.pdf",
"scipy.stats.skewnorm.rvs",
"scipy.stats.weibull_max.logcdf",
"scipy.stats.expon.sf",
"scipy.special.digamma",
"scipy.stats.kstwo.isf",
"numpy.full",
"scipy.stats.gennorm.pdf",
"scipy.stats.genpareto.isf",
"scipy.stats.levy_l.isf",
"scipy.stats.gamma.rvs",
"scipy.stats.truncnorm.ppf",
"scipy.stats.genpareto.pdf",
"scipy.stats.rayleigh.sf",
"scipy.stats.truncnorm.sf",
"numpy.shape",
"scipy.stats.norm.fit",
"scipy.stats.halfgennorm.pdf",
"numpy.random.default_rng",
"scipy.stats.levy.ppf",
"scipy.stats.pearson3.pdf",
"scipy.stats.zipf",
"scipy.stats.argus",
"scipy.stats.genpareto",
"scipy.stats.bernoulli.sf",
"scipy.stats.skewnorm.sf",
"scipy.stats.beta.ppf",
"scipy.stats.gamma.logpdf",
"scipy.stats.lognorm.rvs",
"scipy.stats.poisson.stats",
"scipy.stats.ncf.var",
"scipy.stats.genpareto.logsf",
"scipy.stats.uniform.pdf",
"scipy.stats.logistic.ppf",
"scipy.stats.kstwo.sf",
"scipy.stats.nbinom",
"scipy.stats.rv_histogram",
"scipy.stats.norm.rvs",
"scipy.stats.hypergeom.stats",
"scipy.stats.rv_discrete",
"numpy.diff",
"scipy.stats.kstwo.ppf",
"scipy.stats.mielke",
"scipy.stats.bernoulli.logsf",
"scipy.stats.levy_l.sf",
"scipy.stats.kstwo.cdf",
"scipy.stats.randint.cdf",
"scipy.stats.chi2.rvs",
"scipy.stats.bernoulli.__doc__.lower",
"scipy.stats.hypergeom.sf",
"scipy.stats.nct",
"scipy.stats.trapz.stats",
"scipy.special.expm1",
"scipy.stats.invgauss",
"scipy.stats.trapz.cdf",
"scipy.stats.truncnorm.pdf",
"scipy.stats.rayleigh.logpdf",
"scipy.stats.norm.logpdf",
"scipy.stats.geom",
"scipy.stats.genextreme.ppf",
"scipy.stats.burr._munp",
"scipy.stats.hypergeom",
"scipy.stats.norminvgauss.pdf",
"scipy.stats.expon.ppf",
"scipy._lib._util.check_random_state",
"scipy.stats.dlaplace",
"scipy.stats.rice.expect",
"scipy.stats.skellam.expect",
"scipy.stats.geninvgauss.rvs",
"scipy.stats.poisson",
"scipy.stats.rayleigh.logsf",
"scipy.stats.nbinom.rvs",
"scipy.stats.skellam.sf",
"numpy.log10",
"scipy.stats.hypergeom.cdf",
"scipy.stats.gumbel_r.cdf",
"scipy.stats.pareto.stats",
"scipy.stats.invgamma.ppf",
"scipy.stats.bernoulli.isf",
"scipy.stats.bernoulli.logcdf",
"numpy.ones",
"scipy.stats.poisson.moment",
"scipy.stats.hypergeom.rvs",
"scipy.stats.t.sf",
"scipy.stats.randint.rvs",
"scipy.stats.lognorm.pdf",
"scipy.stats.weibull_max.cdf",
"scipy.stats.exponpow.sf",
"scipy.stats.uniform.cdf",
"scipy.stats.expon.fit",
"numpy.load",
"scipy.stats.norm._pdf",
"numpy.where",
"scipy.stats.t.stats",
"scipy.stats.erlang.pdf",
"scipy.stats.genextreme.entropy",
"scipy.stats.genpareto.ppf",
"scipy.stats.beta.fit",
"scipy.stats.ksone.sf",
"scipy.stats.logistic.logpdf",
"scipy.stats.erlang.fit",
"numpy.testing.assert_almost_equal",
"scipy.stats.f.stats",
"scipy.stats.f.cdf",
"numpy.allclose",
"scipy.stats.weibull_max.sf",
"scipy.stats.burr12.ppf",
"numpy.floor",
"scipy.special.psi",
"scipy.stats.pareto.fit",
"numpy.core.records.fromarrays",
"scipy.stats.zipf.stats",
"scipy.stats.levy_stable.cdf",
"scipy.stats.ncx2.pdf",
"numpy.all",
"scipy.stats.gumbel_l.isf",
"scipy.stats.gumbel_l.ppf",
"scipy.stats.genpareto.var",
"scipy.stats.entropy",
"scipy.stats.beta.rvs",
"scipy.stats.gengamma.pdf",
"scipy.stats.pareto._reduce_func",
"scipy.stats.poisson.cdf",
"scipy.stats.poisson.interval",
"scipy.stats.ncx2.logpdf",
"numpy.isfinite",
"scipy.stats.exponweib.pdf",
"scipy.stats.hypergeom.logcdf",
"scipy.special.xlogy",
"scipy.stats.gumbel_l.logsf",
"scipy.stats.planck.logsf",
"scipy.stats.lognorm.logsf",
"scipy.stats.weibull_max.logpdf",
"scipy.stats.genlogistic.logpdf",
"numpy.isnan",
"scipy.stats.truncnorm.isf",
"scipy.stats.skellam.cdf",
"scipy.stats.logser",
"numpy.var"
]
] |
zlpmichelle/crackingtensorflow
|
[
"66c3517b60c3793ef06f904e5d58e4d044628182"
] |
[
"crackingcode/day4/cc_tf_day4_1.py"
] |
[
"\n# coding: utf-8\n\n# In[1]:\n\n\nimport tensorflow as tf\n\n\n# In[2]:\n\n\nnode1 = tf.constant(3.0, tf.float32)\nnode2 = tf.constant(4.0) # also tf.float32 implicity\nprint(node1, node2)\n\n\n# In[3]:\n\n\nsess = tf.Session()\nprint(sess.run([node1, node2]))\n\n\n# In[5]:\n\n\nnode3 = tf.add(node1, node2)\nprint(\"node3: \", node3)\nprint(\"sess.run(node3):\", sess.run(node3))\n\n\n# In[6]:\n\n\na = tf.placeholder(tf.float32)\nb = tf.placeholder(tf.float32)\nadder_node = a + b # + provides a shortcut for tf.add(a, b)\n\n\n# In[7]:\n\n\nprint(sess.run(adder_node, {a: 3, b: 4.5}))\nprint(sess.run(adder_node, {a: [1,3], b: [2, 4]}))\n\n\n# In[9]:\n\n\nadd_and_triple = adder_node * 3.\nprint(sess.run(add_and_triple, {a: 3, b: 4.5}))\n\n\n# In[10]:\n\n\nW = tf.Variable([.3], tf.float32)\nb = tf.Variable([-.3], tf.float32)\nx = tf.placeholder(tf.float32)\nlinear_model = W * x + b\n\n\n# In[12]:\n\n\ninit = tf.global_variables_initializer()\nsess.run(init)\n\n\n# In[13]:\n\n\nprint(sess.run(linear_model, {x: [1,2,3,4]}))\n\n\n# In[15]:\n\n\ny = tf.placeholder(tf.float32)\nsquared_deltas = tf.square(linear_model - y)\nloss = tf.reduce_sum(squared_deltas)\nprint(sess.run(loss, {x:[1,2,3,4], y:[0,-1,-2,-3]}))\n\n\n# In[16]:\n\n\nfixW = tf.assign(W, [-1.])\nfixb = tf.assign(b, [1.])\nsess.run([fixW, fixb])\nprint(sess.run(loss, {x:[1,2,3,4], y:[0,-1,-2,-3]}))\n\n\n# In[17]:\n\n\noptimizer = tf.train.GradientDescentOptimizer(0.01)\ntrain = optimizer.minimize(loss)\n\n\n# In[18]:\n\n\nsess.run(init) # reset values to incorrect defaults\nfor i in range(1000):\n sess.run(train, {x:[1,2,3,4], y:[0,-1,-2,-3]})\n \nprint(sess.run([W, b]))\n\n\n# In[21]:\n\n\n# complete progame\nimport numpy as np\nimport tensorflow as tf\n\n# model parameters\nW = tf.Variable([.3], tf.float32)\nb = tf.Variable([-.3], tf.float32)\n\n# model input and output\nx = tf.placeholder(tf.float32)\nlinear_model = W * x + b\ny = tf.placeholder(tf.float32)\n\n# loss\nloss = tf.reduce_sum(tf.square(linear_model - y)) # sum of squares \n\n# optimizer\noptimizer = tf.train.GradientDescentOptimizer(0.01)\ntrain = optimizer.minimize(loss)\n\n# training data\nx_train = [1,2,3,4]\ny_train = [0,-1,-2,-3]\n\n# training loop\ninit = tf.global_variables_initializer()\nsess = tf.Session()\nsess.run(init) # reset values to wrong\nfor i in range(1000):\n sess.run(train, {x:x_train, y:y_train})\n \n# evaluation training accuracy\ncurr_W, curr_b, curr_loss = sess.run([W, b, loss], {x:x_train, y:y_train})\nprint(\"W: %s b: %s loss: %s\" %(curr_W, curr_b, curr_loss))\n\n\n# In[27]:\n\n\n# high lever API tf.contrib.learn\n\nimport tensorflow as tf\n# NumPy is often used to load, manipulate and preprocess data.\nimport numpy as np\n\n# Declare list of features. We only have one real-valued feature. There are many\n# other types of columns that are more complicated and useful.\nfeatures = [tf.contrib.layers.real_valued_column(\"x\", dimension = 1)]\n\n# An estimator is the front end to invoke training (fitting) and evaluation\n# (inference). There are many predefined types like linear regression,\n# logistic regression, linear classification, logistic classification, and\n# many neural network classifiers and regressors. The following code\n# provides an estimator that does linear regression.\nestimator = tf.contrib.learn.LinearRegressor(feature_columns= features)\n\n# TensorFlow provides many helper methods to read and set up data sets.\n# Here we use `numpy_input_fn`. 
We have to tell the function how many batches\n# of data (num_epochs) we want and how big each batch should be.\nx = np.array([1., 2., 3., 4.])\ny = np.array([0., -1, -2, -3.])\ninput_fn = tf.contrib.learn.io.numpy_input_fn({\"x\":x}, y, batch_size=4, num_epochs=1000)\n\n# We can invoke 1000 training steps by invoking the `fit` method and passing the\n# training data set.\nestimator.fit(input_fn=input_fn, steps=1000)\n\n# Here we evaluate how well our model did. In a real example, we would want\n# to use a separate validation and testing data set to avoid overfitting.\nprint(estimator.evaluate(input_fn=input_fn))\n\n\n\n# In[30]:\n\n\n# a custom model\nimport numpy as np\nimport tensorflow as tf\n# Declare list of features, we only have one real-valued feature\ndef model(features, lables, mode):\n # Build a linear model and predict values\n W = tf.get_variable(\"W\", [1], dtype = tf.float64)\n b = tf.get_variable(\"b\", [1], dtype = tf.float64)\n y = W * features['x'] + b\n # Loss sub-graph\n loss = tf.reduce_sum(tf.square(y - lables))\n # traing sub-graph\n global_step = tf.train.get_global_step()\n optimizer = tf.train.GradientDescentOptimizer(0.01)\n train = tf.group(optimizer.minimize(loss), tf.assign_add(global_step, 1))\n \n # ModelFnOps connects subgraphs we built to the\n # appropriate functionality.\n return tf.contrib.learn.ModelFnOps(mode = mode, predictions = y, loss = loss, train_op = train)\n\nestimator = tf.contrib.learn.Estimator(model_fn = model)\n# define our data set\nx = np.array([1., 2., 3., 4.])\ny = np.array([0., -1., -2., -3.])\ninput_fn = tf.contrib.learn.io.numpy_input_fn({'x': x}, y, 4, num_epochs = 1000)\n\n# train\nestimator.fit(input_fn = input_fn, steps = 1000)\n# evaluate our model\nprint(estimator.evaluate(input_fn=input_fn, steps = 10))\n\n\n# In[ ]:\n\n\n\n\n"
] |
[
[
"tensorflow.contrib.learn.LinearRegressor",
"tensorflow.contrib.learn.ModelFnOps",
"tensorflow.assign_add",
"tensorflow.contrib.learn.Estimator",
"tensorflow.global_variables_initializer",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.Variable",
"tensorflow.constant",
"tensorflow.add",
"tensorflow.train.get_global_step",
"numpy.array",
"tensorflow.Session",
"tensorflow.placeholder",
"tensorflow.reduce_sum",
"tensorflow.get_variable",
"tensorflow.assign",
"tensorflow.contrib.layers.real_valued_column",
"tensorflow.contrib.learn.io.numpy_input_fn",
"tensorflow.square"
]
] |
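The cc_tf_day4_1.py file in the record above fits linear_model = W * x + b to x=[1,2,3,4], y=[0,-1,-2,-3] with the TensorFlow 1.x graph API (tf.Session, tf.placeholder, tf.train.GradientDescentOptimizer). As a rough, NumPy-only sketch of the same full-batch gradient-descent fit, reusing the file's initial values and learning rate (the explicit gradient formulas below are ordinary sum-of-squares derivatives, not code taken from the record):

```python
import numpy as np

x = np.array([1., 2., 3., 4.])
y = np.array([0., -1., -2., -3.])

W, b = 0.3, -0.3      # same starting values as the tf.Variable calls above
lr = 0.01             # same rate as tf.train.GradientDescentOptimizer(0.01)
for _ in range(1000):
    err = W * x + b - y
    W -= lr * 2.0 * np.sum(err * x)   # d/dW of sum(err**2)
    b -= lr * 2.0 * np.sum(err)       # d/db of sum(err**2)

print(W, b)           # converges near W = -1, b = 1, as in the tutorial output
```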
iro-upgto/cime
|
[
"1e71927f8dc029d5968f63a1c86991eca1456dd0"
] |
[
"cime/_experimental.py"
] |
[
"from sympy import *\nfrom sympy.matrices import *\nfrom sympy.physics.mechanics import dynamicsymbols, init_vprinting\nimport matplotlib.pyplot as plt\ninit_vprinting()\n\ndef kinvars(*args,**kwargs):\n return dynamicsymbols(*args,**kwargs)\n\nclass Vector2D(object):\n def __init__(self,*args,**kwargs):\n if \"x\" in kwargs and \"y\" in kwargs:\n self._x = kwargs[\"x\"]\n self._y = kwargs[\"y\"]\n self._calculate_magnitude_and_angle()\n elif \"r\" in kwargs and \"theta\" in kwargs:\n self._r = kwargs[\"r\"]\n self._theta = kwargs[\"theta\"]\n self._calculate_rect_components()\n if len(args) == 2:\n self._r = args[0]\n self._theta = args[1]\n self._calculate_rect_components()\n\n def _calculate_rect_components(self):\n self._x = self.r*cos(self.theta)\n self._y = self.r*sin(self.theta)\n\n def _calculate_magnitude_and_angle(self):\n self._theta = atan2(self.y, self.x)\n self._r = sqrt(self.x**2 + self.y**2)\n\n @property\n def theta(self):\n return self._theta\n\n @theta.setter\n def theta(self,theta):\n self._theta = theta\n \n @property\n def r(self):\n return self._r\n\n @r.setter\n def r(self,r):\n self._r = r\n\n @property\n def x(self):\n return self._x\n\n @x.setter\n def x(self,x):\n self._x = x\n\n @property\n def y(self):\n return self._y\n\n @y.setter\n def y(self,y):\n self._y = y\n\n def get_orientation(self):\n \"\"\" Dir Cosines \"\"\"\n th = atan2(self.y, self.x)\n return th\n \n def get_norm(self):\n n = sqrt( self.x**2 + self.y**2)\n return n\n \n def dot(self, other):\n ux, uy = self.x, self.y\n vx, vy = other.x, other.y\n dp = ux*vx + uy*vy\n return dp\n\n def diff(self,var,order=1):\n dx = diff(self.x, var, order)\n dy = diff(self.y, var, order)\n return Vector2D(x=dx, y=dy)\n\n def subs(self,vals):\n for old,new in vals.items():\n self.x = self.x.subs(old,new)\n self.y = self.y.subs(old,new)\n \n def __add__(self, other):\n ux, uy = self.x, self.y\n vx, vy = other.x, other.y\n return Vector2D(x = ux + vx, y = uy + vy)\n\n def __radd__(self, other):\n if other == 0:\n return self\n else:\n return self.__add__(other)\n\n def __neg__(self):\n return Vector2D(x = -self.x, y=-self.y)\n \n def __sub__(self, other):\n ux, uy = self.x, self.y\n vx, vy = other.x, other.y\n return Vector2D(x = ux - vx, y = uy - vy)\n\n def __str__(self):\n s = \"<{x}, {y}>\".format(x=self.x, y=self.y)\n return s\n\n def __repr__(self):\n s = \"<{x}, {y}>\".format(x=self.x, y=self.y)\n return s\n # from sympy.printing.latex import latex\n # s = latex(self, mode='plain')\n # return \"$\\\\displaystyle %s$\" % s\n\n def _repr_html_(self):\n from sympy.printing.latex import LatexPrinter\n from sympy.physics.vector import vlatex\n lp = LatexPrinter()\n x = vlatex(self.x)\n y = vlatex(self.y)\n # return lp.doprint(\"$$ \\\\langle {0}, \\\\\\\\ {1} \\\\rangle $$\".format(x,y))\n return lp.doprint(r\"$$ \\begin{bmatrix} {%s} \\\\ {%s} \\end{bmatrix} $$\"%(x,y))\n\n \nclass VectorLoop(object):\n def __init__(self,*vectors):\n self.vectors = []\n for vector in vectors:\n self.vectors.append(vector)\n\n self._loop = sum(self.vectors)\n\n def add_vector(self,vector):\n self.vectors.append(vector)\n self._loop = sum(self.vectors)\n\n @property\n def loop(self):\n return self._loop\n\n def diff(self,var,order=1):\n vds = []\n for vector in self.vectors:\n dv = vector.diff(var,order)\n vds.append(dv)\n return VectorLoop(*vds)\n\n def solve(self,vars,values):\n x = self.loop.x.subs(values)\n y = self.loop.y.subs(values)\n sols = solve([x,y], vars)\n return sols\n\n def draw(self,values={}):\n xcoords = [0]\n 
ycoords = [0]\n for vector in self.vectors:\n if isinstance(vector.x, (float, int)):\n _cx = vector.x + xcoords[-1]\n _cy = vector.y + ycoords[-1]\n else:\n _cx = vector.x.subs(values) + xcoords[-1]\n _cy = vector.y.subs(values) + ycoords[-1]\n xcoords.append(_cx)\n ycoords.append(_cy)\n plt.plot(xcoords, ycoords, \"-o\")\n return xcoords,ycoords\n\n def __str__(self):\n s = \"<{x}, {y}>\".format(x=self.loop.x, y=self.loop.y)\n return s\n\n def __repr__(self):\n s = \"<{x}, {y}>\".format(x=self.loop.x, y=self.loop.y)\n return s\n\n def _repr_html_(self):\n from sympy.printing.latex import LatexPrinter\n from sympy.physics.vector import vlatex\n lp = LatexPrinter()\n x = vlatex(self.loop.x)\n y = vlatex(self.loop.y)\n return lp.doprint(r\"$$ \\begin{bmatrix} {%s} \\\\ {%s} \\end{bmatrix} $$\"%(x,y))\n\n\n\n\nif __name__ == '__main__':\n t = symbols(\"t\")\n r1,r2 = symbols(\"r_1:3\")\n t1,t2 = dynamicsymbols(\"\\\\theta_1:3\")\n v1 = Vector2D(x=1,y=2)\n v2 = Vector2D(x=3,y=4)\n v3 = Vector2D(r1,t1)\n # print(f\"{v1} + {v2} = {v1+v2}\")\n # print(f\"{v1} - {v2} = {v1-v2}\")\n # print(f\"{v1} . {v2} = {v1.dot(v2)}\")\n # print(f\"D_v3 = {v3.diff(t,2)}\")\n print(v3._repr_html_())"
] |
[
[
"matplotlib.pyplot.plot"
]
] |
ElgaSalvadore/watools
|
[
"daaaad474add572f32dd6a45a4230ccf636c479a",
"daaaad474add572f32dd6a45a4230ccf636c479a"
] |
[
"Collect/RFE/DataAccess.py",
"General/data_conversions.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nAuthors: Tim Hessels\n UNESCO-IHE 2016\nContact: t.hessels@unesco-ihe.org\nRepository: https://github.com/wateraccounting/wa\nModule: Collect/RFE\n\"\"\"\nfrom __future__ import print_function\n\nimport numpy as np\nimport os\nimport pandas as pd\nfrom ftplib import FTP\nfrom joblib import Parallel, delayed\n\nimport watools.General.data_conversions as DC\nimport watools.General.raster_conversions as RC\n\ndef DownloadData(Dir, Startdate, Enddate, latlim, lonlim, Waitbar, cores):\n \"\"\"\n This function downloads RFE daily or monthly data\n\n Keyword arguments:\n Dir -- 'C:/file/to/path/'\n Startdate -- 'yyyy-mm-dd'\n Enddate -- 'yyyy-mm-dd'\n latlim -- [ymin, ymax] (values must be between -50 and 50)\n lonlim -- [xmin, xmax] (values must be between -180 and 180)\n cores -- The number of cores used to run the routine. It can be 'False'\n to avoid using parallel computing routines.\n TimeCase -- String equal to 'daily' or 'monthly'\n Waitbar -- 1 (Default) will print a waitbar\n \"\"\"\n\n\t# Check variables\n if not Startdate:\n Startdate = pd.Timestamp('2001-01-01')\n if not Enddate:\n Enddate = pd.Timestamp('Now')\n Dates = pd.date_range(Startdate, Enddate, freq='D')\n\n # Create Waitbar\n if Waitbar == 1:\n import wa.Functions.Start.WaitbarConsole as WaitbarConsole\n total_amount = len(Dates)\n amount = 0\n WaitbarConsole.printWaitBar(amount, total_amount, prefix = 'Progress:', suffix = 'Complete', length = 50)\n\n if latlim[0] < -40.05 or latlim[1] > 40.05:\n print('Latitude above 50N or below 50S is not possible.'\n ' Value set to maximum')\n latlim[0] = np.max(latlim[0], -40.05)\n latlim[1] = np.min(lonlim[1], 40.05)\n if lonlim[0] < -20.05 or lonlim[1] > 55.05:\n print('Longitude must be between 180E and 180W.'\n ' Now value is set to maximum')\n lonlim[0] = np.max(latlim[0], -20.05)\n lonlim[1] = np.min(lonlim[1], 55.05)\n\n\t # Make directory\n output_folder = os.path.join(Dir, 'Precipitation', 'RFE', 'Daily/')\n if not os.path.exists(output_folder):\n os.makedirs(output_folder)\n\n\n # Define IDs\n yID = 801 - np.int16(np.array([np.ceil((latlim[1] + 40.05)*10),\n np.floor((latlim[0] + 40.05)*10)-1]))\n xID = np.int16(np.array([np.floor((lonlim[0] + 20.05)*10),\n np.ceil((lonlim[1] + 20.05)*10)+1]))\n\n # Pass variables to parallel function and run\n args = [output_folder, lonlim, latlim, xID, yID]\n\n if not cores:\n for Date in Dates:\n RetrieveData(Date, args)\n if Waitbar == 1:\n amount += 1\n WaitbarConsole.printWaitBar(amount, total_amount, prefix = 'Progress:', suffix = 'Complete', length = 50)\n results = True\n else:\n results = Parallel(n_jobs=cores)(delayed(RetrieveData)(Date, args)\n for Date in Dates)\n\n return results\n\n\ndef RetrieveData(Date, args):\n \"\"\"\n This function retrieves RFE data for a given date from the\n ftp://disc2.nascom.nasa.gov server.\n\n Keyword arguments:\n Date -- 'yyyy-mm-dd'\n args -- A list of parameters defined in the DownloadData function.\n \"\"\"\n # Argument\n [output_folder, lonlim, latlim, xID, yID] = args\n\n # Create https\n DirFile = os.path.join(output_folder,'P_RFE.v2.0_mm-day-1_daily_%s.%02s.%02s.tif' %(Date.strftime('%Y'), Date.strftime('%m'), Date.strftime('%d')))\n\n if not os.path.isfile(DirFile):\n # open ftp server\n ftp = FTP(\"ftp.cpc.ncep.noaa.gov\", \"\", \"\")\n ftp.login()\n\n \t # Define FTP path to directory\n pathFTP = 'fews/fewsdata/africa/rfe2/geotiff/'\n\n # find the document name in this directory\n ftp.cwd(pathFTP)\n listing = []\n\n # read all the file names in the 
directory\n ftp.retrlines(\"LIST\", listing.append)\n\n \t # create all the input name (filename) and output (outfilename, filetif, DiFileEnd) names\n filename = 'africa_rfe.%s%02s%02s.tif.zip' %(Date.strftime('%Y'), Date.strftime('%m'), Date.strftime('%d'))\n outfilename = os.path.join(output_folder,'africa_rfe.%s%02s%02s.tif' %(Date.strftime('%Y'), Date.strftime('%m'), Date.strftime('%d')))\n\n try:\n local_filename = os.path.join(output_folder, filename)\n lf = open(local_filename, \"wb\")\n ftp.retrbinary(\"RETR \" + filename, lf.write)\n lf.close()\n\n # unzip the file\n zip_filename = os.path.join(output_folder, filename)\n DC.Extract_Data(zip_filename, output_folder)\n\n # open tiff file\n dataset = RC.Open_tiff_array(outfilename)\n\n # clip dataset to the given extent\n data = dataset[yID[0]:yID[1], xID[0]:xID[1]]\n data[data < 0] = -9999\n\n # save dataset as geotiff file\n latlim_adj = 40.05 - 0.1 * yID[0]\n lonlim_adj = -20.05 + 0.1 * xID[0]\n geo = [lonlim_adj, 0.1, 0, latlim_adj, 0, -0.1]\n DC.Save_as_tiff(name=DirFile, data=data, geo=geo, projection=\"WGS84\")\n\n # delete old tif file\n os.remove(outfilename)\n os.remove(zip_filename)\n\n except:\n print(\"file not exists\")\n\n\n return True\n",
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Dec 18 13:07:32 2016\n\n@author: tih\n\"\"\"\nfrom __future__ import division\nfrom builtins import range\nimport gzip\nimport zipfile\nimport tarfile\nfrom osgeo import gdal, osr\n#import osr\nimport os\nimport pandas as pd\nimport numpy as np\nimport netCDF4\nimport time\n\ndef Convert_nc_to_tiff(input_nc, output_folder):\n \"\"\"\n This function converts the nc file into tiff files\n\n Keyword Arguments:\n input_nc -- name, name of the adf file\n output_folder -- Name of the output tiff file\n \"\"\"\n from datetime import date\n import watools.General.raster_conversions as RC\n\n #All_Data = RC.Open_nc_array(input_nc)\n\n if type(input_nc) == str:\n nc = netCDF4.Dataset(input_nc)\n elif type(input_nc) == list:\n nc = netCDF4.MFDataset(input_nc)\n\n Var = list(nc.variables.keys())[-1]\n All_Data = nc[Var]\n\n geo_out, epsg, size_X, size_Y, size_Z, Time = RC.Open_nc_info(input_nc)\n\n if epsg == 4326:\n epsg = 'WGS84'\n\n # Create output folder if needed\n if not os.path.exists(output_folder):\n os.mkdir(output_folder)\n\n for i in range(0,size_Z):\n if not Time == -9999:\n time_one = Time[i]\n d = date.fromordinal(time_one)\n name = os.path.splitext(os.path.basename(input_nc))[0]\n nameparts = name.split('_')[0:-2]\n name_out = os.path.join(output_folder, '_'.join(nameparts) + '_%d.%02d.%02d.tif' %(d.year, d.month, d.day))\n Data_one = All_Data[i,:,:]\n else:\n name=os.path.splitext(os.path.basename(input_nc))[0]\n name_out = os.path.join(output_folder, name + '.tif')\n Data_one = All_Data[:,:]\n\n Save_as_tiff(name_out, Data_one, geo_out, epsg)\n\n return()\n\ndef Convert_grb2_to_nc(input_wgrib, output_nc, band):\n\n import watools.General.raster_conversions as RC\n\n # Get environmental variable\n# WA_env_paths = os.environ[\"WA_PATHS\"].split(';')\n# GDAL_env_path = WA_env_paths[0]\n# GDAL_TRANSLATE_PATH = os.path.join(GDAL_env_path, 'gdal_translate.exe')\n GDAL_TRANSLATE_PATH = 'gdal_translate' \n # Create command\n fullCmd = ' '.join(['%s -of netcdf -b %d' %(GDAL_TRANSLATE_PATH, band), input_wgrib, output_nc]) # -r {nearest}\n\n RC.Run_command_window(fullCmd)\n\n return()\n\ndef Convert_adf_to_tiff(input_adf, output_tiff):\n \"\"\"\n This function converts the adf files into tiff files\n\n Keyword Arguments:\n input_adf -- name, name of the adf file\n output_tiff -- Name of the output tiff file\n \"\"\"\n import watools.General.raster_conversions as RC\n\n # Get environmental variable\n WA_env_paths = os.environ[\"WA_PATHS\"].split(';')\n GDAL_env_path = WA_env_paths[0]\n GDAL_TRANSLATE_PATH = os.path.join(GDAL_env_path, 'gdal_translate.exe')\n\n # convert data from ESRI GRID to GeoTIFF\n fullCmd = ('\"%s\" -co COMPRESS=DEFLATE -co PREDICTOR=1 -co '\n 'ZLEVEL=1 -of GTiff %s %s') % (GDAL_TRANSLATE_PATH, input_adf, output_tiff)\n\n RC.Run_command_window(fullCmd)\n\n return(output_tiff)\n\ndef Convert_bil_to_tiff(input_bil, output_tiff):\n \"\"\"\n This function converts the bil files into tiff files\n\n Keyword Arguments:\n input_bil -- name, name of the bil file\n output_tiff -- Name of the output tiff file\n \"\"\"\n import gdalconst\n \n gdal.GetDriverByName('EHdr').Register()\n dest = gdal.Open(input_bil, gdalconst.GA_ReadOnly)\n Array = dest.GetRasterBand(1).ReadAsArray()\n geo_out = dest.GetGeoTransform()\n Save_as_tiff(output_tiff, Array, geo_out, \"WGS84\") \n \n return(output_tiff)\n \n\ndef Convert_hdf5_to_tiff(inputname_hdf, Filename_tiff_end, Band_number, scaling_factor, geo_out):\n \"\"\"\n This function converts the hdf5 
files into tiff files\n\n Keyword Arguments:\n input_adf -- name, name of the adf file\n output_tiff -- Name of the output tiff file\n Band_number -- bandnumber of the hdf5 that needs to be converted\n scaling_factor -- factor multipied by data is the output array\n geo -- [minimum lon, pixelsize, rotation, maximum lat, rotation,\n pixelsize], (geospatial dataset)\n \"\"\"\n import watools.General.raster_conversions as RC\n \n # Open the hdf file\n g = gdal.Open(inputname_hdf, gdal.GA_ReadOnly)\n\n # Define temporary file out and band name in\n name_in = g.GetSubDatasets()[Band_number][0]\n\n # Get environmental variable\n WA_env_paths = os.environ[\"WA_PATHS\"].split(';')\n GDAL_env_path = WA_env_paths[0]\n GDAL_TRANSLATE = os.path.join(GDAL_env_path, 'gdal_translate.exe')\n\n # run gdal translate command\n FullCmd = '%s -of GTiff %s %s' %(GDAL_TRANSLATE, name_in, Filename_tiff_end)\n RC.Run_command_window(FullCmd)\n\n # Get the data array\n dest = gdal.Open(Filename_tiff_end)\n Data = dest.GetRasterBand(1).ReadAsArray()\n dest = None\n \n # If the band data is not SM change the DN values into PROBA-V values and write into the spectral_reflectance_PROBAV\n Data_scaled = Data * scaling_factor\n\n # Save the PROBA-V as a tif file\n Save_as_tiff(Filename_tiff_end, Data_scaled, geo_out, \"WGS84\") \n \n return()\n\ndef Extract_Data(input_file, output_folder):\n \"\"\"\n This function extract the zip files\n\n Keyword Arguments:\n output_file -- name, name of the file that must be unzipped\n output_folder -- Dir, directory where the unzipped data must be\n stored\n \"\"\"\n # extract the data\n z = zipfile.ZipFile(input_file, 'r')\n z.extractall(output_folder)\n z.close()\n\ndef Extract_Data_gz(zip_filename, outfilename):\n \"\"\"\n This function extract the zip files\n\n Keyword Arguments:\n zip_filename -- name, name of the file that must be unzipped\n outfilename -- Dir, directory where the unzipped data must be\n stored\n \"\"\"\n\n with gzip.GzipFile(zip_filename, 'rb') as zf:\n file_content = zf.read()\n save_file_content = open(outfilename, 'wb')\n save_file_content.write(file_content)\n save_file_content.close()\n zf.close()\n os.remove(zip_filename)\n \ndef Extract_Data_tar_gz(zip_filename, output_folder):\n \"\"\"\n This function extract the tar.gz files\n\n Keyword Arguments:\n zip_filename -- name, name of the file that must be unzipped\n output_folder -- Dir, directory where the unzipped data must be\n stored\n \"\"\"\n\n os.chdir(output_folder)\n tar = tarfile.open(zip_filename, \"r:gz\")\n tar.extractall()\n tar.close()\n \n\ndef Save_as_tiff(name='', data='', geo='', projection=''):\n \"\"\"\n This function save the array as a geotiff\n\n Keyword arguments:\n name -- string, directory name\n data -- [array], dataset of the geotiff\n geo -- [minimum lon, pixelsize, rotation, maximum lat, rotation,\n pixelsize], (geospatial dataset)\n projection -- integer, the EPSG code\n \"\"\"\n # save as a geotiff\n driver = gdal.GetDriverByName(\"GTiff\")\n dst_ds = driver.Create(name, int(data.shape[1]), int(data.shape[0]), 1,\n gdal.GDT_Float32, ['COMPRESS=LZW'])\n srse = osr.SpatialReference()\n if projection == '':\n srse.SetWellKnownGeogCS(\"WGS84\")\n\n else:\n try:\n if not srse.SetWellKnownGeogCS(projection) == 6:\n srse.SetWellKnownGeogCS(projection)\n else:\n try:\n srse.ImportFromEPSG(int(projection))\n except:\n srse.ImportFromWkt(projection)\n except:\n try:\n srse.ImportFromEPSG(int(projection))\n except:\n srse.ImportFromWkt(projection)\n\n 
dst_ds.SetProjection(srse.ExportToWkt())\n dst_ds.GetRasterBand(1).SetNoDataValue(-9999)\n dst_ds.SetGeoTransform(geo)\n dst_ds.GetRasterBand(1).WriteArray(data)\n dst_ds = None\n return()\n\ndef Save_as_MEM(data='', geo='', projection=''):\n \"\"\"\n This function save the array as a memory file\n\n Keyword arguments:\n data -- [array], dataset of the geotiff\n geo -- [minimum lon, pixelsize, rotation, maximum lat, rotation,\n pixelsize], (geospatial dataset)\n projection -- interger, the EPSG code\n \"\"\"\n # save as a geotiff\n driver = gdal.GetDriverByName(\"MEM\")\n dst_ds = driver.Create('', int(data.shape[1]), int(data.shape[0]), 1,\n gdal.GDT_Float32)\n srse = osr.SpatialReference()\n if projection == '':\n srse.SetWellKnownGeogCS(\"WGS84\")\n else:\n srse.SetWellKnownGeogCS(projection)\n dst_ds.SetProjection(srse.ExportToWkt())\n dst_ds.GetRasterBand(1).SetNoDataValue(-9999)\n dst_ds.SetGeoTransform(geo)\n dst_ds.GetRasterBand(1).WriteArray(data)\n return(dst_ds)\n\ndef Save_as_NC(namenc, DataCube, Var, Reference_filename, Startdate = '', Enddate = '', Time_steps = '', Scaling_factor = 1):\n \"\"\"\n This function save the array as a netcdf file\n\n Keyword arguments:\n namenc -- string, complete path of the output file with .nc extension\n DataCube -- [array], dataset of the nc file, can be a 2D or 3D array [time, lat, lon], must be same size as reference data\n Var -- string, the name of the variable\n Reference_filename -- string, complete path to the reference file name\n Startdate -- 'YYYY-mm-dd', needs to be filled when you want to save a 3D array, defines the Start datum of the dataset\n Enddate -- 'YYYY-mm-dd', needs to be filled when you want to save a 3D array, defines the End datum of the dataset\n Time_steps -- 'monthly' or 'daily', needs to be filled when you want to save a 3D array, defines the timestep of the dataset\n Scaling_factor -- number, scaling_factor of the dataset, default = 1\n \"\"\"\n # Import modules\n import watools.General.raster_conversions as RC\n from netCDF4 import Dataset\n\n if not os.path.exists(namenc):\n\n # Get raster information\n geo_out, proj, size_X, size_Y = RC.Open_array_info(Reference_filename)\n\n # Create the lat/lon rasters\n lon = np.arange(size_X)*geo_out[1]+geo_out[0] - 0.5 * geo_out[1]\n lat = np.arange(size_Y)*geo_out[5]+geo_out[3] - 0.5 * geo_out[5]\n\n # Create the nc file\n nco = Dataset(namenc, 'w', format='NETCDF4_CLASSIC')\n nco.description = '%s data' %Var\n\n # Create dimensions, variables and attributes:\n nco.createDimension('longitude', size_X)\n nco.createDimension('latitude', size_Y)\n\n # Create time dimension if the parameter is time dependent\n if Startdate is not '':\n if Time_steps == 'monthly':\n Dates = pd.date_range(Startdate,Enddate,freq = 'MS')\n if Time_steps == 'daily':\n Dates = pd.date_range(Startdate,Enddate,freq = 'D')\n time_or=np.zeros(len(Dates))\n i = 0\n for Date in Dates:\n time_or[i] = Date.toordinal()\n i += 1\n nco.createDimension('time', None)\n timeo = nco.createVariable('time', 'f4', ('time',))\n timeo.units = '%s' %Time_steps\n timeo.standard_name = 'time'\n\n # Create the lon variable\n lono = nco.createVariable('longitude', 'f8', ('longitude',))\n lono.standard_name = 'longitude'\n lono.units = 'degrees_east'\n lono.pixel_size = geo_out[1]\n\n # Create the lat variable\n lato = nco.createVariable('latitude', 'f8', ('latitude',))\n lato.standard_name = 'latitude'\n lato.units = 'degrees_north'\n lato.pixel_size = geo_out[5]\n\n # Create container variable for CRS: lon/lat WGS84 
datum\n crso = nco.createVariable('crs', 'i4')\n crso.long_name = 'Lon/Lat Coords in WGS84'\n crso.grid_mapping_name = 'latitude_longitude'\n crso.projection = proj\n crso.longitude_of_prime_meridian = 0.0\n crso.semi_major_axis = 6378137.0\n crso.inverse_flattening = 298.257223563\n crso.geo_reference = geo_out\n\n # Create the data variable\n if Startdate is not '':\n preco = nco.createVariable('%s' %Var, 'f8', ('time', 'latitude', 'longitude'), zlib=True, least_significant_digit=1)\n timeo[:]=time_or\n else:\n preco = nco.createVariable('%s' %Var, 'f8', ('latitude', 'longitude'), zlib=True, least_significant_digit=1)\n\n # Set the data variable information\n preco.scale_factor = Scaling_factor\n preco.add_offset = 0.00\n preco.grid_mapping = 'crs'\n preco.set_auto_maskandscale(False)\n\n # Set the lat/lon variable\n lono[:] = lon\n lato[:] = lat\n\n # Set the data variable\n if Startdate is not '':\n for i in range(len(Dates)):\n preco[i,:,:] = DataCube[i,:,:] / np.float(Scaling_factor)\n else:\n preco[:,:] = DataCube[:,:] / np.float(Scaling_factor)\n\n nco.close()\n return()\n\ndef Create_NC_name(Var, Simulation, Dir_Basin, sheet_nmbr, info = ''):\n\n # Create the output name\n nameOut=''.join(['_'.join([Var,'Simulation%d' % Simulation,'_'.join(info)]),'.nc'])\n namePath = os.path.join(Dir_Basin,'Simulations','Simulation_%d' %Simulation, 'Sheet_%d' %sheet_nmbr)\n if not os.path.exists(namePath):\n os.makedirs(namePath)\n nameTot=os.path.join(namePath,nameOut)\n\n return(nameTot)\n\ndef Create_new_NC_file(nc_outname, Basin_Example_File, Basin):\n\n # Open basin file\n dest = gdal.Open(Basin_Example_File)\n Basin_array = dest.GetRasterBand(1).ReadAsArray()\n Basin_array[np.isnan(Basin_array)] = -9999\n Basin_array[Basin_array<0] = -9999\n\n # Get Basic information\n Geo = dest.GetGeoTransform()\n size_X = dest.RasterXSize\n size_Y = dest.RasterYSize\n epsg = dest.GetProjection()\n\n # Get Year and months\n year = int(os.path.basename(nc_outname).split(\".\")[0])\n Dates = pd.date_range(\"%d-01-01\" %year, \"%d-12-31\" %year, freq = \"MS\")\n\n # Latitude and longitude\n lons = np.arange(size_X)*Geo[1]+Geo[0] + 0.5 * Geo[1]\n lats = np.arange(size_Y)*Geo[5]+Geo[3] + 0.5 * Geo[5]\n\n # Create NetCDF file\n nco = netCDF4.Dataset(nc_outname, 'w', format = 'NETCDF4_CLASSIC')\n nco.set_fill_on()\n nco.description = '%s' %Basin\n\n # Create dimensions\n nco.createDimension('latitude', size_Y)\n nco.createDimension('longitude', size_X)\n nco.createDimension('time', None)\n\n # Create NetCDF variables\n crso = nco.createVariable('crs', 'i4')\n crso.long_name = 'Lon/Lat Coords in WGS84'\n crso.standard_name = 'crs'\n crso.grid_mapping_name = 'latitude_longitude'\n crso.projection = epsg\n crso.longitude_of_prime_meridian = 0.0\n crso.semi_major_axis = 6378137.0\n crso.inverse_flattening = 298.257223563\n crso.geo_reference = Geo\n\n ######################### Save Rasters in NetCDF ##############################\n\n lato = nco.createVariable('latitude', 'f8', ('latitude',))\n lato.units = 'degrees_north'\n lato.standard_name = 'latitude'\n lato.pixel_size = Geo[5]\n\n lono = nco.createVariable('longitude', 'f8', ('longitude',))\n lono.units = 'degrees_east'\n lono.standard_name = 'longitude'\n lono.pixel_size = Geo[1]\n\n timeo = nco.createVariable('time', 'f4', ('time',))\n timeo.units = 'Monthly'\n timeo.standard_name = 'time'\n\n # Variables\n basin_var = nco.createVariable('Landuse', 'i',\n ('latitude', 'longitude'),\n fill_value=-9999)\n basin_var.long_name = 'Landuse'\n basin_var.grid_mapping 
= 'crs'\n\n # Create time unit\n i = 0\n time_or=np.zeros(len(Dates))\n for Date in Dates:\n time_or[i] = Date.toordinal()\n i += 1\n\n # Load data\n lato[:] = lats\n lono[:] = lons\n timeo[:] = time_or\n basin_var[:,:] = Basin_array\n\n # close the file\n time.sleep(1)\n nco.close()\n return()\n\ndef Add_NC_Array_Variable(nc_outname, Array, name, unit, Scaling_factor = 1):\n\n # create input array\n Array[np.isnan(Array)] = -9999 * np.float(Scaling_factor)\n Array = np.int_(Array /np.float(Scaling_factor))\n\n # Create NetCDF file\n nco = netCDF4.Dataset(nc_outname, 'r+', format = 'NETCDF4_CLASSIC')\n nco.set_fill_on()\n\n paro = nco.createVariable('%s' %name, 'i',\n ('time', 'latitude', 'longitude'),fill_value=-9999,\n zlib=True, least_significant_digit=0)\n\n paro.scale_factor = Scaling_factor\n paro.add_offset = 0.00\n paro.grid_mapping = 'crs'\n paro.long_name = name\n paro.units = unit\n paro.set_auto_maskandscale(False)\n\n # Set the data variable\n paro[:,:,:] = Array\n\n # close the file\n time.sleep(1)\n nco.close()\n\n return()\n\ndef Add_NC_Array_Static(nc_outname, Array, name, unit, Scaling_factor = 1):\n\n # create input array\n Array[np.isnan(Array)] = -9999 * np.float(Scaling_factor)\n Array = np.int_(Array /np.float(Scaling_factor))\n\n # Create NetCDF file\n nco = netCDF4.Dataset(nc_outname, 'r+', format = 'NETCDF4_CLASSIC')\n nco.set_fill_on()\n\n paro = nco.createVariable('%s' %name, 'i',\n ('latitude', 'longitude'),fill_value=-9999,\n zlib=True, least_significant_digit=0)\n\n paro.scale_factor = Scaling_factor\n paro.add_offset = 0.00\n paro.grid_mapping = 'crs'\n paro.long_name = name\n paro.units = unit\n paro.set_auto_maskandscale(False)\n\n # Set the data variable\n paro[:,:] = Array\n\n # close the file\n time.sleep(1)\n nco.close()\n\n return()\n\ndef Convert_dict_to_array(River_dict, Array_dict, Reference_data):\n\n import numpy as np\n import os\n import watools.General.raster_conversions as RC\n\n if os.path.splitext(Reference_data)[-1] == '.nc':\n # Get raster information\n geo_out, proj, size_X, size_Y, size_Z, Time = RC.Open_nc_info(Reference_data)\n else:\n # Get raster information\n geo_out, proj, size_X, size_Y = RC.Open_array_info(Reference_data)\n\n # Create ID Matrix\n y,x = np.indices((size_Y, size_X))\n ID_Matrix = np.int32(np.ravel_multi_index(np.vstack((y.ravel(),x.ravel())),(size_Y,size_X),mode='clip').reshape(x.shape)) + 1\n\n # Get tiff array time dimension:\n time_dimension = int(np.shape(Array_dict[0])[0])\n\n # create an empty array\n DataCube = np.ones([time_dimension, size_Y, size_X]) * np.nan\n\n for river_part in range(0,len(River_dict)):\n for river_pixel in range(1,len(River_dict[river_part])):\n river_pixel_ID = River_dict[river_part][river_pixel]\n if len(np.argwhere(ID_Matrix == river_pixel_ID))>0:\n row, col = np.argwhere(ID_Matrix == river_pixel_ID)[0][:]\n DataCube[:,row,col] = Array_dict[river_part][:,river_pixel]\n\n return(DataCube)\n\n"
] |
[
[
"numpy.max",
"numpy.ceil",
"pandas.date_range",
"numpy.min",
"pandas.Timestamp",
"numpy.floor"
],
[
"numpy.isnan",
"numpy.float",
"pandas.date_range",
"numpy.ones",
"numpy.shape",
"numpy.arange",
"numpy.argwhere",
"numpy.indices"
]
] |
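In DataAccess.py above, the latitude/longitude bounds are clipped with calls such as np.max(latlim[0], -40.05) and np.min(lonlim[1], 40.05); with two scalar arguments NumPy reads the second one as the axis parameter, so element-wise clamping is normally written with np.maximum / np.minimum. A tiny standalone illustration of that form (the sample bounds are invented; only numpy is assumed):

```python
import numpy as np

latlim = [-60.0, 45.0]                      # hypothetical request outside RFE coverage
latlim[0] = np.maximum(latlim[0], -40.05)   # clamp the lower latitude bound
latlim[1] = np.minimum(latlim[1], 40.05)    # clamp the upper latitude bound
print(latlim[0], latlim[1])                 # -40.05 40.05
```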
Tomwmg/player-detection
|
[
"a5212bf80353a3b56c92f64bc07562161fa386f3"
] |
[
"train.py"
] |
[
"# general packages\nimport numpy as np\nimport random\nimport os\nimport errno\nimport argparse\nimport torch\nfrom torch.utils import model_zoo\nfrom torch.autograd import Variable\nfrom torch.optim import lr_scheduler\n\nfrom data_loader import YC2_train_data\nfrom model import DVSA\nfrom datetime import datetime\nfrom torch.utils.data import Dataset, DataLoader\nfrom config import Config\n\n\ntorch.manual_seed(123)\nnp.random.seed(123)\ntorch.backends.cudnn.deterministic = True\nCUDA = True if torch.cuda.is_available() else False\n\n\ndef pause():\n programPause = input(\"Press the <ENTER> key to continue...\")\n\n\nif torch.cuda.is_available(): \n torch.cuda.manual_seed_all(123)\n\n\n\ndef adjust_learning_rate(optimizer, epoch, drop_rate, step):\n \"\"\"Sets the learning rate to the initial LR decayed by 10 every 30 epochs\"\"\"\n lr = args.learning_rate * (drop_rate ** (epoch // step))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\ndef main(args):\n try:\n os.makedirs(args.checkpoint_path)\n except OSError as e:\n if e.errno == errno.EEXIST:\n print('Directory already exists.')\n else:\n raise\n print('loading dataset')\n train_set, val_set = get_dataset(args)\n train_loader = DataLoader(train_set, batch_size = 10, shuffle = True, num_workers= 6)\n #test_loader = DataLoader(val_set, batch_size=1, shuffle = False, num_workers = 1)\n\n print('building model')\n model = get_model(args)\n model.cuda()\n \n optimizer = torch.optim.Adam(model.parameters(), lr=args.learn_rate, weight_decay=1e-4)\n exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size= 30*len(train_loader), gamma = 1e-1)\n if args.start_from!='':\n print('start from args.start_from')\n checkpoint = torch.load(args.start_from)\n model.load_state_dict(checkpoint)\n 'start training'\n for epoch in range(1, args.max_epochs + 1):\n model.train()\n epoch_loss = 0.\n loss_time=0\n\n output_str = \"%s \" %(datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"))\n for batch_cnt, batch in enumerate(train_loader):\n\n pos_feature, neg_feature, = batch\n pos_feature = Variable(pos_feature).cuda()\n neg_feature = Variable(neg_feature).cuda()\n cost = model(pos_feature, neg_feature)\n optimizer.zero_grad()\n exp_lr_scheduler.step() \n cost.backward()\n optimizer.step()\n epoch_loss += cost.item()\n loss_time+=1\n\n\n\n print(output_str, ' epoch: %d, epoch_loss: %lf, lr: %lf' % (epoch, epoch_loss / loss_time, optimizer.param_groups[0]['lr']))\n if (epoch+1)%5==0:\n torch.save(model.state_dict(),\n args.checkpoint_path + '/' + '%d'%epoch + '.pth')\n\ndef get_model(args):\n model=DVSA()\n return model\ndef get_dataset(args):\n train_set = YC2_train_data(pos_feature='pos.npy',\n neg_feature='neg.npy')\n val_set = YC2_train_data(pos_feature='pos.npy',\n neg_feature='neg.npy',train=False)\n\n return train_set,val_set\n\nif __name__ == \"__main__\":\n args = Config()\n main(args)\n"
] |
[
[
"torch.cuda.manual_seed_all",
"numpy.random.seed",
"torch.autograd.Variable",
"torch.manual_seed",
"torch.cuda.is_available",
"torch.utils.data.DataLoader",
"torch.load"
]
] |
robfatland/nlp
|
[
"f4cab048d21a4c63726d19c1d453f1d68b2589a0"
] |
[
"austen-kmeans.py"
] |
[
"from collections import Counter\r\nfrom collections import defaultdict\r\nimport glob, os\r\nimport pandas as pd\r\nfrom sklearn.cluster import KMeans\r\nfrom sklearn.decomposition import PCA\r\nimport matplotlib.pyplot as plt \r\n\r\n\r\n\r\n'''\r\nThe 'austen_alcott' folder contains a set of novels by Jane Austen and Louisa May Alcott. \r\n\r\nRead these files and create a vector table with the top 50 most frequent words in the \r\ncorpus as your feature-set. So each text will be represented as a set of relative frequencies \r\nover these words. Use K-means clustering from Scikit Learn to find two clusters. Plot with labels \r\nand color the dots in the two clusters differently \r\n'''\r\n\r\n##### TEXT PROCESSING LIB ####\r\ndef tokenize(s):\r\n \"\"\"\r\n Input: \r\n string s\r\n Output: \r\n list of strings\r\n \"\"\"\r\n return s.split()\r\n\r\ndef preprocess(s, lowercase=True, strip_punctuation=True):\r\n \"\"\"\r\n Input:\r\n string s\r\n boolean lowercase\r\n boolean strip_punctuation\r\n Return:\r\n list of strings\r\n \"\"\"\r\n punctuation = '.,?<>:;\"\\'!%'\r\n if isinstance(s, str):\r\n s = tokenize(s)\r\n if lowercase:\r\n s = [t.lower() for t in s]\r\n if strip_punctuation:\r\n s = [t.strip(punctuation) for t in s]\r\n \r\n return s\r\n\r\ndef token_frequency(tokens=None, tf={}, relative=False):\r\n \"\"\"\r\n Input:\r\n tokens = list of strings or None\r\n tf = dict or None\r\n relative = boolean\r\n Return:\r\n dictionary of token frequencies\r\n \"\"\"\r\n for t in tokens:\r\n if t in tf:\r\n tf[t]+=1\r\n else:\r\n tf[t]=1\r\n if relative:\r\n total = sum([c for t, c in tf.items()])\r\n tf = {t:tf[t]/total for t in tf}\r\n return tf\r\n\r\n#### PROCESSING ####\r\n# 1. Get files from folder\r\nfilepath = './austen_alcott/*.txt'\r\nfiles = glob.glob(filepath)\r\nlabels = [os.path.split(f)[1][:-4].replace('_', ' ').title() for f in files]\r\n\r\n# 2. Get 50 most freq words in corpus for feature set (wk5 assignment)\r\nword_freq = dict()\r\nfor f in files:\r\n file = open(f, \"r\")\r\n word_list = preprocess(file.read()) # process list of words from file\r\n word_freq = token_frequency(word_list, tf=word_freq) # get freq for new list of words\r\n\r\n # sort dict in decreasing order by frequencies (v) and print first 50 tokens\r\nfeatures = [word for word, freq in sorted(word_freq.items(), key=lambda x: x[1], reverse=True)][0:50]\r\n\r\n# 3. Vectorize over feature set\r\nvectors = []\r\nfor file_name in files:\r\n text = preprocess(open(file_name, 'r').read())\r\n freqs = token_frequency(text, relative=True)\r\n vectors.append({k:freqs[k] for k in freqs if k in features})\r\n\r\n # Put labels, features, vectors into a single dataframe\r\nvectors_df = pd.DataFrame(vectors, index=labels, columns=features).fillna(0)\r\n\r\n# 4. Use K-means clustering from Scikit Learn to find two clusters. \r\nn_clusters=2\r\nkmeans = KMeans(n_clusters=n_clusters, random_state=0).fit(vectors_df)\r\n\r\n# 5. 
Plot with labels and color the dots in the two clusters differently (from text processing lib)\r\npca = PCA(n_components=2)\r\ntransformed = pca.fit_transform(vectors_df)\r\nx = transformed[:,0]\r\ny = transformed[:,1]\r\ncol_dict = {0:'green', 1:'blue'}\r\ncols = [col_dict[l] for l in kmeans.labels_]\r\nplt.figure(figsize=(15,10))\r\nplt.scatter(x,y, c=cols, s=100, alpha=.5)\r\nfor i, l in enumerate(labels):\r\n plt.text(x[i]+.0003,y[i]-.0001, l)\r\nfor i, c in enumerate(pca.components_.transpose()):\r\n plt.arrow(0,0, c[0]/50, c[1]/50, alpha=.3, width=.0001)\r\n plt.text(c[0]/50, c[1]/50, features[i])\r\nplt.xlabel('PCA1')\r\nplt.ylabel('PCA2')\r\nplt.title('Austen works in space of 50 most freq words')\r\nplt.show()\r\n\r\nplt.savefig(\"kmeans-clustering.png\")\r\n"
] |
[
[
"matplotlib.pyplot.text",
"matplotlib.pyplot.arrow",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.title",
"pandas.DataFrame",
"sklearn.cluster.KMeans",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.scatter",
"sklearn.decomposition.PCA"
]
] |
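austen-kmeans.py above builds a relative-frequency table over the 50 most frequent words, clusters it with KMeans(n_clusters=2) and plots a 2-D PCA projection. A stripped-down, self-contained run of just that KMeans + PCA step on stand-in vectors (the 6x50 random matrix and the novel_i names are invented; n_init=10 is added only to keep newer scikit-learn releases from warning):

```python
import numpy as np
import pandas as pd
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA

rng = np.random.default_rng(0)
vectors_df = pd.DataFrame(rng.random((6, 50)),
                          index=[f"novel_{i}" for i in range(6)])

kmeans = KMeans(n_clusters=2, random_state=0, n_init=10).fit(vectors_df)
coords = PCA(n_components=2).fit_transform(vectors_df)   # one (x, y) point per text
print(kmeans.labels_)   # cluster id for each of the 6 stand-in texts
print(coords.shape)     # (6, 2)
```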
dadu0699/tytus
|
[
"e1920f6932c840859e3e79eb8756a1d3da88bd77"
] |
[
"bases_2021_1S/Grupo 10/server/utilities/analisys_parser/analizer/statement/instructions/select/from_.py"
] |
[
"from utilities.analisys_parser.analizer.reports import Nodo\nfrom utilities.analisys_parser.analizer.abstract import instruction\nfrom utilities.analisys_parser.analizer.statement.instructions.select.select import Select\nfrom utilities.analisys_parser.analizer.symbol.symbol import Symbol\nfrom utilities.analisys_parser.analizer.abstract.expression import Expression\nfrom utilities.storage import avlMode\nfrom utilities.analisys_parser.analizer.typechecker.Metadata import Struct\n\nimport pandas as pd\n\n\nclass FromClause(instruction.Instruction):\n \"\"\"\n Clase encargada de la clausa FROM para la obtencion de datos\n \"\"\"\n\n def __init__(self, tables, aliases, row, column):\n instruction.Instruction.__init__(self, row, column)\n self.tables = tables\n self.aliases = aliases\n\n def crossJoin(self, tables):\n if len(tables) <= 1:\n return tables[0]\n for t in tables:\n t[\"____tempCol\"] = 1\n\n new_df = tables[0]\n i = 1\n while i < len(tables):\n new_df = pd.merge(new_df, tables[i], on=[\"____tempCol\"])\n i += 1\n\n new_df = new_df.drop(\"____tempCol\", axis=1)\n return new_df\n\n def execute(self, environment):\n Struct.load()\n tempDf = None\n for i in range(len(self.tables)):\n exec = self.tables[i].execute(environment)\n data = exec[0]\n types = exec[1]\n if isinstance(self.tables[i], Select):\n newNames = {}\n subqAlias = self.aliases[i]\n for (columnName, columnData) in data.iteritems():\n colSplit = columnName.split(\".\")\n if len(colSplit) >= 2:\n newNames[columnName] = subqAlias + \".\" + colSplit[1]\n types[subqAlias + \".\" + colSplit[1]] = columnName\n else:\n newNames[columnName] = subqAlias + \".\" + colSplit[0]\n types[subqAlias + \".\" + colSplit[0]] = columnName\n data.rename(columns=newNames, inplace=True)\n environment.addVar(subqAlias, subqAlias, \"TABLE\", self.row, self.column)\n else:\n sym = Symbol(\n self.tables[i].name,\n None,\n self.tables[i].row,\n self.tables[i].column,\n )\n environment.addSymbol(self.tables[i].name, sym)\n if self.aliases[i]:\n environment.addSymbol(self.aliases[i], sym)\n if i == 0:\n tempDf = data\n else:\n tempDf = self.crossJoin([tempDf, data])\n environment.dataFrame = tempDf\n try:\n environment.types.update(types)\n except:\n instruction.syntaxPostgreSQL.append(\n \"Error: P0001: Error en la instruccion SELECT clausula FROM\"\n )\n return\n\n def dot(self):\n new = Nodo.Nodo(\"FROM\")\n for t in self.tables:\n if isinstance(t, Select):\n n = t.dot()\n new.addNode(n)\n else:\n t1 = Nodo.Nodo(t.name)\n new.addNode(t1)\n for a in self.aliases:\n a1 = Nodo.Nodo(a)\n new.addNode(a1)\n return new\n\n\nclass TableID(Expression):\n \"\"\"\n Esta clase representa un objeto abstracto para el manejo de las tablas\n \"\"\"\n\n type_ = None\n\n def __init__(self, name, row, column):\n Expression.__init__(self, row, column)\n self.name = name\n\n def execute(self, environment):\n result = avlMode.extractTable(instruction.dbtemp, self.name)\n if result == None:\n instruction.semanticErrors.append(\n [\n \"La tabla \"\n + str(self.name)\n + \" no pertenece a la base de datos \"\n + instruction.dbtemp,\n self.row,\n ]\n )\n instruction.syntaxPostgreSQL.append(\n \"Error: 42P01: la relacion \"\n + instruction.dbtemp\n + \".\"\n + str(self.name)\n + \" no existe\"\n )\n return \"FATAL ERROR TABLE ID\"\n # Almacena una lista con con el nombre y tipo de cada columna\n lst = Struct.extractColumns(instruction.dbtemp, self.name)\n columns = [l.name for l in lst]\n newColumns = [self.name + \".\" + col for col in columns]\n df = 
pd.DataFrame(result, columns=newColumns)\n environment.addTable(self.name)\n tempTypes = {}\n for i in range(len(newColumns)):\n tempTypes[newColumns[i]] = lst[i].type\n return [df, tempTypes]\n"
] |
[
[
"pandas.DataFrame",
"pandas.merge"
]
] |
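The FromClause.crossJoin method in from_.py above emulates a Cartesian product by adding a constant ____tempCol column to every frame and merging on it with pd.merge. The same trick in isolation, on two made-up single-column tables (pandas only):

```python
import pandas as pd

a = pd.DataFrame({"t1.id": [1, 2]})
b = pd.DataFrame({"t2.name": ["x", "y"]})

a["____tempCol"] = 1
b["____tempCol"] = 1
cross = pd.merge(a, b, on=["____tempCol"]).drop("____tempCol", axis=1)
print(cross)    # 4 rows: every t1.id paired with every t2.name
```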
deisemaia/Higra
|
[
"82cb78b606a383f3961faa882457a9a987f802e0",
"82cb78b606a383f3961faa882457a9a987f802e0"
] |
[
"higra/hierarchy/random_hierarchy.py",
"higra/hierarchy/constrained_connectivity_hierarchy.py"
] |
[
"############################################################################\n# Copyright ESIEE Paris (2018) #\n# #\n# Contributor(s) : Benjamin Perret #\n# #\n# Distributed under the terms of the CECILL-B License. #\n# #\n# The full license is in the file LICENSE, distributed with this software. #\n############################################################################\n\nimport higra as hg\nimport numpy as np\n\n\ndef random_binary_partition_tree(num_leaves, asymmetry_probability):\n \"\"\"\n Random binary partition tree with a controlled amount of asymmetry/unbalancedness.\n\n The tree is grown from the root to the leaves.\n At each step, the algorithm randomly select one of the *growable* leaf node of the current tree.\n Two children are added to the selected node; the number of leaf nodes is hence increased by one.\n Then,\n\n - with probability :math:`1-asymmetry\\_probability`, both new children are marked as *growable*\n - with probability :math:`asymmetry\\_probability`, only one of the children is marked as *growable*\n\n The altitudes of the returned hierarchy are obtained with :func:`~higra.attribute_regular_altitudes`:\n *The regular altitudes is comprised between 0 and 1 and is inversely proportional to the depth of a node*.\n\n A valid minimal connected graph (a tree) is associated to the leaves of the tree.\n\n :param num_leaves: expected number of leaves in the generated tree\n :param asymmetry_probability: real value between 0 and 1. At 0 the tree is perfectly unbalanced, at 1 it is\n perfectly balanced (if :attr:`num_leaves` is a power of 2)\n :return: a tree (Concept :class:`~higra.CptBinaryHierarchy`) and its node altitudes\n \"\"\"\n import random\n import math\n\n assert (0 <= asymmetry_probability <= 1)\n num_leaves = int(num_leaves)\n assert (num_leaves > 0)\n\n parents = np.zeros((num_leaves * 2 - 1,), dtype=np.int64)\n\n n = 1\n root = {}\n leaves = []\n leaves.append(root)\n\n all_nodes = [root]\n\n i = parents.size - 1\n root[\"parent\"] = i\n\n while n != 2 * num_leaves - 1:\n\n ni = random.randint(0, math.floor(asymmetry_probability * (len(leaves) - 1)))\n node = leaves[ni]\n del leaves[ni]\n\n node[\"i\"] = i\n node[\"left\"] = {\"parent\": i}\n node[\"right\"] = {\"parent\": i}\n i -= 1\n all_nodes.append(node[\"left\"])\n all_nodes.append(node[\"right\"])\n n += 2\n\n if random.random() <= asymmetry_probability:\n if random.random() >= 0.5:\n leaves.append(node[\"right\"])\n else:\n leaves.append(node[\"left\"])\n else:\n leaves.append(node[\"left\"])\n leaves.append(node[\"right\"])\n\n k = 0\n for node in all_nodes:\n if \"i\" not in node:\n node[\"i\"] = k\n k += 1\n parents[node[\"i\"]] = node[\"parent\"]\n\n tree = hg.Tree(parents)\n\n altitudes = hg.attribute_regular_altitudes(tree)\n\n def _get_associated_mst(tree, altitudes):\n \"\"\"\n Create a valid edge mst for the given tree (returns an edge weighted undirected graph)\n \"\"\"\n nb = tree.num_leaves()\n link_v = np.arange(nb)\n link_v = hg.accumulate_sequential(tree, link_v, hg.Accumulators.first)\n\n g = hg.UndirectedGraph(nb)\n edge_weights = np.zeros((nb - 1,), np.float32)\n for r in tree.leaves_to_root_iterator(include_leaves=False):\n g.add_edge(link_v[tree.child(0, r)], link_v[tree.child(1, r)])\n edge_weights[r - nb] = altitudes[r]\n\n return g, edge_weights\n\n mst, edge_weights = _get_associated_mst(tree, altitudes)\n\n hg.CptBinaryHierarchy.link(tree, mst)\n\n return tree, altitudes\n",
"############################################################################\n# Copyright ESIEE Paris (2018) #\n# #\n# Contributor(s) : Benjamin Perret #\n# #\n# Distributed under the terms of the CECILL-B License. #\n# #\n# The full license is in the file LICENSE, distributed with this software. #\n############################################################################\n\nimport higra as hg\nimport numpy as np\n\n\ndef constrained_connectivity_hierarchy_alpha_omega(graph, vertex_weights):\n \"\"\"\n Alpha-omega constrained connectivity hierarchy based on the given vertex weighted graph.\n\n For :math:`(i,j)` be an edge of the graph, we define :math:`w(i,j)=|w(i) - w(j)|`, the weight of this edge.\n Let :math:`X` be a set of vertices, the range of :math:`X` is the maximal absolute difference between the weights of any two vertices in :math:`X`:\n :math:`R(X) = \\max\\{|w(i) - w(j)|, (i,j)\\in X^2\\}`\n\n Let :math:`\\\\alpha` be a positive real number, a set of vertices :math:`X` is :math:`\\\\alpha`-connected, if for any two vertices\n :math:`i` and :math:`j` in :math:`X`, there exists a path from :math:`i` to :math:`j` in :math:`X` composed of edges of weights\n lower than or equal to :math:`\\\\alpha`.\n\n Let :math:`\\\\alpha` and :math:`\\omega` be a two positive real numbers, the :math:`\\\\alpha-\\omega`-connected components of the graph are\n the maximal :math:`\\\\alpha'`-connected sets of vertices with a range lower than or equal to :math:`\\omega`, with :math:`\\\\alpha'\\leq\\\\alpha`.\n\n Finally, the alpha-omega constrained connectivity hierarchy is defined as the hierarchy composed of all the :math:`k-k`-connected components for all positive :math:`k`.\n\n The definition used follows the one given in:\n\n P. Soille,\n \"Constrained connectivity for hierarchical image partitioning and simplification,\"\n in IEEE Transactions on Pattern Analysis and Machine Intelligence, vol. 30, no. 7, pp. 
1132-1145, July 2008.\n doi: 10.1109/TPAMI.2007.70817\n\n The algorithm runs in time :math:`\\mathcal{O}(n\\log(n))` and proceeds by filtering a quasi-flat zone hierarchy (see :func:`~higra.quasi_flat_zones_hierarchy`)\n\n :param graph: input graph\n :param vertex_weights: edge_weights: edge weights of the input graph\n :return: a tree (Concept :class:`~higra.CptHierarchy`) and its node altitudes\n \"\"\"\n\n vertex_weights = hg.linearize_vertex_weights(vertex_weights, graph)\n if vertex_weights.ndim != 1:\n raise ValueError(\"constrainted_connectivity_hierarchy_alpha_omega only works for scalar vertex weights.\")\n\n # QFZ on the L1 distance weighted graph\n edge_weights = hg.weight_graph(graph, vertex_weights, hg.WeightFunction.L1)\n tree, altitudes = hg.quasi_flat_zone_hierarchy(graph, edge_weights)\n altitude_parents = altitudes[tree.parents()]\n\n # vertex value range inside each region\n min_value = hg.accumulate_sequential(tree, vertex_weights, hg.Accumulators.min)\n max_value = hg.accumulate_sequential(tree, vertex_weights, hg.Accumulators.max)\n value_range = max_value - min_value\n\n # parent node can't be deleted\n altitude_parents[tree.root()] = max(altitudes[tree.root()], value_range[tree.root()])\n\n # nodes whith a range greater than the altitudes of their parent have to be deleted\n violated_constraints = value_range >= altitude_parents\n\n # the altitude of nodes with a range greater than their altitude but lower than the one of their parent must be changed\n reparable_node_indices = np.nonzero(np.logical_and(value_range > altitudes, value_range < altitude_parents))\n altitudes[reparable_node_indices] = value_range[reparable_node_indices]\n\n # final result construction\n tree, node_map = hg.simplify_tree(tree, violated_constraints)\n altitudes = altitudes[node_map]\n hg.CptHierarchy.link(tree, graph)\n\n return tree, altitudes\n\n\ndef constrained_connectivity_hierarchy_strong_connection(graph, edge_weights):\n \"\"\"\n Strongly constrained connectivity hierarchy based on the given edge weighted graph.\n\n Let :math:`X` be a set of vertices, the range of :math:`X` is the maximal weight of the edges linking two vertices inside :math:`X`.\n\n Let :math:`\\\\alpha` be a positive real number, a set of vertices :math:`X` is :math:`\\\\alpha`-connected, if for any two vertices\n :math:`i` and :math:`j` in :math:`X`, there exists a path from :math:`i` to :math:`j` in :math:`X` composed of edges of weights\n lower than or equal to :math:`\\\\alpha`.\n\n Let :math:`\\\\alpha` be a positive real numbers, the :math:`\\\\alpha`-strongly connected components of the graph are\n the maximal :math:`\\\\alpha'`-connected sets of vertices with a range lower than or equal to :math:`\\\\alpha` with :math:`\\\\alpha'\\leq\\\\alpha`.\n\n Finally, the strongly constrained connectivity hierarchy is defined as the hierarchy composed of all the\n :math:`\\\\alpha`- strongly connected components for all positive :math:`\\\\alpha`.\n\n The definition used follows the one given in:\n\n P. Soille,\n \"Constrained connectivity for hierarchical image partitioning and simplification,\"\n in IEEE Transactions on Pattern Analysis and Machine Intelligence, vol. 30, no. 7, pp. 
1132-1145, July 2008.\n doi: 10.1109/TPAMI.2007.70817\n\n The algorithm runs in time :math:`\\mathcal{O}(n\\log(n))` and proceeds by filtering a quasi-flat zone hierarchy (see :func:`~higra.quasi_flat_zones_hierarchy`)\n\n :param graph: input graph\n :param edge_weights: edge_weights: edge weights of the input graph\n :return: a tree (Concept :class:`~higra.CptHierarchy`) and its node altitudes\n \"\"\"\n\n tree, altitudes = hg.quasi_flat_zone_hierarchy(graph, edge_weights)\n altitude_parents = altitudes[tree.parents()]\n\n # max edge weights inside each region\n lca_map = hg.attribute_lca_map(tree)\n max_edge_weights = np.zeros((tree.num_vertices(),), dtype=edge_weights.dtype)\n np.maximum.at(max_edge_weights, lca_map, edge_weights)\n max_edge_weights = hg.accumulate_and_max_sequential(tree,\n max_edge_weights,\n max_edge_weights[:tree.num_leaves()],\n hg.Accumulators.max)\n\n # parent node can't be deleted\n altitude_parents[tree.root()] = max(altitudes[tree.root()], max_edge_weights[tree.root()])\n\n # nodes whith a range greater than the altitudes of their parent have to be deleted\n violated_constraints = max_edge_weights >= altitude_parents\n\n # the altitude of nodes with a range greater than their altitude but lower than the one of their parent must be changed\n reparable_node_indices = np.nonzero(\n np.logical_and(max_edge_weights > altitudes, max_edge_weights < altitude_parents))\n altitudes[reparable_node_indices] = max_edge_weights[reparable_node_indices]\n\n # final result construction\n tree, node_map = hg.simplify_tree(tree, violated_constraints)\n altitudes = altitudes[node_map]\n hg.CptHierarchy.link(tree, graph)\n\n return tree, altitudes\n"
] |
[
[
"numpy.arange",
"numpy.zeros"
],
[
"numpy.maximum.at",
"numpy.logical_and"
]
] |
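constrained_connectivity_hierarchy_strong_connection above scatters the maximum edge weight into each tree node with np.maximum.at(max_edge_weights, lca_map, edge_weights). Because the ufunc .at form is easy to misread, here is a tiny standalone run with invented indices and weights showing the unbuffered element-wise maximum it performs:

```python
import numpy as np

max_edge_weights = np.zeros(3)              # one slot per (hypothetical) tree node
lca_map = np.array([0, 1, 1, 2, 2])         # node index assigned to each edge
edge_weights = np.array([5., 1., 4., 2., 3.])

np.maximum.at(max_edge_weights, lca_map, edge_weights)
print(max_edge_weights)                     # [5. 4. 3.]
```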
Chinmay-jain767/itr
|
[
"041d276c2c9973ea7e33d4a293914c6e20acf968"
] |
[
"itr/train_util2.py"
] |
[
"import time\r\nimport torch\r\nimport numpy as np\r\nfrom pathlib import Path\r\nfrom transformers import WEIGHTS_NAME, CONFIG_NAME\r\n\r\n\r\ndef init_seed():\r\n seed_val = 42\r\n np.random.seed(seed_val)\r\n torch.manual_seed(seed_val)\r\n torch.cuda.manual_seed_all(seed_val)\r\n\r\n\r\ndevice = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\r\n\r\ndef save_model(model, output_dir):\r\n\r\n output_dir = Path(output_dir)\r\n # Step 1: Save a model, configuration and vocabulary that you have fine-tuned\r\n\r\n # If we have a distributed model, save only the encapsulated model\r\n # (it was wrapped in PyTorch DistributedDataParallel or DataParallel)\r\n model_to_save = model.module if hasattr(model, 'module') else model\r\n\r\n # If we save using the predefined names, we can load using `from_pretrained`\r\n output_model_file = output_dir / WEIGHTS_NAME\r\n output_config_file = output_dir / CONFIG_NAME\r\n\r\n torch.save(model_to_save.state_dict(), output_model_file)\r\n model_to_save.config.to_json_file(output_config_file)\r\n #src_tokenizer.save_vocabulary(output_dir)\r\n\r\ndef load_model():\r\n pass\r\n\r\n# Function to calculate the accuracy of our predictions vs labels\r\ndef flat_accuracy(preds, labels):\r\n\r\n pred_flat = np.argmax(preds, axis=2).flatten()\r\n labels_flat = labels.flatten()\r\n #print (f'preds: {pred_flat}')\r\n #print (f'labels: {labels_flat}')\r\n\r\n return np.sum(np.equal(pred_flat, labels_flat)) / len(labels_flat)\r\n\r\nimport pytorch_lightning as pl\r\nfrom pytorch_lightning import Trainer\r\n\r\n\r\nclass MyLightninModule(pl.LightningModule):\r\n def __init__(self, num_class):\r\n super(MyLightninModule, self).__init__()\r\n self.model = get_model(num_class=num_class)\r\n self.criterion = get_criterion()\r\n\r\n def forward(self, x):\r\n return self.model(x)\r\n\r\n def training_step(self, batch, batch_idx):\r\n # REQUIRED\r\n x, y = batch\r\n y_hat = self.forward(x)\r\n loss = self.criterion(y_hat, y)\r\n logs = {'train_loss': loss}\r\n return {'loss': loss, 'log': logs, 'progress_bar': logs}\r\n\r\n def validation_step(self, batch, batch_idx):\r\n # OPTIONAL\r\n x, y = batch\r\n y_hat = self.forward(x)\r\n preds = torch.argmax(y_hat, dim=1)\r\n return {'val_loss': self.criterion(y_hat, y), 'correct': (preds == y).float()}\r\n\r\n def validation_end(self, outputs):\r\n # OPTIONAL\r\n avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()\r\n acc = torch.cat([x['correct'] for x in outputs]).mean()\r\n logs = {'val_loss': avg_loss, 'val_acc': acc}\r\n return {'avg_val_loss': avg_loss, 'log': logs}\r\n\r\n def configure_optimizers(self):\r\n # REQUIRED\r\n optimizer, scheduler = get_optimizer(model=self.model)\r\n return [optimizer], [scheduler]\r\n\r\n @pl.data_loader\r\n def train_dataloader(self):\r\n # REQUIRED\r\n return get_loaders()[0]\r\n\r\n @pl.data_loader\r\n def val_dataloader(self):\r\n # OPTIONAL\r\n return get_loaders()[1]\r\n\r\n\r\n\r\ndef run_train(config, model, train_loader, eval_loader, writer):\r\n init_seed()\r\n optimizer = torch.optim.Adam(model.parameters(), lr=config.lr)\r\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, len(train_loader), eta_min=config.lr)\r\n\r\n training_loss_values = []\r\n validation_loss_values = []\r\n validation_accuracy_values = []\r\n\r\n for epoch in range(config.epochs):\r\n\r\n model.train()\r\n\r\n print('======== Epoch {:} / {:} ========'.format(epoch + 1, config.epochs))\r\n start_time = time.time()\r\n\r\n total_loss = 0\r\n\r\n for 
batch_no, batch in enumerate(train_loader):\r\n\r\n source = batch[0].to(device)\r\n target = batch[1].to(device)\r\n\r\n model.zero_grad()\r\n\r\n loss, logits = model(source, target)\r\n total_loss += loss.item()\r\n\r\n logits = logits.detach().cpu().numpy()\r\n label_ids = target.to('cpu').numpy()\r\n\r\n loss.backward()\r\n\r\n optimizer.step()\r\n scheduler.step()\r\n\r\n #Logging the loss and accuracy (below) in Tensorboard\r\n avg_train_loss = total_loss / len(train_loader)\r\n training_loss_values.append(avg_train_loss)\r\n\r\n for name, weights in model.named_parameters():\r\n writer.add_histogram(name, weights, epoch)\r\n\r\n writer.add_scalar('Train/Loss', avg_train_loss, epoch)\r\n\r\n print(\"Average training loss: {0:.2f}\".format(avg_train_loss))\r\n print(\"Running Validation...\")\r\n\r\n model.eval()\r\n\r\n eval_loss, eval_accuracy = 0, 0\r\n nb_eval_steps = 0\r\n\r\n for batch_no, batch in enumerate(eval_loader):\r\n\r\n source = batch[0].to(device)\r\n target = batch[1].to(device)\r\n\r\n with torch.no_grad():\r\n loss, logits = model(source, target)\r\n\r\n logits = logits.detach().cpu().numpy()\r\n label_ids = target.to('cpu').numpy()\r\n\r\n tmp_eval_accuracy = flat_accuracy(logits, label_ids)\r\n eval_accuracy += tmp_eval_accuracy\r\n eval_loss += loss\r\n\r\n nb_eval_steps += 1\r\n\r\n avg_valid_acc = eval_accuracy/nb_eval_steps\r\n avg_valid_loss = eval_loss/nb_eval_steps\r\n validation_loss_values.append(avg_valid_loss)\r\n validation_accuracy_values.append(avg_valid_acc)\r\n\r\n writer.add_scalar('Valid/Loss', avg_valid_loss, epoch)\r\n writer.add_scalar('Valid/Accuracy', avg_valid_acc, epoch)\r\n writer.flush()\r\n\r\n print(\"Avg Val Accuracy: {0:.2f}\".format(avg_valid_acc))\r\n print(\"Average Val Loss: {0:.2f}\".format(avg_valid_loss))\r\n print(\"Time taken by epoch: {0:.2f}\".format(time.time() - start_time))\r\n\r\n return training_loss_values, validation_loss_values, validation_accuracy_values\r\n"
] |
[
[
"torch.device",
"numpy.equal",
"torch.cat",
"torch.cuda.manual_seed_all",
"torch.stack",
"numpy.random.seed",
"torch.no_grad",
"torch.manual_seed",
"torch.cuda.is_available",
"numpy.argmax",
"torch.argmax"
]
] |
monchier/streamlit-1
|
[
"202cd80bad8c8eb51d3e75d3923b225b0092c882"
] |
[
"e2e/scripts/st_map.py"
] |
[
"# -*- coding: utf-8 -*-\n# Copyright 2018-2020 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport streamlit as st\nimport pandas as pd\nimport numpy as np\n\n# Empty map.\n\nst.map()\n\n# Simple map.\n\ncoords = np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4]\ndf = pd.DataFrame(coords, columns=[\"lat\", \"lon\"])\n\nst.map(df)\n\n# Same but with custom zoom level:\n\nst.map(df, zoom=8)\n"
] |
[
[
"pandas.DataFrame",
"numpy.random.randn"
]
] |
vishalbelsare/KramersMoyal
|
[
"57e50278b0d31567054f763f3e0f3cc2c1e08315",
"57e50278b0d31567054f763f3e0f3cc2c1e08315",
"57e50278b0d31567054f763f3e0f3cc2c1e08315"
] |
[
"test/kmc_test.py",
"test/bincount_test.py",
"older versions/older_version_1d_calculator.py"
] |
[
"import numpy as np\n\nfrom kramersmoyal import km\nfrom kramersmoyal import kernels\n\ndef test_kmc():\n for t in [1,0.1,0.01,0.001]:\n for lag in [None, [1,2,3]]:\n\n X = np.random.normal(loc = 0, scale = np.sqrt(t), size = 10000)\n\n bins = np.array([5000])\n\n powers = np.array([[1], [2]])\n\n bw = 0.15\n\n # The kmc holds the results, where edges holds the binning space\n kmc, edges = km(X, kernel = kernels.epanechnikov, bw = bw,\n bins = bins, powers = powers)\n\n assert isinstance(kmc, np.ndarray)\n assert isinstance(edges[0], np.ndarray)\n\n kmc, edges = km(X, kernel = kernels.epanechnikov, bins = bins,\n powers = powers)\n\n assert isinstance(kmc, np.ndarray)\n assert isinstance(edges[0], np.ndarray)\n",
"# from timeit import timeit\n\nimport numpy as np\n\nfrom kramersmoyal.binning import bincount1, bincount2\n\n# b1 = list()\n# b2 = list()\ndef test_bincount():\n for N in [1000000, 10000000]:\n for Nw in [1, 5, 10, 20, 40]:\n xy = np.random.randint(100, size=(N))\n weights = np.random.rand(N, Nw).T\n\n # b1.append(timeit(lambda: bincount1(xy, weights), number=5))\n # b2.append(timeit(lambda: bincount2(xy, weights), number=5))\n\n assert (bincount1(xy, weights) == bincount2(xy, weights)).all()\n\n # print(b1)\n # print(b2)\n",
"# coding: utf-8\n#! /usr/bin/env python\n# FrequencyJumpLibrary\n\nimport numpy as np\nfrom scipy import stats\nimport math as math\n\ndef KM (y, delta_t=1, Moments = [1,2,4,6,8], bandwidth = 1.5, Lowerbound = False, Upperbound = False, Kernel = 'Epanechnikov'): #Kernel-based Regression\n Moments = [0] + Moments\n length=len(Moments)\n n = 5000\n Mn = int(n * bandwidth / 10) #Minor n \n res = np.zeros([n + Mn, length])\n\n # Epanechnikov kernel: 3/4(1 - x²), x=-1 to x=1\n # #Uniform kernel: 1/2, , x=-1 to x=1\n Kernel = (3 * (1 - (np.linspace(-1 * bandwidth, 1 * bandwidth, Mn) / bandwidth) ** 2)) / (4 * bandwidth) # Kernel1 = ones([Mn]) / (2 * bandwidth)\n \n yDist = y[1:] - y[:-1] \n\n if (Lowerbound == False):\n Min = min(y)\n else:\n Min = Lowerbound\n if (Upperbound == False):\n Max = max(y)\n else:\n Max = Upperbound\n space = np.linspace(Min, Max, n + Mn)\n b = ((((y[:-1]-Min) / (abs(Max - Min))) * (n))).astype(int)\n trueb = np.unique(b[(b>=0)*(b<n)])\n for i in trueb:\n r = yDist[b==i]\n for l in range(length):\n res[i:i + Mn, l] += Kernel * (sum(r ** Moments[l]))\n\n res[:, 0][res[:, 0]==0]=1.\n \n for l in range(length-1):\n res[:, l+1] = np.divide(res[:, l+1],(res[:, 0] * math.factorial(Moments[l+1]) * (delta_t)))\n \n return res, space\n"
] |
[
[
"numpy.array",
"numpy.sqrt"
],
[
"numpy.random.randint",
"numpy.random.rand"
],
[
"numpy.linspace",
"numpy.zeros",
"numpy.unique"
]
] |
vanshhhhh/datasets
|
[
"aee32f95273ca3bfe83e09fb9b00ba4bf23597a5",
"aee32f95273ca3bfe83e09fb9b00ba4bf23597a5"
] |
[
"tensorflow_datasets/ranking/libsvm_ranking_parser.py",
"tensorflow_datasets/video/ucf101.py"
] |
[
"# coding=utf-8\n# Copyright 2021 The TensorFlow Datasets Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"A parser for ranking-style LibSVM files.\n\nNote that the LibSVM ranking file format does not have a formal specification.\nThis implementation supports the common formats that are available online and\nwill work with all the publically available LTR datasets such as MSLR-WEB,\nIstella and Yahoo Webscope.\n\"\"\"\n\nimport collections\nimport dataclasses\nimport re\nfrom typing import Iterable, List, Mapping, Tuple\n\nimport numpy as np\n\n# Type alias for a tuple representing a ranking example.\nRankingExampleTuple = Tuple[str, Mapping[str, np.ndarray]]\n\n\n@dataclasses.dataclass\nclass RankingExample:\n \"\"\"Represents a parsed ranking example for a given query identifier.\n\n Attributes:\n qid: The query identifier.\n features: A mapping of feature name to feature values.\n \"\"\"\n qid: str\n features: Mapping[str, List[float]]\n\n\nclass ParserError(Exception):\n \"\"\"Raised when LibSVM-formatted contents cannot be parsed correctly.\"\"\"\n\n def __init__(self, line_number: int, line: str, reason: str):\n \"\"\"Initializes the instance.\n\n Args:\n line_number: The line number where the parser error occurred.\n line: The line where the parser error occurred.\n reason: An informative message about the nature of the parser error.\n \"\"\"\n super().__init__(f\"Unable to parse line {line_number} ('{line}'): {reason}\")\n self.line_number = line_number\n self.line = line\n self.reason = reason\n\n\nclass LibSVMRankingParser(Iterable[RankingExampleTuple]):\n \"\"\"A parser for LibSVM-formatted ranking files.\n\n This parser can parse an iterable of lines into an iterable of\n `RankingExampleTuple`s that contain the per-query labels and features.\n\n Example usage:\n >>> lines = [\"1 qid:1 1:5.0 2:3.0\", \"2 qid:1 1:2.0 2:1.0\"]\n >>> feature_names = {1: \"bm25\", 2: \"tfidf\"}\n >>> parser = LibSVMRankingParser(lines, feature_names)\n >>> for example in parser:\n ... 
print(example[0])\n {'bm25': array([5., 2.]), 'tfidf': array([3., 1.]), 'label': array([1, 2])}\n\n The parser will raise a ParserError with an informative error message if the\n content being parsed does not match LibSVM ranking format.\n \"\"\"\n\n def __init__(self,\n lines: Iterable[str],\n feature_names: Mapping[int, str],\n label_feature_name: str = \"label\",\n default_feature_value: float = 0.0):\n \"\"\"Initializes the instance.\n\n Args:\n lines: The lines to parse.\n feature_names: A mapping from feature indices to feature names.\n label_feature_name: The name to assign to the label feature.\n default_feature_value: The default feature value to use when a feature is\n missing from the input.\n \"\"\"\n self._lines = lines\n self._feature_names = feature_names\n self._label_feature_name = label_feature_name\n self._default_feature_value = default_feature_value\n self._current_example = None\n self._available_examples = collections.deque()\n\n def _parse_line(self, line_number: int, line: str):\n \"\"\"Parses a single line of input from a LibSVM ranking file.\n\n This method will update the internal state of the parser and may produce a\n new item to be made available in `self._available_examples`.\n\n Args:\n line_number: The current line number. This may be used to generate more\n informative ParserError messages if such an error occurs.\n line: The line to parse.\n\n Raises:\n ParserError: If the parsing of given line failed.\n \"\"\"\n # Remove comments and leading/trailing whitespace from line.\n line_clean, *_ = line.split(\"#\", maxsplit=2)\n line_clean = line_clean.strip()\n\n # An empty line is allowed but should be skipped during parsing.\n if not line_clean:\n return\n\n # Split by space separators.\n try:\n label, qid, *features = re.split(r\"\\s+\", line_clean)\n except ValueError as value_error:\n raise ParserError(\n line_number, line,\n \"could not extract label, qid and features\") from value_error\n\n # Convert relevance label to float.\n try:\n label = float(label)\n except ValueError as value_error:\n raise ParserError(\n line_number, line,\n f\"label '{label}' could not be converted to a float\") from value_error\n\n # Extract qid.\n if qid[:4] != \"qid:\":\n raise ParserError(line_number, line,\n \"line must contain a qid after the relevance label\")\n qid = qid[4:]\n if not qid:\n raise ParserError(line_number, line, \"qid can not be empty\")\n\n # Construct a feature dict containing default values.\n feature_dict = {\n feature_name: self._default_feature_value\n for feature_name in self._feature_names.values()\n }\n\n # Parse all features and add them to the feature dict.\n for feature in features:\n try:\n index, value = feature.split(\":\", maxsplit=2)\n index = int(index)\n value = float(value)\n # Only add features if they map to a feature name in the\n # `_feature_names` dict. 
All other features are ignored.\n if index in self._feature_names:\n feature_dict[self._feature_names[index]] = value\n except ValueError as value_error:\n raise ParserError(\n line_number, line,\n f\"failed to extract feature index and value from '{feature}'\"\n ) from value_error\n\n # Add label to feature dict.\n feature_dict[self._label_feature_name] = label\n\n # Add the parsed qid and feature dictionary to the current example.\n self._add_to_current_example(qid, feature_dict)\n\n def _add_to_current_example(self, qid: str, feature_map: Mapping[str, float]):\n \"\"\"Adds given qid and feature_map to the current example.\n\n If the qid matches the current example qid, this will add the features in\n given feature_map to the current example.\n If the qid does not match the current example qid, this will store the\n current example and then create a new one with given qid and feature_map.\n\n Args:\n qid: The query identifier.\n feature_map: A mapping of feature names to feature values.\n \"\"\"\n if self._current_example is None:\n self._current_example = RankingExample(qid, collections.defaultdict(list))\n\n if self._current_example.qid != qid:\n self._store_current_example()\n self._current_example = RankingExample(qid, collections.defaultdict(list))\n\n for key, value in feature_map.items():\n self._current_example.features[key].append(value)\n\n def _store_current_example(self):\n \"\"\"Store the current ranking example in numpy format.\n\n This method converts the current example to a RankingExampleTuple and stores\n it in `self._available_examples` so that it can be yielded by\n `self.__iter__`.\n \"\"\"\n qid = self._current_example.qid\n np_features_dict = {\n key: np.array(value)\n for key, value in self._current_example.features.items()\n }\n self._available_examples.append((qid, np_features_dict))\n\n def _end_of_parse(self):\n \"\"\"Signals the end of parsing has been reached.\n\n This method will store the item that is currently being parsed so it is\n available in `self._available_examples`.\n \"\"\"\n if self._current_example is not None:\n self._store_current_example()\n self._current_example = None\n\n def __iter__(self):\n \"\"\"Iterates over `RankingExampleTuple`s.\n\n Yields:\n `RankingExampleTuple` that represent a single query.\n\n Raises:\n ParserError: If parsing failed.\n \"\"\"\n # Parse each line and yield the resulting examples as they become available.\n for line_number, line in enumerate(self._lines, start=1):\n self._parse_line(line_number, line)\n while self._available_examples:\n yield self._available_examples.popleft()\n\n # Signal the end has been reached and yield any remaining items.\n self._end_of_parse()\n while self._available_examples:\n yield self._available_examples.popleft()\n",
"# coding=utf-8\n# Copyright 2021 The TensorFlow Datasets Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"UCF-101 dataset from https://www.crcv.ucf.edu/data/UCF101.php.\"\"\"\n\nimport os\n\nfrom absl import logging\nimport tensorflow as tf\nimport tensorflow_datasets.public_api as tfds\n\nUCF_101_URL = 'https://storage.googleapis.com/thumos14_files/UCF101_videos.zip'\nSPLITS_URL = ('https://www.crcv.ucf.edu/data/UCF101/'\n 'UCF101TrainTestSplits-RecognitionTask.zip')\n\n_CITATION = \"\"\"\\\n@article{DBLP:journals/corr/abs-1212-0402,\n author = {Khurram Soomro and\n Amir Roshan Zamir and\n Mubarak Shah},\n title = {{UCF101:} {A} Dataset of 101 Human Actions Classes From Videos in\n The Wild},\n journal = {CoRR},\n volume = {abs/1212.0402},\n year = {2012},\n url = {http://arxiv.org/abs/1212.0402},\n archivePrefix = {arXiv},\n eprint = {1212.0402},\n timestamp = {Mon, 13 Aug 2018 16:47:45 +0200},\n biburl = {https://dblp.org/rec/bib/journals/corr/abs-1212-0402},\n bibsource = {dblp computer science bibliography, https://dblp.org}\n}\n\"\"\"\n\n_LABELS_FNAME = 'video/ucf101_labels.txt'\n\n\nclass Ucf101Config(tfds.core.BuilderConfig):\n \"\"\"\"Configuration for UCF101 split and possible video rescaling.\"\"\"\n\n def __init__(self, *, split_number, width=None, height=None, **kwargs):\n \"\"\"The parameters specifying how the dataset will be processed.\n\n The dataset comes with three separate splits. You can specify which split\n you want in `split_number`. 
If `width` and `height` are set, the videos\n will be rescaled to have those heights and widths (using ffmpeg).\n\n Args:\n split_number: The split number, one of (1, 2, 3)\n width: An integer with the width or None.\n height: An integer with the height or None.\n **kwargs: Passed on to the constructor of `BuilderConfig`.\n \"\"\"\n super(Ucf101Config, self).__init__(\n version=tfds.core.Version('2.0.0'),\n release_notes={\n '2.0.0': 'New split API (https://tensorflow.org/datasets/splits)',\n },\n **kwargs,\n )\n if (width is None) ^ (height is None):\n raise ValueError('Either both dimensions should be set, or none of them')\n self.width = width\n self.height = height\n if split_number not in (1, 2, 3):\n raise ValueError(\n 'Unknown split number {}, should be 1, 2 or 3'.format(split_number))\n self.split_number = split_number\n\n\nclass Ucf101(tfds.core.GeneratorBasedBuilder):\n \"\"\"Ucf101 action recognition dataset.\n\n Note that in contrast to the labels provided in the original dataset, here the\n labels start at zero, not at one.\n \"\"\"\n\n BUILDER_CONFIGS = [\n Ucf101Config(\n name='ucf101_1_256',\n description='256x256 UCF with the first action recognition split.',\n width=256,\n height=256,\n split_number=1,\n ),\n Ucf101Config(\n name='ucf101_1',\n description='UCF with the action recognition split #1.',\n width=None,\n height=None,\n split_number=1,\n ),\n Ucf101Config(\n name='ucf101_2',\n description='UCF with the action recognition split #2.',\n width=None,\n height=None,\n split_number=2,\n ),\n Ucf101Config(\n name='ucf101_3',\n description='UCF with the action recognition split #3.',\n width=None,\n height=None,\n split_number=3,\n ),\n ]\n\n def _info(self):\n if self.builder_config.width is not None:\n if self.builder_config.height is None:\n raise ValueError('Provide either both height and width or none.')\n ffmpeg_extra_args = ('-vf',\n 'scale={}x{}'.format(self.builder_config.height,\n self.builder_config.width))\n else:\n ffmpeg_extra_args = []\n\n video_shape = (None, self.builder_config.height, self.builder_config.width,\n 3)\n labels_names_file = tfds.core.tfds_path(_LABELS_FNAME)\n features = tfds.features.FeaturesDict({\n 'video':\n tfds.features.Video(\n video_shape,\n ffmpeg_extra_args=ffmpeg_extra_args,\n encoding_format='jpeg'), # pytype: disable=wrong-arg-types # gen-stub-imports\n 'label':\n tfds.features.ClassLabel(names_file=labels_names_file),\n })\n return tfds.core.DatasetInfo(\n builder=self,\n description='A 101-label video classification dataset.',\n features=features,\n homepage='https://www.crcv.ucf.edu/data-sets/ucf101/',\n citation=_CITATION,\n )\n\n def _split_generators(self, dl_manager):\n splits_folder = 'ucfTrainTestlist'\n\n urls_to_download = {\n 'videos': UCF_101_URL,\n 'splits': SPLITS_URL,\n }\n downloaded_urls = dl_manager.download_and_extract(urls_to_download)\n\n return [\n tfds.core.SplitGenerator(\n name=tfds.Split.TRAIN,\n gen_kwargs={\n 'videos_dir':\n downloaded_urls['videos'],\n 'splits_dir':\n downloaded_urls['splits'],\n 'data_list':\n '{}/trainlist{:02d}.txt'.format(\n splits_folder, self.builder_config.split_number),\n }),\n tfds.core.SplitGenerator(\n name=tfds.Split.TEST,\n gen_kwargs={\n 'videos_dir':\n downloaded_urls['videos'],\n 'splits_dir':\n downloaded_urls['splits'],\n 'data_list':\n '{}/testlist{:02d}.txt'.format(\n splits_folder, self.builder_config.split_number),\n }),\n ]\n\n def _generate_examples(self, videos_dir, splits_dir, data_list):\n data_list_path_path = os.path.join(splits_dir, data_list)\n 
with tf.io.gfile.GFile(data_list_path_path, 'r') as data_list_file:\n labels_and_paths = data_list_file.readlines()\n for label_and_path in sorted(labels_and_paths):\n # The train splits contain not only the filename, but also a digit\n # encoding the label separated by a space, which we ignore.\n label_and_path = label_and_path.strip().split(' ')[0]\n label, path = os.path.split(label_and_path)\n # Fix an inconsistency between the names in the list and in the zip file.\n path = path.replace('HandStandPushups', 'HandstandPushups')\n video_path = os.path.join(videos_dir, 'UCF101', path)\n if not tf.io.gfile.exists(video_path):\n logging.error('Example %s not found', video_path)\n continue\n # We extract the label from the filename.\n yield path, {'video': video_path, 'label': label}\n"
] |
[
[
"numpy.array"
],
[
"tensorflow.io.gfile.exists",
"tensorflow.io.gfile.GFile"
]
] |