Columns: repo_name (string, length 6-130), hexsha (list), file_path (list), code (list), apis (list)
poramasionwu/kmodes
[ "224b19bad77a3f2150a6cb754c6ae8f5140e4291" ]
[ "kmodes/util/dissim.py" ]
[ "\"\"\"\nDissimilarity measures for clustering\n\"\"\"\n\nimport numpy as np\n\n\ndef matching_dissim(a, b, **_):\n \"\"\"Simple matching dissimilarity function\"\"\"\n return np.sum(a != b, axis=1)\n\n\ndef jaccard_dissim_binary(a, b, **__):\n \"\"\"Jaccard dissimilarity function for binary encoded variables\"\"\"\n if ((a == 0) | (a == 1)).all() and ((b == 0) | (b == 1)).all():\n numerator = np.sum(np.bitwise_and(a, b), axis=1)\n denominator = np.sum(np.bitwise_or(a, b), axis=1)\n if (denominator == 0).any(0):\n raise ValueError(\"Insufficient Number of data since union is 0\")\n else:\n return 1 - numerator / denominator\n raise ValueError(\"Missing or non Binary values detected in Binary columns.\")\n\n\ndef jaccard_dissim_label(a, b, **__):\n \"\"\"Jaccard dissimilarity function for label encoded variables\"\"\"\n if np.isnan(a.astype('float64')).any() or np.isnan(b.astype('float64')).any():\n raise ValueError(\"Missing values detected in Numeric columns.\")\n intersect_len = np.empty(len(a), dtype=int)\n union_len = np.empty(len(a), dtype=int)\n i = 0\n for row in a:\n intersect_len[i] = len(np.intersect1d(row, b))\n union_len[i] = len(np.unique(row)) + len(np.unique(b)) - intersect_len[i]\n i += 1\n if (union_len == 0).any():\n raise ValueError(\"Insufficient Number of data since union is 0\")\n return 1 - intersect_len / union_len\n\n\ndef euclidean_dissim(a, b, **_):\n \"\"\"Euclidean distance dissimilarity function\"\"\"\n if np.isnan(a).any() or np.isnan(b).any():\n raise ValueError(\"Missing values detected in numerical columns.\")\n return np.sum((a - b) ** 2, axis=1)\n\n\ndef ng_dissim(a, b, X=None, membship=None):\n \"\"\"Ng et al.'s dissimilarity measure, as presented in\n Michael K. Ng, Mark Junjie Li, Joshua Zhexue Huang, and Zengyou He, \"On the\n Impact of Dissimilarity Measure in k-Modes Clustering Algorithm\", IEEE\n Transactions on Pattern Analysis and Machine Intelligence, Vol. 29, No. 3,\n January, 2007\n\n This function can potentially speed up training convergence.\n\n Note that membship must be a rectangular array such that the\n len(membship) = len(a) and len(membship[i]) = X.shape[1]\n\n In case of missing membship, this function reverts back to\n matching dissimilarity (e.g., when predicting).\n \"\"\"\n # Without membership, revert to matching dissimilarity\n if membship is None:\n return matching_dissim(a, b)\n\n def calc_cjr(b, X, memj, idr):\n \"\"\"Num objects w/ category value x_{i,r} for rth attr in jth cluster\"\"\"\n xcids = np.where(memj == 1)\n return float((np.take(X, xcids, axis=0)[0][:, idr] == b[idr]).sum(0))\n\n def calc_dissim(b, X, memj, idr):\n # Size of jth cluster\n cj = float(np.sum(memj))\n return (1.0 - (calc_cjr(b, X, memj, idr) / cj)) if cj != 0.0 else 0.0\n\n if len(membship) != a.shape[0] and len(membship[0]) != X.shape[1]:\n raise ValueError(\"'membship' must be a rectangular array where \"\n \"the number of rows in 'membship' equals the \"\n \"number of rows in 'a' and the number of \"\n \"columns in 'membship' equals the number of rows in 'X'.\")\n\n return np.array([np.array([calc_dissim(b, X, membship[idj], idr)\n if b[idr] == t else 1.0\n for idr, t in enumerate(val_a)]).sum(0)\n for idj, val_a in enumerate(a)])\n" ]
[ [ "numpy.take", "numpy.unique", "numpy.isnan", "numpy.bitwise_and", "numpy.intersect1d", "numpy.where", "numpy.sum", "numpy.bitwise_or" ] ]
Ashutosh-Malve/qiskit-aqua
[ "649bd70a2a3465e37ef1ff1a04a62d4a63222177" ]
[ "test/finance/test_data_providers.py" ]
[ "# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2019, 2020.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\" Test Data Providers \"\"\"\n\nimport datetime\nfrom test.finance import QiskitFinanceTestCase\nimport warnings\nimport numpy as np\nfrom qiskit.finance.data_providers import (RandomDataProvider,\n QiskitFinanceError,\n WikipediaDataProvider,\n StockMarket,\n DataOnDemandProvider,\n ExchangeDataProvider)\n\n\n# This can be run as python -m unittest test.test_data_providers.TestDataProviders\n\nclass TestDataProviders(QiskitFinanceTestCase):\n \"\"\"Tests data providers for the Portfolio Optimization and Diversification.\"\"\"\n\n def setUp(self):\n super().setUp()\n warnings.filterwarnings(action=\"ignore\", message=\"unclosed\", category=ResourceWarning)\n\n def tearDown(self):\n super().tearDown()\n warnings.filterwarnings(action=\"always\", message=\"unclosed\", category=ResourceWarning)\n\n def test_wrong_use(self):\n \"\"\" wrong use test \"\"\"\n rnd = RandomDataProvider(seed=1)\n # Now, the .run() method is expected, which does the actual data loading\n # (and can take seconds or minutes,\n # depending on the data volumes, hence not ok in the constructor)\n self.assertRaises(QiskitFinanceError, rnd.get_covariance_matrix)\n self.assertRaises(QiskitFinanceError, rnd.get_similarity_matrix)\n wiki = WikipediaDataProvider(\n token=\"\",\n tickers=[\"GOOG\", \"AAPL\"],\n stockmarket=StockMarket.NASDAQ,\n start=datetime.datetime(2016, 1, 1),\n end=datetime.datetime(2016, 1, 30)\n )\n # Now, the .run() method is expected, which does the actual data loading\n self.assertRaises(QiskitFinanceError, wiki.get_covariance_matrix)\n self.assertRaises(QiskitFinanceError, wiki.get_similarity_matrix)\n\n def test_random(self):\n \"\"\" random test \"\"\"\n rnd = RandomDataProvider(seed=1)\n rnd.run()\n similarity = np.array([[1.00000000e+00, 6.2284804e-04], [6.2284804e-04, 1.00000000e+00]])\n covariance = np.array([[1.75870991, -0.32842528], [-0.32842528, 2.31429182]])\n np.testing.assert_array_almost_equal(rnd.get_covariance_matrix(), covariance, decimal=3)\n np.testing.assert_array_almost_equal(rnd.get_similarity_matrix(), similarity, decimal=3)\n\n def test_wikipedia(self):\n \"\"\" wikipedia test \"\"\"\n wiki = WikipediaDataProvider(\n token=\"\",\n tickers=[\"GOOG\", \"AAPL\"],\n stockmarket=StockMarket.NASDAQ,\n start=datetime.datetime(2016, 1, 1),\n end=datetime.datetime(2016, 1, 30)\n )\n # can throw QiskitFinanceError\n try:\n wiki.run()\n similarity = np.array([\n [1.00000000e+00, 8.44268222e-05],\n [8.44268222e-05, 1.00000000e+00]\n ])\n covariance = np.array([\n [269.60118129, 25.42252332],\n [25.42252332, 7.86304499]\n ])\n np.testing.assert_array_almost_equal(wiki.get_covariance_matrix(),\n covariance, decimal=3)\n np.testing.assert_array_almost_equal(wiki.get_similarity_matrix(),\n similarity, decimal=3)\n except QiskitFinanceError:\n self.skipTest(\"Test of WikipediaDataProvider skipped due to the per-day usage limits.\")\n # The trouble for automating testing is that after 50 tries\n # from one IP address within a day\n # Quandl complains about the free usage 
tier limits:\n # quandl.errors.quandl_error.LimitExceededError: (Status 429) (Quandl Error QELx01)\n # You have exceeded the anonymous user limit of 50 calls per day. To make more calls\n # today, please register for a free Quandl account and then include your API\n # key with your requests.\n # This gets \"dressed\" as QiskitFinanceError.\n # This also introduces a couple of seconds of a delay.\n\n def test_nasdaq(self):\n \"\"\" nasdaq test \"\"\"\n nasdaq = DataOnDemandProvider(\n token=\"REPLACE-ME\",\n tickers=[\"GOOG\", \"AAPL\"],\n stockmarket=StockMarket.NASDAQ,\n start=datetime.datetime(2016, 1, 1),\n end=datetime.datetime(2016, 1, 2)\n )\n try:\n nasdaq.run()\n self.fail(\"Test of DataOnDemandProvider should have failed due to the lack of a token.\")\n except QiskitFinanceError:\n self.skipTest(\"Test of DataOnDemandProvider skipped due to the lack of a token.\")\n\n def test_exchangedata(self):\n \"\"\" exchange data test \"\"\"\n lse = ExchangeDataProvider(\n token=\"REPLACE-ME\",\n tickers=[\"AIBGl\", \"AVSTl\"],\n stockmarket=StockMarket.LONDON,\n start=datetime.datetime(2019, 1, 1),\n end=datetime.datetime(2019, 1, 30)\n )\n try:\n lse.run()\n self.fail(\"Test of DataOnDemandProvider should have failed due to the lack of a token.\")\n except QiskitFinanceError:\n self.skipTest(\"Test of DataOnDemandProvider skipped due to the lack of a token.\")\n" ]
[ [ "numpy.array" ] ]
sebasc167/Traveling-Salesman-Genetic-Algo
[ "fe5cf133a20761c20721ee98675e02b205007164" ]
[ "GeneticAlgorithm.py" ]
[ "import random\r\nfrom math import *\r\nfrom random import *\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom CityClass import City\r\nfrom SolutionClass import Solution\r\n\r\nseed(10)\r\ncityList = []\r\nfor i in range(25):\r\n cityList.append(City(randint(0,200),randint(0,200)))\r\n\r\nclass Mappy:\r\n def __init__(self,population_num:int):\r\n '''\r\n Population number is the number of total solutions\r\n to play with and NOT the number of cities\r\n '''\r\n self.bestSolution = None\r\n self.solutions = [Solution(sample(cityList,25)) for _ in range(population_num)]\r\n self.pop_num = population_num\r\n def sortSolutions(self):\r\n '''\r\n This function will not return anything but will simply\r\n sort the solution list in descending order according to the\r\n solution's fitness attribute\r\n return:\r\n None\r\n '''\r\n self.solutions.sort(key = lambda k: k.fitness, reverse=True)\r\n\r\n def updateBest(self):\r\n self.bestSolution = max(self.solutions,key=lambda k: k.fitness)\r\n print(f'The best solution has route {self.bestSolution.route}\\nfitness {1/self.bestSolution.fitness}')\r\n return int(1/self.bestSolution.fitness)\r\n\r\n def nextGeneration(self):\r\n '''\r\n This function will first add the top half solutions to the nextGeneration\r\n and then add the next top performing solutions to the pool and replace the\r\n array with a new generation\r\n return:\r\n None, updates the solutions pool\r\n '''\r\n nextGen = [] #next generation of cities\r\n first_half_gen = self.elitism()\r\n nextGen.extend(first_half_gen) #adding the first half of the generation\r\n self.solutions = nextGen #now update the solutions with the nextGen\r\n\r\n second_half_gen = []\r\n for _ in range(self.pop_num//2):\r\n parents = self.rouletteWheelSelection()\r\n parent1,parent2 = parents[0], parents[1] #the parent solutions\r\n second_half_gen.append(self.breed(parent1,parent2))\r\n\r\n nextGen.extend(second_half_gen) #adding the next half of the generation\r\n\r\n def elitism(self):\r\n '''\r\n This function will simply add on the top half performing routes\r\n to the population\r\n return:\r\n A list of cities, half the size\r\n '''\r\n halfGen=[self.solutions[i] for i in range(self.pop_num//2)]\r\n return halfGen\r\n\r\n def totalFitness(self):\r\n '''\r\n This function simply adds the fitness of the individuals\r\n '''\r\n fitness = [solution.fitness for solution in self.solutions]\r\n return sum(fitness)\r\n\r\n def rouletteWheelSelection(self):\r\n '''\r\n return two parents for breeding as a list\r\n '''\r\n parents = [] #two parents\r\n total = 0\r\n roulette = []\r\n for i in range(len(self.solutions)):\r\n total_fit = self.totalFitness()\r\n portion = self.solutions[i].fitness / total_fit\r\n roulette.append((i, total, total + portion))\r\n total += portion\r\n spin = random()\r\n #print(f'this is the roulette values: {roulette}')\r\n picked = [i for i in roulette if i[1] <= spin < i[2]]\r\n spin2 = random()\r\n picked2 = [i for i in roulette if i[1] <= spin2 < i[2]]\r\n while picked[0][0] == picked2[0][0]:\r\n #print(\"PICKED THE SAME PARENT\")\r\n spin2 = random()\r\n picked2 = [i for i in roulette if i[1] <= spin2 < i[2]]\r\n parents.append(self.solutions[picked[0][0]])\r\n parents.append(self.solutions[picked2[0][0]])\r\n return parents\r\n\r\n def breed(self,sol1:Solution,sol2:Solution)->Solution:\r\n '''\r\n parameters:\r\n sol1: A solution parent picked by rank selection\r\n return\r\n A solution child from solutions 1 and 2\r\n '''\r\n assert (len(sol1.route) == 
len(sol2.route))\r\n child = []\r\n\r\n gene1 = randint(0, len(sol1.route))\r\n gene2 = randint(0, len(sol2.route))\r\n while gene1 == gene2:\r\n gene2 = randint(0, len(sol2.route))\r\n generange = (gene1, gene2) if gene1 < gene2 else (gene2, gene1)\r\n slicey = sol1.route[generange[0]:generange[1] + 1]\r\n child1 = [i for i in sol2.route if i not in slicey]\r\n\r\n child.extend([child1[i] for i in range(0, generange[0])])\r\n child.extend(slicey)\r\n child.extend([child1[i] for i in range(generange[0], len(child1))])\r\n childSolution = Solution(route=child) #type is Solution\r\n return childSolution\r\n\r\n def execute(self,generations,mutation_rate):\r\n y_axis = []\r\n x_axis = []\r\n self.sortSolutions()\r\n print(\"------ Generation 0---------------\")\r\n for sol in self.solutions:\r\n print(sol)\r\n print(\"----------------------------------\")\r\n dist = self.updateBest()\r\n x_axis.append(0)\r\n y_axis.append(dist)\r\n for i in range(generations):\r\n self.nextGeneration()\r\n self.mutation(mutation_rate)\r\n self.sortSolutions()\r\n print(f'--------Generation {i+1} ----------------')\r\n #for sol in self.solutions:\r\n #print(sol)\r\n dist = self.updateBest()\r\n x_axis.append(i+1)\r\n y_axis.append(dist)\r\n print(\"-----------------------------------------\")\r\n plt.xlabel(\"Generations\")\r\n plt.ylabel(\"Route Distance\")\r\n plt.plot(np.array(x_axis,dtype=np.int32),np.array(y_axis,dtype=np.int32))\r\n plt.suptitle(\"Travelling Distances vs Generations\")\r\n plt.show()\r\n\r\n\r\n def mutate(self,index,gene1,gene2):\r\n '''\r\n This function will swap city locations to visit randomly\r\n on a random route\r\n return:\r\n None\r\n '''\r\n self.solutions[index].route[gene1], self.solutions[index].route[gene2] = self.solutions[index].route[gene2], self.solutions[index].route[gene1]\r\n print(f'Mutation has occurred at gene{gene1} and gene{gene2} at solution number {index+1}')\r\n\r\n def mutation(self,mutation_rate):\r\n '''\r\n This function is the mutation driver, so it will\r\n pick a random route solution and two random\r\n '''\r\n wheel = []\r\n total = 0\r\n wheel.append((total,total + mutation_rate))\r\n total+=mutation_rate\r\n wheel.append((total,1))\r\n spin = random()\r\n if wheel[0][0] <= spin <= wheel[0][1]:\r\n gene1 = randint(0,24)\r\n gene2 = randint(0,24)\r\n while(gene1 == gene2):\r\n gene2 = randint(0, 24)\r\n self.mutate(randint(0,self.pop_num-1),gene1,gene2)\r\n else:\r\n pass\r\n\r\nTSP1 = Mappy(20)\r\nTSP1.execute(generations=500,mutation_rate=0.31)\r\n" ]
[ [ "matplotlib.pyplot.xlabel", "matplotlib.pyplot.suptitle", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
fromjupiter/SingerMatch
[ "d3c93ae12d280badc99d6187f5dcd477cbc5843a" ]
[ "singermatch/models/XGBoosting.py" ]
[ "import configparser\nimport pickle\nfrom collections import OrderedDict\n\nimport numpy as np\nfrom sklearn.metrics import f1_score\n\nfrom routines import Routines\nimport xgboost as xgb\n\nconfig = configparser.ConfigParser()\nconfig.read('../../system.ini')\nroutines = Routines(config)\n\nwith open(routines.essentia_feature_dir+'/train.data', 'rb') as f:\n Xtrain = pickle.load(f)\n\nytrain = []\nwith open(routines.original_mp3_dir + '/train.list', 'r') as f:\n for l in f:\n ytrain.append(l.strip().split('/')[0])\n\nwith open(routines.essentia_feature_dir+'/test.data', 'rb') as f:\n Xtest = pickle.load(f)\n\nkeys = []\nytest = []\nwith open(routines.original_mp3_dir + '/test.list', 'r') as f:\n for l in f:\n label = l.strip().split('/')[0]\n ytest.append(label)\n if len(keys) == 0 or keys[-1] != label:\n keys.append(label)\n\nXtrain = np.array(Xtrain)\nytrain = np.array(ytrain)\nXtest = np.array(Xtest)\nytest = np.array(ytest)\n\nmodel = xgb.XGBClassifier(random_state=1, max_depth=3, n_estimators=1000)\nmodel.fit(Xtrain, ytrain)\n\npred = model.predict(Xtest)\naccuracy = sum(ytest == pred)/len(ytest)\nprint(\"accuracy: {}\".format(accuracy))\nprint(\"F1 score: {}\".format(f1_score(ytest, pred, labels=keys, average='micro')))\n# make confusion matrix\nprint(keys)\nconfusion_matrix = np.zeros((20, 20))\nfor i in range(0, len(ytest)):\n confusion_matrix[keys.index(ytest[i])][keys.index(pred[i])] += 1\n\nfor l in confusion_matrix.tolist():\n print(' '.join([str(int(x)) for x in l]))\n\n\n\n" ]
[ [ "sklearn.metrics.f1_score", "numpy.array", "numpy.zeros" ] ]
CMU-IDS-2021/fp--05839-abby-jeff-kyle-will
[ "be51cce899d636c2e2811e66cf738b5c59406819" ]
[ "support/word_cloud.py" ]
[ "# word_cloud.py\n# Word cloud generation.\n\nimport pandas as pd\nimport altair as alt\nfrom textblob import TextBlob\nimport matplotlib.pyplot as plt\nimport streamlit as st\nfrom wordcloud import WordCloud, STOPWORDS, ImageColorGenerator\n\n\ndef buildWordCloudText(data):\n text = \"\"\n for i in range(len(data)):\n text += data.text[i]\n return text\n\n\ndef getData(filename):\n data = pd.read_json(filename)\n return data\n\n\ndef getWordCloud(text):\n wordcloud = WordCloud(\n stopwords=STOPWORDS,\n max_font_size=50,\n max_words=150,\n background_color=\"white\",\n collocations=False,\n ).generate(text)\n fig = plt.figure()\n plt.imshow(wordcloud, interpolation=\"bilinear\")\n plt.axis(\"off\")\n return fig\n\n\ndef getSentiment(data):\n sentiment = []\n for i in range(len(data)):\n avgSentiment = []\n blob = TextBlob(data.text[i])\n for sentence in blob.sentences:\n avgSentiment.append(sentence.sentiment.polarity)\n sentiment.append(avgSentiment)\n return sentiment\n\n\ndef getAvgSentiment(sentiments, data):\n docSentiments = []\n articleTitles = []\n for i in range(len(sentiments)):\n sentiment = 0\n for j in range(len(sentiments[i])):\n sentiment += sentiments[i][j]\n docSentiments.append((sentiment / len(sentiments[i])) * 100)\n articleTitles.append(data.title[i])\n tuples = list(zip(articleTitles, docSentiments))\n output = pd.DataFrame(tuples, columns=[\"Title\", \"Sentiment\"])\n return output\n\n\ndef buildChart(data):\n sentChart = (\n alt.Chart(data)\n .mark_bar()\n .encode(\n alt.X(\n \"Sentiment:Q\",\n title=\"Media Sentiment of Articles about AI\",\n scale=alt.Scale(domain=(-100, 100)),\n ),\n y=\"Title:O\",\n color=alt.condition(\n alt.datum.Sentiment > 0, alt.value(\"steelblue\"), alt.value(\"orange\")\n ),\n )\n .properties(width=600)\n )\n return sentChart\n" ]
[ [ "matplotlib.pyplot.imshow", "pandas.DataFrame", "pandas.read_json", "matplotlib.pyplot.axis", "matplotlib.pyplot.figure" ] ]
roseDickinson/nismod2
[ "5c571b055a6f5dc26bdc2bc8950b9cf1c9202fca" ]
[ "models/transport/run.py" ]
[ "\"\"\"Transport model wrapper\n\"\"\"\n# -*- coding: utf-8 -*-\n\nimport configparser\nimport csv\nimport os\nfrom subprocess import check_output, CalledProcessError\nfrom string import Template\n\nimport pandas as pd\nimport numpy as np\nfrom smif.data_layer.data_array import DataArray\nfrom smif.exception import SmifTimestepResolutionError\nfrom smif.model.sector_model import SectorModel\n\n\nclass BaseTransportWrapper(SectorModel):\n \"\"\"Base wrapper for the transport model - override class variables in implementations\n \"\"\"\n _config_filename = 'run_config.ini'\n _template_filename = 'config.properties.template'\n\n def __init__(self, *args, **kwargs):\n # shared setup\n self._current_timestep = None\n self._set_options()\n super().__init__(*args, **kwargs)\n\n def _set_options(self):\n this_dir = os.path.dirname(__file__)\n\n config = configparser.ConfigParser()\n config.read(os.path.join(this_dir, self._config_filename))\n\n self._templates_dir = os.path.join(this_dir, 'templates')\n\n if 'run' not in config:\n raise KeyError(\"Expected '[run]' section in transport run_config.ini\")\n\n if 'jar' in config['run']:\n self._jar_path = os.path.join(this_dir, config['run']['jar'])\n else:\n raise KeyError(\"Expected 'jar' in transport run_config.ini\")\n\n if 'working_dir' in config['run']:\n self._working_dir = os.path.join(this_dir, config['run']['working_dir'])\n self._input_dir = os.path.join(self._working_dir, 'input')\n self._output_dir = os.path.join(self._working_dir, 'output')\n self._config_path = os.path.join(self._working_dir, 'config.properties')\n else:\n raise KeyError(\"Expected 'data_dir' in transport run_config.ini\")\n\n if 'optional_args' in config['run']:\n self._optional_args = config['run']['optional_args'].split(\" \")\n else:\n self._optional_args = []\n\n def _output_file_path(self, filename):\n return os.path.join(self._output_dir, str(self._current_timestep), filename)\n\n def simulate(self, data):\n \"\"\"Run the transport model\n\n Arguments\n ---------\n data: smif.data_layer.DataHandle\n \"\"\"\n try:\n os.mkdir(self._input_dir)\n except FileExistsError:\n pass\n\n self._current_timestep = data.current_timestep\n self._set_parameters(data)\n self._set_inputs(data)\n self._set_properties(data)\n self._run_model_subprocess(data)\n self._set_outputs(data)\n\n def _run_model_subprocess(self, data_handle):\n \"\"\"Run the transport model jar and feed log messages\n into the smif loggerlogger\n \"\"\"\n\n working_dir = self._working_dir\n path_to_jar = self._jar_path\n\n self.logger.info(\"FROM run.py: Running transport model\")\n base_arguments = ['java'] + self._optional_args + [\n '-cp',\n path_to_jar,\n 'nismod.transport.App',\n '-c',\n self._config_path\n ]\n if data_handle.current_timestep == data_handle.base_timestep:\n base_arguments.append('-b')\n try:\n self.logger.debug(base_arguments)\n output = check_output(base_arguments)\n self.logger.info(output.decode(\"utf-8\"))\n except CalledProcessError as ex:\n self.logger.error(ex.output.decode(\"utf-8\"))\n self.logger.exception(\"Transport model failed %s\", ex)\n raise ex\n else:\n tspt_model_arguments = base_arguments + [\n '-road',\n str(data_handle.current_timestep),\n str(data_handle.previous_timestep)\n ]\n try:\n self.logger.debug(tspt_model_arguments)\n output = check_output(tspt_model_arguments)\n self.logger.info(output.decode(\"utf-8\"))\n except CalledProcessError as ex:\n self.logger.error(ex.output.decode(\"utf-8\"))\n self.logger.exception(\"Transport model failed %s\", ex)\n raise 
ex\n\n def _input_dimension_names(self, input_name, dimension_name):\n return self.inputs[input_name].dim_coords(dimension_name).ids\n\n def _set_parameters(self, data_handle):\n \"\"\"Read model parameters from data handle and set up config files\n \"\"\"\n input_dir = self._input_dir\n\n # Elasticities for passenger and freight demand\n variables = ['POPULATION', 'GVA', 'TIME', 'COST']\n types = {\n 'ETA': os.path.join(input_dir, 'elasticities.csv'),\n 'FREIGHT_ETA': os.path.join(\n input_dir, 'elasticitiesFreight.csv')\n }\n for suffix, filename in types.items():\n with open(filename, 'w') as file_handle:\n writer = csv.writer(file_handle)\n writer.writerow(('variable', 'elasticity'))\n for variable in variables:\n key = \"{}_{}\".format(variable, suffix)\n value = float(data_handle.get_parameter(key).as_ndarray())\n writer.writerow((variable, value))\n\n def _set_inputs(self, data_handle):\n \"\"\"Get model inputs from data handle and write to input files\n \"\"\"\n self._set_population(data_handle)\n self._set_gva(data_handle)\n self._set_fuel_price(data_handle)\n self._set_engine_fractions(data_handle)\n\n def _set_population(self, data_handle):\n current_population = data_handle.get_data(\"population\").as_df().reset_index()\n current_population['year'] = data_handle.current_timestep\n\n if data_handle.current_timestep != data_handle.base_timestep:\n previous_population = data_handle.get_previous_timestep_data(\"population\").as_df().reset_index()\n previous_population['year'] = data_handle.previous_timestep\n\n population = pd.concat(\n [previous_population, current_population]\n )\n else:\n population = current_population\n\n population.population = population.population.astype(int)\n # use region dimension name (could change) for columns\n colname = self.inputs['population'].dims[0]\n population = population.pivot(\n index='year', columns=colname, values='population'\n )\n population_filepath = os.path.join(\n self._input_dir, 'population.csv')\n population.to_csv(population_filepath)\n\n def _set_gva(self, data_handle):\n current_gva = data_handle.get_data(\"gva\").as_df().reset_index()\n current_gva['year'] = data_handle.current_timestep\n\n if data_handle.current_timestep != data_handle.base_timestep:\n previous_gva = data_handle.get_previous_timestep_data(\"gva\").as_df().reset_index()\n previous_gva['year'] = data_handle.previous_timestep\n\n gva = pd.concat(\n [previous_gva, current_gva]\n )\n else:\n gva = current_gva\n\n # use region dimension name (could change) for columns\n colname = self.inputs['gva'].dims[0]\n gva = gva.pivot(\n index='year', columns=colname, values='gva'\n )\n gva_filepath = os.path.join(self._input_dir, 'gva.csv')\n gva.to_csv(gva_filepath)\n\n def _set_fuel_price(self, data_handle):\n fuel_price = data_handle.get_data('fuel_price').as_df().reset_index()\n fuel_price['year'] = data_handle.current_timestep\n fuel_price = fuel_price.pivot(\n index='year', columns='transport_fuel_type', values='fuel_price'\n )\n fuel_price['ELECTRICITY'] = float(data_handle.get_data('fuel_price_electricity').data)\n\n fuel_price_filepath = os.path.join(self._input_dir, 'energyUnitCosts.csv')\n fuel_price.to_csv(fuel_price_filepath)\n\n def _set_engine_fractions(self, data_handle):\n current_data = self._get_engine_fractions(data_handle, data_handle.current_timestep)\n\n if data_handle.current_timestep != data_handle.base_timestep:\n base_data = self._get_engine_fractions(data_handle, data_handle.base_timestep)\n\n data = pd.concat([base_data, current_data])\n else:\n 
data = current_data\n\n data.to_csv(\n os.path.join(self._input_dir, 'engineTypeFractions.csv'), index=False,\n float_format='%.15f')\n\n\n def _get_engine_fractions(self, data_handle, timestep):\n engine_fractions = data_handle.get_data(\n 'engine_type_fractions', timestep=timestep).as_df().reset_index()\n engine_fractions = engine_fractions.pivot(\n index='vehicle_type', columns='engine_type', values='engine_type_fractions'\n )\n engine_fractions.columns = engine_fractions.columns.values\n engine_fractions = engine_fractions.reset_index().rename(\n columns={\n 'vehicle_type': 'vehicle'\n }\n )\n engine_fractions['year'] = timestep\n\n # ensure column order matches EngineType enum definition (Java CSV reading assumes\n # fixed column order)\n column_order = [\n 'year', 'vehicle', 'ICE_PETROL', 'ICE_DIESEL', 'ICE_LPG', 'ICE_H2', 'ICE_CNG',\n 'HEV_PETROL', 'HEV_DIESEL', 'FCEV_H2', 'PHEV_PETROL', 'PHEV_DIESEL', 'BEV']\n engine_fractions = engine_fractions[column_order]\n return engine_fractions\n\n def _set_properties(self, data_handle):\n \"\"\"Set the transport model properties, such as paths and interventions\n \"\"\"\n working_dir = self._working_dir\n working_dir_path = str(os.path.abspath(working_dir)).replace('\\\\', '/')\n path_to_config_template = os.path.join(self._templates_dir, self._template_filename)\n\n # read config as a Template for easy substitution of values\n with open(path_to_config_template) as template_fh:\n config = Template(template_fh.read())\n\n intervention_files = []\n # Must be able to identify rail model interventions\n # the key in the config.properties must be railInterventionsFileX instead of\n # interventionsFilesX\n # Next line must contain all possible types of rail interventions\n rail_interventions_types = ['NewRailStation']\n for i, intervention in enumerate(data_handle.get_current_interventions().values()):\n fname = self._write_intervention(intervention)\n # write path with \"/\" separators even on Windows\n fname = fname.replace(\"\\\\\", \"/\")\n if intervention['type'] in rail_interventions_types:\n intervention_files.append(\"railInterventionFile{} = {}\".format(i, fname))\n else:\n intervention_files.append(\"interventionFile{} = {}\".format(i, fname))\n\n config_str = config.substitute({\n 'relative_path': working_dir_path,\n 'intervention_files': '\\n'.join(intervention_files),\n 'link_travel_time_averaging_weight': \\\n float(data_handle.get_parameter('link_travel_time_averaging_weight').data),\n 'assignment_iterations': \\\n int(data_handle.get_parameter('assignment_iterations').data),\n 'prediction_iterations': \\\n int(data_handle.get_parameter('prediction_iterations').data),\n 'use_route_choice_model': \\\n bool(data_handle.get_parameter('use_route_choice_model').data),\n })\n\n with open(self._config_path, 'w') as template_fh:\n template_fh.write(config_str)\n\n def _write_intervention(self, intervention):\n \"\"\"Write a single intervention file, returning the full path\n \"\"\"\n path = os.path.normpath(os.path.abspath(os.path.join(\n self._input_dir, \"{}.properties\".format(intervention['name']))))\n\n # compute start/end year from smif intervention keys\n intervention['startYear'] = intervention['build_year']\n intervention['endYear'] = intervention['build_year'] + \\\n intervention['technical_lifetime']['value']\n del intervention['build_year']\n del intervention['technical_lifetime']\n\n # fix up path to congestion charging pricing details file\n if 'congestionChargingPricing' in intervention:\n cccp_filename = 
intervention['congestionChargingPricing']\n intervention['congestionChargingPricing'] = os.path.join(\n self._working_dir, 'data', 'csvfiles', cccp_filename\n ).replace(\"\\\\\", \"/\")\n\n print('Now writing {}'.format(path))\n with open(path, 'w') as file_handle:\n for key, value in intervention.items():\n file_handle.write(\"{} = {}\\n\".format(key, value))\n\n return path\n\n def _set_outputs(self, data_handle):\n \"\"\"Read results from model and write to data handle\n \"\"\"\n # !!! hack: look through output dimensions to find LAD dimension name\n dims = self.outputs['electric_vehicle_trip_starts'].dims\n zone_dim = 'lad_uk_2016' # sensible default for full model\n for dim in dims:\n if dim != 'annual_day_hours':\n # assume that time is 'annual_day_hours', so we want the other one\n zone_dim = dim\n\n # EV trip starts and consumption\n evt_name = 'electric_vehicle_trip_starts'\n evc_name = 'electric_vehicle_electricity_consumption'\n\n # set up zero-valued output arrays\n ev_trips = np.zeros(self.outputs[evt_name].shape)\n ev_consumption = np.zeros(self.outputs[evc_name].shape)\n\n for vehicle_type in ('CAR', 'VAN', 'RIGID', 'ARTIC'):\n vehicle_ev_trips = self._melt_output(\n name=evt_name,\n filename=self._output_file_path(f'zonalTemporalEVTripStarts{vehicle_type}.csv'),\n dims={\n 'zone': zone_dim,\n 'hour': 'annual_day_hours'\n },\n csv_id_vars=['zone'],\n csv_melt_var='hour'\n )\n\n vehicle_ev_consumption = self._melt_output(\n name=evc_name,\n filename=self._output_file_path(f'zonalTemporalEVTripElectricity{vehicle_type}.csv'),\n dims={\n 'zone': zone_dim,\n 'hour': 'annual_day_hours'\n },\n csv_id_vars=['zone'],\n csv_melt_var='hour'\n )\n\n # sum up over vehicles (aggregated output)\n ev_trips += self._df_to_ndarray(evt_name, vehicle_ev_trips)\n ev_consumption += self._df_to_ndarray(evc_name, vehicle_ev_consumption)\n\n # Output EV trip starts and energy consumption\n data_handle.set_results(evt_name, ev_trips)\n data_handle.set_results(evc_name, ev_consumption)\n\n # Energy consumption, all fuels\n ec_name = 'energy_consumption'\n energy_consumption = self._melt_output(\n name=ec_name,\n filename=self._output_file_path('energyConsumptions.csv'),\n dims={\n 'fuel': 'transport_fuel_type'\n },\n csv_id_vars=[],\n csv_melt_var='fuel'\n )\n # Split - non-electricity (measured in litres)\n non_elec = energy_consumption.copy()\n non_elec = non_elec[non_elec.transport_fuel_type != 'ELECTRICITY']\n non_elec['annual_day'] = 'annual_day'\n non_elec = self._df_to_ndarray(ec_name, non_elec)\n data_handle.set_results(ec_name, non_elec)\n # Split - electricity (measured in kWh)\n elec = energy_consumption[energy_consumption.transport_fuel_type == 'ELECTRICITY']\n elec = np.array(elec.energy_consumption)\n data_handle.set_results('energy_consumption_electricity', elec)\n\n def _melt_output(self, name, filename, dims, csv_id_vars, csv_melt_var):\n return pd.read_csv(\n filename\n ).drop(\n 'year', axis=1 # ignore the year output, assume it's always current timestep\n ).melt(\n id_vars=csv_id_vars,\n var_name=csv_melt_var,\n value_name=name\n ).rename(\n dims, axis=1\n )\n\n def _df_to_ndarray(self, output_name, dataframe):\n spec = self.outputs[output_name]\n dataframe.set_index(spec.dims, inplace=True)\n dataarray = DataArray.from_df(spec, dataframe)\n return dataarray.data\n\n\nclass TransportWrapper(BaseTransportWrapper):\n \"\"\"Wrap the transport model, in 'full' configuration\n \"\"\"\n _config_filename = 'run_config_full.ini'\n _template_filename = 
'gb-config.properties.template'\n\n\nclass SouthamptonTransportWrapper(BaseTransportWrapper):\n \"\"\"Wrap the transport model, in 'southampton' configuration\n \"\"\"\n _config_filename = 'run_config_southampton.ini'\n _template_filename = 'southampton-config.properties.template'\n" ]
[ [ "pandas.concat", "numpy.array", "numpy.zeros", "pandas.read_csv" ] ]
Rihanamsadek/keras
[ "4c16931b49aceb38b763b4a24a55a99b6847e29b" ]
[ "keras/engine/training.py" ]
[ "\"\"\"Training-related part of the Keras engine.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport warnings\nimport copy\nimport numpy as np\n\nfrom .network import Network\nfrom .base_layer import Layer\nfrom .training_utils import collect_metrics\nfrom .training_utils import check_array_length_consistency\nfrom .training_utils import check_loss_and_target_compatibility\nfrom .training_utils import standardize_class_weights\nfrom .training_utils import standardize_input_data\nfrom .training_utils import standardize_sample_weights\nfrom .training_utils import standardize_weights\nfrom .training_utils import weighted_masked_objective\nfrom . import training_arrays\nfrom . import training_generator\nfrom .. import backend as K\nfrom .. import optimizers\nfrom .. import losses\nfrom .. import metrics as metrics_module\nfrom ..utils.generic_utils import slice_arrays\nfrom ..utils.generic_utils import to_list\nfrom ..utils.generic_utils import unpack_singleton\nfrom ..legacy import interfaces\n\n\nclass Model(Network):\n \"\"\"The `Model` class adds training & evaluation routines to a `Network`.\n \"\"\"\n\n def compile(self, optimizer,\n loss=None,\n metrics=None,\n loss_weights=None,\n sample_weight_mode=None,\n weighted_metrics=None,\n target_tensors=None,\n **kwargs):\n \"\"\"Configures the model for training.\n\n # Arguments\n optimizer: String (name of optimizer) or optimizer instance.\n See [optimizers](/optimizers).\n loss: String (name of objective function) or objective function.\n See [losses](/losses).\n If the model has multiple outputs, you can use a different loss\n on each output by passing a dictionary or a list of losses.\n The loss value that will be minimized by the model\n will then be the sum of all individual losses.\n metrics: List of metrics to be evaluated by the model\n during training and testing.\n Typically you will use `metrics=['accuracy']`.\n To specify different metrics for different outputs of a\n multi-output model, you could also pass a dictionary,\n such as `metrics={'output_a': 'accuracy'}`.\n loss_weights: Optional list or dictionary specifying scalar\n coefficients (Python floats) to weight the loss contributions\n of different model outputs.\n The loss value that will be minimized by the model\n will then be the *weighted sum* of all individual losses,\n weighted by the `loss_weights` coefficients.\n If a list, it is expected to have a 1:1 mapping\n to the model's outputs. If a tensor, it is expected to map\n output names (strings) to scalar coefficients.\n sample_weight_mode: If you need to do timestep-wise\n sample weighting (2D weights), set this to `\"temporal\"`.\n `None` defaults to sample-wise weights (1D).\n If the model has multiple outputs, you can use a different\n `sample_weight_mode` on each output by passing a\n dictionary or a list of modes.\n weighted_metrics: List of metrics to be evaluated and weighted\n by sample_weight or class_weight during training and testing.\n target_tensors: By default, Keras will create placeholders for the\n model's target, which will be fed with the target data during\n training. If instead you would like to use your own\n target tensors (in turn, Keras will not expect external\n Numpy data for these targets at training time), you\n can specify them via the `target_tensors` argument. 
It can be\n a single tensor (for a single-output model), a list of tensors,\n or a dict mapping output names to target tensors.\n **kwargs: When using the Theano/CNTK backends, these arguments\n are passed into `K.function`.\n When using the TensorFlow backend,\n these arguments are passed into `tf.Session.run`.\n\n # Raises\n ValueError: In case of invalid arguments for\n `optimizer`, `loss`, `metrics` or `sample_weight_mode`.\n \"\"\"\n self.optimizer = optimizers.get(optimizer)\n self.loss = loss or []\n self.metrics = metrics or []\n self.loss_weights = loss_weights\n self.sample_weight_mode = sample_weight_mode\n self.weighted_metrics = weighted_metrics\n\n if not self.built:\n # Model is not compilable because\n # it does not know its number of inputs\n # and outputs, nor their shapes and names.\n # We will compile after the first\n # time the model gets called on training data.\n return\n self._is_compiled = True\n\n # Prepare loss functions.\n if isinstance(loss, dict):\n for name in loss:\n if name not in self.output_names:\n raise ValueError('Unknown entry in loss '\n 'dictionary: \"' + name + '\". '\n 'Only expected the following keys: ' +\n str(self.output_names))\n loss_functions = []\n for name in self.output_names:\n if name not in loss:\n warnings.warn('Output \"' + name +\n '\" missing from loss dictionary. '\n 'We assume this was done on purpose, '\n 'and we will not be expecting '\n 'any data to be passed to \"' + name +\n '\" during training.', stacklevel=2)\n loss_functions.append(losses.get(loss.get(name)))\n elif isinstance(loss, list):\n if len(loss) != len(self.outputs):\n raise ValueError('When passing a list as loss, '\n 'it should have one entry per model outputs. '\n 'The model has ' + str(len(self.outputs)) +\n ' outputs, but you passed loss=' +\n str(loss))\n loss_functions = [losses.get(l) for l in loss]\n else:\n loss_function = losses.get(loss)\n loss_functions = [loss_function for _ in range(len(self.outputs))]\n self.loss_functions = loss_functions\n weighted_losses = [\n weighted_masked_objective(fn) for fn in loss_functions]\n skip_target_indices = []\n skip_target_weighing_indices = []\n self._feed_outputs = []\n self._feed_output_names = []\n self._feed_output_shapes = []\n self._feed_loss_fns = []\n for i in range(len(weighted_losses)):\n if weighted_losses[i] is None:\n skip_target_indices.append(i)\n skip_target_weighing_indices.append(i)\n\n # Prepare output masks.\n masks = self.compute_mask(self.inputs, mask=None)\n if masks is None:\n masks = [None for _ in self.outputs]\n masks = to_list(masks)\n\n # Prepare loss weights.\n if loss_weights is None:\n loss_weights_list = [1. for _ in range(len(self.outputs))]\n elif isinstance(loss_weights, dict):\n for name in loss_weights:\n if name not in self.output_names:\n raise ValueError('Unknown entry in loss_weights '\n 'dictionary: \"' + name + '\". '\n 'Only expected the following keys: ' +\n str(self.output_names))\n loss_weights_list = []\n for name in self.output_names:\n loss_weights_list.append(loss_weights.get(name, 1.))\n elif isinstance(loss_weights, list):\n if len(loss_weights) != len(self.outputs):\n raise ValueError('When passing a list as loss_weights, '\n 'it should have one entry per model output. 
'\n 'The model has ' + str(len(self.outputs)) +\n ' outputs, but you passed loss_weights=' +\n str(loss_weights))\n loss_weights_list = loss_weights\n else:\n raise TypeError('Could not interpret loss_weights argument: ' +\n str(loss_weights) +\n ' - expected a list of dicts.')\n\n # Prepare targets of model.\n self.targets = []\n self._feed_targets = []\n if target_tensors is not None:\n if isinstance(target_tensors, list):\n if len(target_tensors) != len(self.outputs):\n raise ValueError(\n 'When passing a list as `target_tensors`, '\n 'it should have one entry per model output. '\n 'The model has ' + str(len(self.outputs)) +\n ' outputs, but you passed target_tensors=' +\n str(target_tensors))\n elif isinstance(target_tensors, dict):\n for name in target_tensors:\n if name not in self.output_names:\n raise ValueError('Unknown entry in `target_tensors` '\n 'dictionary: \"' + name + '\". '\n 'Only expected the following keys: ' +\n str(self.output_names))\n tmp_target_tensors = []\n for name in self.output_names:\n tmp_target_tensors.append(target_tensors.get(name, None))\n target_tensors = tmp_target_tensors\n elif K.is_tensor(target_tensors):\n if len(self.outputs) != 1:\n raise ValueError('The model has ' + str(len(self.outputs)) +\n ' outputs, but you passed a single tensor as '\n '`target_tensors`. Expected a list or a dict '\n 'of tensors.')\n target_tensors = [target_tensors]\n else:\n raise TypeError('Expected `target_tensors` to be a tensor, '\n 'a list of tensors, or dict of tensors, but got:',\n target_tensors)\n\n for i in range(len(self.outputs)):\n if i in skip_target_indices:\n self.targets.append(None)\n else:\n shape = K.int_shape(self.outputs[i])\n name = self.output_names[i]\n if target_tensors is not None:\n target = target_tensors[i]\n else:\n target = None\n if target is None or K.is_placeholder(target):\n if target is None:\n target = K.placeholder(\n ndim=len(shape),\n name=name + '_target',\n sparse=K.is_sparse(self.outputs[i]),\n dtype=K.dtype(self.outputs[i]))\n self._feed_targets.append(target)\n self._feed_outputs.append(self.outputs[i])\n self._feed_output_names.append(name)\n self._feed_output_shapes.append(shape)\n self._feed_loss_fns.append(self.loss_functions[i])\n else:\n skip_target_weighing_indices.append(i)\n self.targets.append(target)\n\n # Prepare sample weights.\n sample_weights = []\n sample_weight_modes = []\n if isinstance(sample_weight_mode, dict):\n for name in sample_weight_mode:\n if name not in self.output_names:\n raise ValueError('Unknown entry in '\n 'sample_weight_mode dictionary: \"' +\n name + '\". '\n 'Only expected the following keys: ' +\n str(self.output_names))\n for i, name in enumerate(self.output_names):\n if i in skip_target_weighing_indices:\n weight = None\n sample_weight_modes.append(None)\n else:\n if name not in sample_weight_mode:\n raise ValueError('Output \"' + name +\n '\" missing from sample_weight_modes '\n 'dictionary')\n if sample_weight_mode.get(name) == 'temporal':\n weight = K.placeholder(ndim=2,\n name=name + '_sample_weights')\n sample_weight_modes.append('temporal')\n else:\n weight = K.placeholder(ndim=1,\n name=name + '_sample_weights')\n sample_weight_modes.append(None)\n sample_weights.append(weight)\n elif isinstance(sample_weight_mode, list):\n if len(sample_weight_mode) != len(self.outputs):\n raise ValueError('When passing a list as sample_weight_mode, '\n 'it should have one entry per model output. 
'\n 'The model has ' + str(len(self.outputs)) +\n ' outputs, but you passed '\n 'sample_weight_mode=' +\n str(sample_weight_mode))\n for i in range(len(self.output_names)):\n if i in skip_target_weighing_indices:\n weight = None\n sample_weight_modes.append(None)\n else:\n mode = sample_weight_mode[i]\n name = self.output_names[i]\n if mode == 'temporal':\n weight = K.placeholder(ndim=2,\n name=name + '_sample_weights')\n sample_weight_modes.append('temporal')\n else:\n weight = K.placeholder(ndim=1,\n name=name + '_sample_weights')\n sample_weight_modes.append(None)\n sample_weights.append(weight)\n else:\n for i, name in enumerate(self.output_names):\n if i in skip_target_weighing_indices:\n sample_weight_modes.append(None)\n sample_weights.append(None)\n else:\n if sample_weight_mode == 'temporal':\n sample_weights.append(\n K.placeholder(ndim=2,\n name=name + '_sample_weights'))\n sample_weight_modes.append('temporal')\n else:\n sample_weights.append(\n K.placeholder(ndim=1,\n name=name + '_sample_weights'))\n sample_weight_modes.append(None)\n self.sample_weight_modes = sample_weight_modes\n self._feed_sample_weight_modes = []\n for i in range(len(self.outputs)):\n if i not in skip_target_weighing_indices:\n self._feed_sample_weight_modes.append(\n self.sample_weight_modes[i])\n\n # Prepare metrics.\n self.metrics_names = ['loss']\n self.metrics_tensors = []\n\n # Compute total loss.\n total_loss = None\n with K.name_scope('loss'):\n for i in range(len(self.outputs)):\n if i in skip_target_indices:\n continue\n y_true = self.targets[i]\n y_pred = self.outputs[i]\n weighted_loss = weighted_losses[i]\n sample_weight = sample_weights[i]\n mask = masks[i]\n loss_weight = loss_weights_list[i]\n with K.name_scope(self.output_names[i] + '_loss'):\n output_loss = weighted_loss(y_true, y_pred,\n sample_weight, mask)\n if len(self.outputs) > 1:\n self.metrics_tensors.append(output_loss)\n self.metrics_names.append(self.output_names[i] + '_loss')\n if total_loss is None:\n total_loss = loss_weight * output_loss\n else:\n total_loss += loss_weight * output_loss\n if total_loss is None:\n if not self.losses:\n raise ValueError('The model cannot be compiled '\n 'because it has no loss to optimize.')\n else:\n total_loss = 0.\n\n # Add regularization penalties\n # and other layer-specific losses.\n for loss_tensor in self.losses:\n total_loss += loss_tensor\n\n # List of same size as output_names.\n # contains tuples (metrics for output, names of metrics).\n nested_metrics = collect_metrics(metrics, self.output_names)\n nested_weighted_metrics = collect_metrics(weighted_metrics,\n self.output_names)\n self.metrics_updates = []\n self.stateful_metric_names = []\n self.stateful_metric_functions = []\n\n def handle_metrics(metrics, weights=None):\n metric_name_prefix = 'weighted_' if weights is not None else ''\n\n for metric in metrics:\n if metric in ('accuracy', 'acc', 'crossentropy', 'ce'):\n # custom handling of accuracy/crossentropy\n # (because of class mode duality)\n output_shape = K.int_shape(self.outputs[i])\n if (output_shape[-1] == 1 or\n self.loss_functions[i] == losses.binary_crossentropy):\n # case: binary accuracy/crossentropy\n if metric in ('accuracy', 'acc'):\n metric_fn = metrics_module.binary_accuracy\n elif metric in ('crossentropy', 'ce'):\n metric_fn = metrics_module.binary_crossentropy\n elif (self.loss_functions[i] ==\n losses.sparse_categorical_crossentropy):\n # case: categorical accuracy/crossentropy\n # with sparse targets\n if metric in ('accuracy', 'acc'):\n metric_fn = 
metrics_module.sparse_categorical_accuracy\n elif metric in ('crossentropy', 'ce'):\n metric_fn = (\n metrics_module.sparse_categorical_crossentropy)\n else:\n # case: categorical accuracy/crossentropy\n if metric in ('accuracy', 'acc'):\n metric_fn = metrics_module.categorical_accuracy\n elif metric in ('crossentropy', 'ce'):\n metric_fn = metrics_module.categorical_crossentropy\n if metric in ('accuracy', 'acc'):\n suffix = 'acc'\n elif metric in ('crossentropy', 'ce'):\n suffix = 'ce'\n weighted_metric_fn = weighted_masked_objective(metric_fn)\n metric_name = metric_name_prefix + suffix\n else:\n metric_fn = metrics_module.get(metric)\n weighted_metric_fn = weighted_masked_objective(metric_fn)\n # Get metric name as string\n if hasattr(metric_fn, 'name'):\n metric_name = metric_fn.name\n else:\n metric_name = metric_fn.__name__\n metric_name = metric_name_prefix + metric_name\n\n with K.name_scope(metric_name):\n metric_result = weighted_metric_fn(y_true, y_pred,\n weights=weights,\n mask=masks[i])\n\n # Append to self.metrics_names, self.metric_tensors,\n # self.stateful_metric_names\n if len(self.output_names) > 1:\n metric_name = self.output_names[i] + '_' + metric_name\n # Dedupe name\n j = 1\n base_metric_name = metric_name\n while metric_name in self.metrics_names:\n metric_name = base_metric_name + '_' + str(j)\n j += 1\n self.metrics_names.append(metric_name)\n self.metrics_tensors.append(metric_result)\n\n # Keep track of state updates created by\n # stateful metrics (i.e. metrics layers).\n if isinstance(metric_fn, Layer) and metric_fn.stateful:\n self.stateful_metric_names.append(metric_name)\n self.stateful_metric_functions.append(metric_fn)\n self.metrics_updates += metric_fn.updates\n with K.name_scope('metrics'):\n for i in range(len(self.outputs)):\n if i in skip_target_indices:\n continue\n\n y_true = self.targets[i]\n y_pred = self.outputs[i]\n weights = sample_weights[i]\n output_metrics = nested_metrics[i]\n output_weighted_metrics = nested_weighted_metrics[i]\n handle_metrics(output_metrics)\n handle_metrics(output_weighted_metrics, weights=weights)\n\n # Prepare gradient updates and state updates.\n self.total_loss = total_loss\n self.sample_weights = sample_weights\n self._feed_sample_weights = []\n for i in range(len(self.sample_weights)):\n if i not in skip_target_weighing_indices:\n self._feed_sample_weights.append(sample_weights[i])\n\n # Functions for train, test and predict will\n # be compiled lazily when required.\n # This saves time when the user is not using all functions.\n self._function_kwargs = kwargs\n\n self.train_function = None\n self.test_function = None\n self.predict_function = None\n\n # Collected trainable weights, sorted in topological order.\n trainable_weights = self.trainable_weights\n self._collected_trainable_weights = trainable_weights\n\n def _check_trainable_weights_consistency(self):\n \"\"\"Check trainable weights count consistency.\n\n This will raise a warning if `trainable_weights` and\n `_collected_trainable_weights` are inconsistent (i.e. 
have different\n number of parameters).\n Inconsistency will typically arise when one modifies `model.trainable`\n without calling `model.compile` again.\n \"\"\"\n if not hasattr(self, '_collected_trainable_weights'):\n return\n\n if (len(self.trainable_weights) !=\n len(self._collected_trainable_weights)):\n warnings.warn(UserWarning(\n 'Discrepancy between trainable weights and collected trainable'\n ' weights, did you set `model.trainable` without calling'\n ' `model.compile` after ?'))\n\n def _make_train_function(self):\n if not hasattr(self, 'train_function'):\n raise RuntimeError('You must compile your model before using it.')\n self._check_trainable_weights_consistency()\n if self.train_function is None:\n inputs = (self._feed_inputs +\n self._feed_targets +\n self._feed_sample_weights)\n if self._uses_dynamic_learning_phase():\n inputs += [K.learning_phase()]\n\n with K.name_scope('training'):\n with K.name_scope(self.optimizer.__class__.__name__):\n training_updates = self.optimizer.get_updates(\n params=self._collected_trainable_weights,\n loss=self.total_loss)\n updates = (self.updates +\n training_updates +\n self.metrics_updates)\n # Gets loss and metrics. Updates weights at each call.\n self.train_function = K.function(\n inputs,\n [self.total_loss] + self.metrics_tensors,\n updates=updates,\n name='train_function',\n **self._function_kwargs)\n\n def _make_test_function(self):\n if not hasattr(self, 'test_function'):\n raise RuntimeError('You must compile your model before using it.')\n if self.test_function is None:\n inputs = (self._feed_inputs +\n self._feed_targets +\n self._feed_sample_weights)\n if self._uses_dynamic_learning_phase():\n inputs += [K.learning_phase()]\n # Return loss and metrics, no gradient updates.\n # Does update the network states.\n self.test_function = K.function(\n inputs,\n [self.total_loss] + self.metrics_tensors,\n updates=self.state_updates + self.metrics_updates,\n name='test_function',\n **self._function_kwargs)\n\n def _make_predict_function(self):\n if not hasattr(self, 'predict_function'):\n self.predict_function = None\n if self.predict_function is None:\n if self._uses_dynamic_learning_phase():\n inputs = self._feed_inputs + [K.learning_phase()]\n else:\n inputs = self._feed_inputs\n # Gets network outputs. Does not update weights.\n # Does update the network states.\n kwargs = getattr(self, '_function_kwargs', {})\n self.predict_function = K.function(inputs,\n self.outputs,\n updates=self.state_updates,\n name='predict_function',\n **kwargs)\n\n def _uses_dynamic_learning_phase(self):\n return (self.uses_learning_phase and\n not isinstance(K.learning_phase(), int))\n\n def _set_inputs(self, inputs, outputs=None, training=None):\n \"\"\"Set model's input and output specs based on the input data received.\n\n This is to be used for Model subclasses, which do not know at instantiation\n time what their inputs look like.\n\n # Arguments\n inputs: Single array, or list of arrays. The arrays could be placeholders,\n Numpy arrays, or data tensors.\n - if placeholders: the model is built on top of these placeholders,\n and we expect Numpy data to be fed for them when calling `fit`/etc.\n - if Numpy data: we create placeholders matching the shape of the Numpy\n arrays. 
We expect Numpy data to be fed for these placeholders\n when calling `fit`/etc.\n - if data tensors: the model is built on top of these tensors.\n We do not expect any Numpy data to be provided when calling `fit`/etc.\n outputs: Optional output tensors (if already computed by running\n the model).\n training: Boolean or None. Only relevant in symbolic mode. Specifies\n whether to build the model's graph in inference mode (False), training\n mode (True), or using the Keras learning phase (None).\n \"\"\"\n if self.__class__.__name__ == 'Sequential':\n # Note: we can't test whether the model\n # is `Sequential` via `isinstance`\n # since `Sequential` depends on `Model`.\n if isinstance(inputs, list):\n assert len(inputs) == 1\n inputs = inputs[0]\n self.build(input_shape=(None,) + inputs.shape[1:])\n return\n\n if self.inputs:\n raise ValueError('Model inputs are already set.')\n\n # On-the-fly setting of symbolic model inputs\n # (either by using the tensor provided,\n # or by creating a placeholder if Numpy data was provided).\n self.inputs = []\n self.input_names = []\n self._feed_inputs = []\n self._feed_input_names = []\n self._feed_input_shapes = []\n inputs = to_list(inputs, allow_tuple=True)\n\n for i, v in enumerate(inputs):\n name = 'input_%d' % (i + 1)\n self.input_names.append(name)\n if isinstance(v, list):\n v = np.asarray(v)\n if v.ndim == 1:\n v = np.expand_dims(v, 1)\n if isinstance(v, (np.ndarray)):\n # We fix the placeholder shape except the batch size.\n # This is suboptimal, but it is the best we can do with the info\n # we have. The user should call `model._set_inputs(placeholders)`\n # to specify custom placeholders if the need arises.\n shape = (None,) + v.shape[1:]\n placeholder = K.placeholder(shape=shape, name=name)\n self.inputs.append(placeholder)\n self._feed_inputs.append(placeholder)\n self._feed_input_names.append(name)\n self._feed_input_shapes.append(shape)\n else:\n # Assumed tensor - TODO(fchollet) additional type check?\n self.inputs.append(v)\n if K.is_placeholder(v):\n self._feed_inputs.append(v)\n self._feed_input_names.append(name)\n self._feed_input_shapes.append(K.int_shape(v))\n\n if outputs is None:\n # Obtain symbolic outputs by calling the model.\n if self._expects_training_arg:\n outputs = self.call(unpack_singleton(self.inputs), training=training)\n else:\n outputs = self.call(unpack_singleton(self.inputs))\n outputs = to_list(outputs, allow_tuple=True)\n self.outputs = outputs\n self.output_names = [\n 'output_%d' % (i + 1) for i in range(len(self.outputs))]\n self.built = True\n\n def _standardize_user_data(self, x,\n y=None,\n sample_weight=None,\n class_weight=None,\n check_array_lengths=True,\n batch_size=None):\n all_inputs = []\n if not self.built:\n # We need to use `x` to set the model inputs.\n # We type-check that `x` and `y` are either single arrays\n # or lists of arrays.\n if isinstance(x, (list, tuple)):\n if not all(isinstance(v, np.ndarray) or\n K.is_tensor(v) for v in x):\n raise ValueError('Please provide as model inputs '\n 'either a single '\n 'array or a list of arrays. '\n 'You passed: x=' + str(x))\n all_inputs += list(x)\n elif isinstance(x, dict):\n raise ValueError('Please do not pass a dictionary '\n 'as model inputs.')\n else:\n if not isinstance(x, np.ndarray) and not K.is_tensor(x):\n raise ValueError('Please provide as model inputs '\n 'either a single '\n 'array or a list of arrays. 
'\n 'You passed: x=' + str(x))\n all_inputs.append(x)\n\n # Build the model using the retrieved inputs (value or symbolic).\n # If values, then in symbolic-mode placeholders will be created\n # to match the value shapes.\n if not self.inputs:\n self._set_inputs(x)\n\n if y is not None:\n if not self.optimizer:\n raise RuntimeError('You must compile a model before '\n 'training/testing. '\n 'Use `model.compile(optimizer, loss)`.')\n if not self._is_compiled:\n # On-the-fly compilation of the model.\n # We need to use `y` to set the model targets.\n if isinstance(y, (list, tuple)):\n if not all(isinstance(v, np.ndarray) or\n K.is_tensor(v) for v in y):\n raise ValueError('Please provide as model targets '\n 'either a single '\n 'array or a list of arrays. '\n 'You passed: y=' + str(y))\n elif isinstance(y, dict):\n raise ValueError('Please do not pass a dictionary '\n 'as model targets.')\n else:\n if not isinstance(y, np.ndarray) and not K.is_tensor(y):\n raise ValueError('Please provide as model targets '\n 'either a single '\n 'array or a list of arrays. '\n 'You passed: y=' + str(y))\n # Typecheck that all inputs are *either* value *or* symbolic.\n if y is not None:\n all_inputs += to_list(y, allow_tuple=True)\n if any(K.is_tensor(v) for v in all_inputs):\n if not all(K.is_tensor(v) for v in all_inputs):\n raise ValueError('Do not pass inputs that mix Numpy '\n 'arrays and symbolic tensors. '\n 'You passed: x=' + str(x) +\n '; y=' + str(y))\n\n # Handle target tensors if any passed.\n y = to_list(y, allow_tuple=True)\n target_tensors = [v for v in y if K.is_tensor(v)]\n if not target_tensors:\n target_tensors = None\n self.compile(optimizer=self.optimizer,\n loss=self.loss,\n metrics=self.metrics,\n loss_weights=self.loss_weights,\n target_tensors=target_tensors)\n\n # If `x` and `y` were all symbolic,\n # then the model should not be fed any inputs and targets.\n # Note: in this case, `any` and `all` are equivalent since we disallow\n # mixed symbolic/value inputs.\n if any(K.is_tensor(v) for v in all_inputs):\n return [], [], []\n\n # What follows is input validation and standardization to list format,\n # in the case where all inputs are value arrays.\n\n if not self._is_graph_network:\n # Case: symbolic-mode subclassed network.\n # Do not do shape validation.\n feed_input_names = self._feed_input_names\n feed_input_shapes = None\n else:\n # Case: symbolic-mode graph network.\n # In this case, we run extensive shape validation checks.\n feed_input_names = self._feed_input_names\n feed_input_shapes = self._feed_input_shapes\n\n # Standardize the inputs.\n x = standardize_input_data(\n x,\n feed_input_names,\n feed_input_shapes,\n check_batch_axis=False, # Don't enforce the batch size.\n exception_prefix='input')\n\n if y is not None:\n if not self._is_graph_network:\n feed_output_names = self._feed_output_names\n feed_output_shapes = None\n # Sample weighting not supported in this case.\n # TODO: consider supporting it.\n feed_sample_weight_modes = [None for _ in self.outputs]\n else:\n feed_output_names = self._feed_output_names\n feed_sample_weight_modes = self._feed_sample_weight_modes\n feed_output_shapes = []\n for output_shape, loss_fn in zip(self._feed_output_shapes,\n self._feed_loss_fns):\n if loss_fn is losses.sparse_categorical_crossentropy:\n if K.image_data_format() == 'channels_first' and len(\n output_shape) in [4, 5]:\n feed_output_shapes.append(\n (output_shape[0], 1) + output_shape[2:])\n else:\n feed_output_shapes.append(output_shape[:-1] + (1,))\n elif (not 
hasattr(loss_fn, '__name__') or\n getattr(losses, loss_fn.__name__, None) is None):\n # If `loss_fn` is not a function (e.g. callable class)\n # or if it not in the `losses` module, then\n # it is a user-defined loss and we make no assumptions\n # about it.\n feed_output_shapes.append(None)\n else:\n feed_output_shapes.append(output_shape)\n\n # Standardize the outputs.\n y = standardize_input_data(\n y,\n feed_output_names,\n feed_output_shapes,\n check_batch_axis=False, # Don't enforce the batch size.\n exception_prefix='target')\n\n # Generate sample-wise weight values given the `sample_weight` and\n # `class_weight` arguments.\n sample_weights = standardize_sample_weights(\n sample_weight, feed_output_names)\n class_weights = standardize_class_weights(\n class_weight, feed_output_names)\n sample_weights = [\n standardize_weights(ref, sw, cw, mode)\n for (ref, sw, cw, mode) in\n zip(y, sample_weights, class_weights,\n feed_sample_weight_modes)\n ]\n # Check that all arrays have the same length.\n if check_array_lengths:\n check_array_length_consistency(x, y, sample_weights)\n if self._is_graph_network:\n # Additional checks to avoid users mistakenly\n # using improper loss fns.\n check_loss_and_target_compatibility(\n y, self._feed_loss_fns, feed_output_shapes)\n else:\n y = []\n sample_weights = []\n\n if self.stateful and batch_size:\n # Check that for stateful networks, number of samples is a multiple\n # of the static batch size.\n if x[0].shape[0] % batch_size != 0:\n raise ValueError('In a stateful network, '\n 'you should only pass inputs with '\n 'a number of samples that can be '\n 'divided by the batch size. Found: ' +\n str(x[0].shape[0]) + ' samples')\n return x, y, sample_weights\n\n def _get_callback_model(self):\n \"\"\"Returns the Callback Model for this Model.\"\"\"\n if hasattr(self, 'callback_model') and self.callback_model:\n return self.callback_model\n return self\n\n def fit(self,\n x=None,\n y=None,\n batch_size=None,\n epochs=1,\n verbose=1,\n callbacks=None,\n validation_split=0.,\n validation_data=None,\n shuffle=True,\n class_weight=None,\n sample_weight=None,\n initial_epoch=0,\n steps_per_epoch=None,\n validation_steps=None,\n validation_freq=1,\n **kwargs):\n \"\"\"Trains the model for a given number of epochs (iterations on a dataset).\n\n # Arguments\n x: Numpy array of training data (if the model has a single input),\n or list of Numpy arrays (if the model has multiple inputs).\n If input layers in the model are named, you can also pass a\n dictionary mapping input names to Numpy arrays.\n `x` can be `None` (default) if feeding from\n framework-native tensors (e.g. TensorFlow data tensors).\n y: Numpy array of target (label) data\n (if the model has a single output),\n or list of Numpy arrays (if the model has multiple outputs).\n If output layers in the model are named, you can also pass a\n dictionary mapping output names to Numpy arrays.\n `y` can be `None` (default) if feeding from\n framework-native tensors (e.g. TensorFlow data tensors).\n batch_size: Integer or `None`.\n Number of samples per gradient update.\n If unspecified, `batch_size` will default to 32.\n epochs: Integer. Number of epochs to train the model.\n An epoch is an iteration over the entire `x` and `y`\n data provided.\n Note that in conjunction with `initial_epoch`,\n `epochs` is to be understood as \"final epoch\".\n The model is not trained for a number of iterations\n given by `epochs`, but merely until the epoch\n of index `epochs` is reached.\n verbose: Integer. 0, 1, or 2. 
Verbosity mode.\n 0 = silent, 1 = progress bar, 2 = one line per epoch.\n callbacks: List of `keras.callbacks.Callback` instances.\n List of callbacks to apply during training and validation\n (if ).\n See [callbacks](/callbacks).\n validation_split: Float between 0 and 1.\n Fraction of the training data to be used as validation data.\n The model will set apart this fraction of the training data,\n will not train on it, and will evaluate\n the loss and any model metrics\n on this data at the end of each epoch.\n The validation data is selected from the last samples\n in the `x` and `y` data provided, before shuffling.\n validation_data: tuple `(x_val, y_val)` or tuple\n `(x_val, y_val, val_sample_weights)` on which to evaluate\n the loss and any model metrics at the end of each epoch.\n The model will not be trained on this data.\n `validation_data` will override `validation_split`.\n shuffle: Boolean (whether to shuffle the training data\n before each epoch) or str (for 'batch').\n 'batch' is a special option for dealing with the\n limitations of HDF5 data; it shuffles in batch-sized chunks.\n Has no effect when `steps_per_epoch` is not `None`.\n class_weight: Optional dictionary mapping class indices (integers)\n to a weight (float) value, used for weighting the loss function\n (during training only).\n This can be useful to tell the model to\n \"pay more attention\" to samples from\n an under-represented class.\n sample_weight: Optional Numpy array of weights for\n the training samples, used for weighting the loss function\n (during training only). You can either pass a flat (1D)\n Numpy array with the same length as the input samples\n (1:1 mapping between weights and samples),\n or in the case of temporal data,\n you can pass a 2D array with shape\n `(samples, sequence_length)`,\n to apply a different weight to every timestep of every sample.\n In this case you should make sure to specify\n `sample_weight_mode=\"temporal\"` in `compile()`.\n initial_epoch: Integer.\n Epoch at which to start training\n (useful for resuming a previous training run).\n steps_per_epoch: Integer or `None`.\n Total number of steps (batches of samples)\n before declaring one epoch finished and starting the\n next epoch. When training with input tensors such as\n TensorFlow data tensors, the default `None` is equal to\n the number of samples in your dataset divided by\n the batch size, or 1 if that cannot be determined.\n validation_steps: Only relevant if `steps_per_epoch`\n is specified. Total number of steps (batches of samples)\n to validate before stopping.\n validation_freq: Only relevant if validation data is provided. Integer\n or list/tuple/set. If an integer, specifies how many training\n epochs to run before a new validation run is performed, e.g.\n `validation_freq=2` runs validation every 2 epochs. If a list,\n tuple, or set, specifies the epochs on which to run validation,\n e.g. `validation_freq=[1, 2, 10]` runs validation at the end\n of the 1st, 2nd, and 10th epochs.\n\n # Returns\n A `History` object. 
Its `History.history` attribute is\n a record of training loss values and metrics values\n at successive epochs, as well as validation loss values\n and validation metrics values (if applicable).\n\n # Raises\n RuntimeError: If the model was never compiled.\n ValueError: In case of mismatch between the provided input data\n and what the model expects.\n \"\"\"\n # Backwards compatibility\n if batch_size is None and steps_per_epoch is None:\n batch_size = 32\n # Legacy support\n if 'nb_epoch' in kwargs:\n warnings.warn('The `nb_epoch` argument in `fit` '\n 'has been renamed `epochs`.', stacklevel=2)\n epochs = kwargs.pop('nb_epoch')\n if kwargs:\n raise TypeError('Unrecognized keyword arguments: ' + str(kwargs))\n if x is None and y is None and steps_per_epoch is None:\n raise ValueError('If fitting from data tensors, '\n 'you should specify the `steps_per_epoch` '\n 'argument.')\n # Validate user data.\n x, y, sample_weights = self._standardize_user_data(\n x, y,\n sample_weight=sample_weight,\n class_weight=class_weight,\n batch_size=batch_size)\n # Prepare validation data.\n do_validation = False\n if validation_data:\n do_validation = True\n if len(validation_data) == 2:\n val_x, val_y = validation_data\n val_sample_weight = None\n elif len(validation_data) == 3:\n val_x, val_y, val_sample_weight = validation_data\n else:\n raise ValueError('When passing validation_data, '\n 'it must contain 2 (x_val, y_val) '\n 'or 3 (x_val, y_val, val_sample_weights) '\n 'items, however it contains %d items' %\n len(validation_data))\n\n val_x, val_y, val_sample_weights = self._standardize_user_data(\n val_x, val_y,\n sample_weight=val_sample_weight,\n batch_size=batch_size)\n if self._uses_dynamic_learning_phase():\n val_inputs = val_x + val_y + val_sample_weights + [0.]\n else:\n val_inputs = val_x + val_y + val_sample_weights\n\n elif validation_split and 0. < validation_split < 1.:\n if any(K.is_tensor(t) for t in x):\n raise ValueError(\n 'If your data is in the form of symbolic tensors, '\n 'you cannot use `validation_split`.')\n do_validation = True\n if hasattr(x[0], 'shape'):\n split_at = int(int(x[0].shape[0]) * (1. - validation_split))\n else:\n split_at = int(len(x[0]) * (1. 
- validation_split))\n x, val_x = (slice_arrays(x, 0, split_at),\n slice_arrays(x, split_at))\n y, val_y = (slice_arrays(y, 0, split_at),\n slice_arrays(y, split_at))\n sample_weights, val_sample_weights = (\n slice_arrays(sample_weights, 0, split_at),\n slice_arrays(sample_weights, split_at))\n if self._uses_dynamic_learning_phase():\n val_inputs = val_x + val_y + val_sample_weights + [0.]\n else:\n val_inputs = val_x + val_y + val_sample_weights\n\n elif validation_steps:\n do_validation = True\n if self._uses_dynamic_learning_phase():\n val_inputs = [0.]\n\n # Prepare input arrays and training function.\n if self._uses_dynamic_learning_phase():\n fit_inputs = x + y + sample_weights + [1.]\n else:\n fit_inputs = x + y + sample_weights\n self._make_train_function()\n fit_function = self.train_function\n\n # Prepare display labels.\n out_labels = self.metrics_names\n\n if do_validation:\n self._make_test_function()\n val_function = self.test_function\n callback_metrics = copy.copy(out_labels) + [\n 'val_' + n for n in out_labels]\n else:\n callback_metrics = copy.copy(out_labels)\n val_function = None\n val_inputs = []\n\n # Delegate logic to `fit_loop`.\n return training_arrays.fit_loop(self, fit_function, fit_inputs,\n out_labels=out_labels,\n batch_size=batch_size,\n epochs=epochs,\n verbose=verbose,\n callbacks=callbacks,\n val_function=val_function,\n val_inputs=val_inputs,\n shuffle=shuffle,\n callback_metrics=callback_metrics,\n initial_epoch=initial_epoch,\n steps_per_epoch=steps_per_epoch,\n validation_steps=validation_steps,\n validation_freq=validation_freq)\n\n def evaluate(self, x=None, y=None,\n batch_size=None,\n verbose=1,\n sample_weight=None,\n steps=None,\n callbacks=None):\n \"\"\"Returns the loss value & metrics values for the model in test mode.\n\n Computation is done in batches.\n\n # Arguments\n x: Numpy array of test data (if the model has a single input),\n or list of Numpy arrays (if the model has multiple inputs).\n If input layers in the model are named, you can also pass a\n dictionary mapping input names to Numpy arrays.\n `x` can be `None` (default) if feeding from\n framework-native tensors (e.g. TensorFlow data tensors).\n y: Numpy array of target (label) data\n (if the model has a single output),\n or list of Numpy arrays (if the model has multiple outputs).\n If output layers in the model are named, you can also pass a\n dictionary mapping output names to Numpy arrays.\n `y` can be `None` (default) if feeding from\n framework-native tensors (e.g. TensorFlow data tensors).\n batch_size: Integer or `None`.\n Number of samples per evaluation step.\n If unspecified, `batch_size` will default to 32.\n verbose: 0 or 1. 
Verbosity mode.\n 0 = silent, 1 = progress bar.\n sample_weight: Optional Numpy array of weights for\n the test samples, used for weighting the loss function.\n You can either pass a flat (1D)\n Numpy array with the same length as the input samples\n (1:1 mapping between weights and samples),\n or in the case of temporal data,\n you can pass a 2D array with shape\n `(samples, sequence_length)`,\n to apply a different weight to every timestep of every sample.\n In this case you should make sure to specify\n `sample_weight_mode=\"temporal\"` in `compile()`.\n steps: Integer or `None`.\n Total number of steps (batches of samples)\n before declaring the evaluation round finished.\n Ignored with the default value of `None`.\n callbacks: List of `keras.callbacks.Callback` instances.\n List of callbacks to apply during evaluation.\n See [callbacks](/callbacks).\n\n # Returns\n Scalar test loss (if the model has a single output and no metrics)\n or list of scalars (if the model has multiple outputs\n and/or metrics). The attribute `model.metrics_names` will give you\n the display labels for the scalar outputs.\n \"\"\"\n # Backwards compatibility.\n if batch_size is None and steps is None:\n batch_size = 32\n if x is None and y is None and steps is None:\n raise ValueError('If evaluating from data tensors, '\n 'you should specify the `steps` '\n 'argument.')\n # Validate user data.\n x, y, sample_weights = self._standardize_user_data(\n x, y,\n sample_weight=sample_weight,\n batch_size=batch_size)\n # Prepare inputs, delegate logic to `test_loop`.\n if self._uses_dynamic_learning_phase():\n ins = x + y + sample_weights + [0.]\n else:\n ins = x + y + sample_weights\n self._make_test_function()\n f = self.test_function\n return training_arrays.test_loop(self, f, ins,\n batch_size=batch_size,\n verbose=verbose,\n steps=steps,\n callbacks=callbacks)\n\n def predict(self, x,\n batch_size=None,\n verbose=0,\n steps=None,\n callbacks=None):\n \"\"\"Generates output predictions for the input samples.\n\n Computation is done in batches.\n\n # Arguments\n x: The input data, as a Numpy array\n (or list of Numpy arrays if the model has multiple inputs).\n batch_size: Integer. If unspecified, it will default to 32.\n verbose: Verbosity mode, 0 or 1.\n steps: Total number of steps (batches of samples)\n before declaring the prediction round finished.\n Ignored with the default value of `None`.\n callbacks: List of `keras.callbacks.Callback` instances.\n List of callbacks to apply during prediction.\n See [callbacks](/callbacks).\n\n # Returns\n Numpy array(s) of predictions.\n\n # Raises\n ValueError: In case of mismatch between the provided\n input data and the model's expectations,\n or in case a stateful model receives a number of samples\n that is not a multiple of the batch size.\n \"\"\"\n # Backwards compatibility.\n if batch_size is None and steps is None:\n batch_size = 32\n if x is None and steps is None:\n raise ValueError('If predicting from data tensors, '\n 'you should specify the `steps` '\n 'argument.')\n # Validate user data.\n x, _, _ = self._standardize_user_data(x)\n if self.stateful:\n if x[0].shape[0] > batch_size and x[0].shape[0] % batch_size != 0:\n raise ValueError('In a stateful network, '\n 'you should only pass inputs with '\n 'a number of samples that can be '\n 'divided by the batch size. Found: ' +\n str(x[0].shape[0]) + ' samples. 
'\n 'Batch size: ' + str(batch_size) + '.')\n\n # Prepare inputs, delegate logic to `predict_loop`.\n if self._uses_dynamic_learning_phase():\n ins = x + [0.]\n else:\n ins = x\n self._make_predict_function()\n f = self.predict_function\n return training_arrays.predict_loop(self, f, ins,\n batch_size=batch_size,\n verbose=verbose,\n steps=steps,\n callbacks=callbacks)\n\n def train_on_batch(self, x, y,\n sample_weight=None,\n class_weight=None):\n \"\"\"Runs a single gradient update on a single batch of data.\n\n # Arguments\n x: Numpy array of training data,\n or list of Numpy arrays if the model has multiple inputs.\n If all inputs in the model are named,\n you can also pass a dictionary\n mapping input names to Numpy arrays.\n y: Numpy array of target data,\n or list of Numpy arrays if the model has multiple outputs.\n If all outputs in the model are named,\n you can also pass a dictionary\n mapping output names to Numpy arrays.\n sample_weight: Optional array of the same length as x, containing\n weights to apply to the model's loss for each sample.\n In the case of temporal data, you can pass a 2D array\n with shape (samples, sequence_length),\n to apply a different weight to every timestep of every sample.\n In this case you should make sure to specify\n sample_weight_mode=\"temporal\" in compile().\n class_weight: Optional dictionary mapping\n class indices (integers) to\n a weight (float) to apply to the model's loss for the samples\n from this class during training.\n This can be useful to tell the model to \"pay more attention\" to\n samples from an under-represented class.\n\n # Returns\n Scalar training loss\n (if the model has a single output and no metrics)\n or list of scalars (if the model has multiple outputs\n and/or metrics). The attribute `model.metrics_names` will give you\n the display labels for the scalar outputs.\n \"\"\"\n x, y, sample_weights = self._standardize_user_data(\n x, y,\n sample_weight=sample_weight,\n class_weight=class_weight)\n if self._uses_dynamic_learning_phase():\n ins = x + y + sample_weights + [1.]\n else:\n ins = x + y + sample_weights\n self._make_train_function()\n outputs = self.train_function(ins)\n return unpack_singleton(outputs)\n\n def test_on_batch(self, x, y, sample_weight=None):\n \"\"\"Test the model on a single batch of samples.\n\n # Arguments\n x: Numpy array of test data,\n or list of Numpy arrays if the model has multiple inputs.\n If all inputs in the model are named,\n you can also pass a dictionary\n mapping input names to Numpy arrays.\n y: Numpy array of target data,\n or list of Numpy arrays if the model has multiple outputs.\n If all outputs in the model are named,\n you can also pass a dictionary\n mapping output names to Numpy arrays.\n sample_weight: Optional array of the same length as x, containing\n weights to apply to the model's loss for each sample.\n In the case of temporal data, you can pass a 2D array\n with shape (samples, sequence_length),\n to apply a different weight to every timestep of every sample.\n In this case you should make sure to specify\n sample_weight_mode=\"temporal\" in compile().\n\n # Returns\n Scalar test loss (if the model has a single output and no metrics)\n or list of scalars (if the model has multiple outputs\n and/or metrics). 
The attribute `model.metrics_names` will give you\n the display labels for the scalar outputs.\n \"\"\"\n x, y, sample_weights = self._standardize_user_data(\n x, y,\n sample_weight=sample_weight)\n if self._uses_dynamic_learning_phase():\n ins = x + y + sample_weights + [0.]\n else:\n ins = x + y + sample_weights\n self._make_test_function()\n outputs = self.test_function(ins)\n return unpack_singleton(outputs)\n\n def predict_on_batch(self, x):\n \"\"\"Returns predictions for a single batch of samples.\n\n # Arguments\n x: Input samples, as a Numpy array.\n\n # Returns\n Numpy array(s) of predictions.\n \"\"\"\n x, _, _ = self._standardize_user_data(x)\n if self._uses_dynamic_learning_phase():\n ins = x + [0.]\n else:\n ins = x\n self._make_predict_function()\n outputs = self.predict_function(ins)\n return unpack_singleton(outputs)\n\n @interfaces.legacy_generator_methods_support\n def fit_generator(self, generator,\n steps_per_epoch=None,\n epochs=1,\n verbose=1,\n callbacks=None,\n validation_data=None,\n validation_steps=None,\n validation_freq=1,\n class_weight=None,\n max_queue_size=10,\n workers=1,\n use_multiprocessing=False,\n shuffle=True,\n initial_epoch=0):\n \"\"\"Trains the model on data generated batch-by-batch by a Python generator\n (or an instance of `Sequence`).\n\n The generator is run in parallel to the model, for efficiency.\n For instance, this allows you to do real-time data augmentation\n on images on CPU in parallel to training your model on GPU.\n\n The use of `keras.utils.Sequence` guarantees the ordering\n and guarantees the single use of every input per epoch when\n using `use_multiprocessing=True`.\n\n # Arguments\n generator: A generator or an instance of `Sequence`\n (`keras.utils.Sequence`) object in order to avoid\n duplicate data when using multiprocessing.\n The output of the generator must be either\n - a tuple `(inputs, targets)`\n - a tuple `(inputs, targets, sample_weights)`.\n This tuple (a single output of the generator) makes a single\n batch. Therefore, all arrays in this tuple must have the same\n length (equal to the size of this batch). Different batches may\n have different sizes. For example, the last batch of the epoch\n is commonly smaller than the others, if the size of the dataset\n is not divisible by the batch size.\n The generator is expected to loop over its data\n indefinitely. An epoch finishes when `steps_per_epoch`\n batches have been seen by the model.\n steps_per_epoch: Integer.\n Total number of steps (batches of samples)\n to yield from `generator` before declaring one epoch\n finished and starting the next epoch. It should typically\n be equal to `ceil(num_samples / batch_size)`\n Optional for `Sequence`: if unspecified, will use\n the `len(generator)` as a number of steps.\n epochs: Integer. Number of epochs to train the model.\n An epoch is an iteration over the entire data provided,\n as defined by `steps_per_epoch`.\n Note that in conjunction with `initial_epoch`,\n `epochs` is to be understood as \"final epoch\".\n The model is not trained for a number of iterations\n given by `epochs`, but merely until the epoch\n of index `epochs` is reached.\n verbose: Integer. 0, 1, or 2. 
Verbosity mode.\n 0 = silent, 1 = progress bar, 2 = one line per epoch.\n callbacks: List of `keras.callbacks.Callback` instances.\n List of callbacks to apply during training.\n See [callbacks](/callbacks).\n validation_data: This can be either\n - a generator or a `Sequence` object for the validation data\n - tuple `(x_val, y_val)`\n - tuple `(x_val, y_val, val_sample_weights)`\n on which to evaluate\n the loss and any model metrics at the end of each epoch.\n The model will not be trained on this data.\n validation_steps: Only relevant if `validation_data`\n is a generator. Total number of steps (batches of samples)\n to yield from `validation_data` generator before stopping\n at the end of every epoch. It should typically\n be equal to the number of samples of your\n validation dataset divided by the batch size.\n Optional for `Sequence`: if unspecified, will use\n the `len(validation_data)` as a number of steps.\n validation_freq: Only relevant if validation data is provided. Integer\n or `collections.Container` instance (e.g. list, tuple, etc.). If an\n integer, specifies how many training epochs to run before a new\n validation run is performed, e.g. `validation_freq=2` runs\n validation every 2 epochs. If a Container, specifies the epochs on\n which to run validation, e.g. `validation_freq=[1, 2, 10]` runs\n validation at the end of the 1st, 2nd, and 10th epochs.\n class_weight: Optional dictionary mapping class indices (integers)\n to a weight (float) value, used for weighting the loss function\n (during training only). This can be useful to tell the model to\n \"pay more attention\" to samples\n from an under-represented class.\n max_queue_size: Integer. Maximum size for the generator queue.\n If unspecified, `max_queue_size` will default to 10.\n workers: Integer. Maximum number of processes to spin up\n when using process-based threading.\n If unspecified, `workers` will default to 1. If 0, will\n execute the generator on the main thread.\n use_multiprocessing: Boolean.\n If `True`, use process-based threading.\n If unspecified, `use_multiprocessing` will default to `False`.\n Note that because this implementation\n relies on multiprocessing,\n you should not pass non-picklable arguments to the generator\n as they can't be passed easily to children processes.\n shuffle: Boolean. Whether to shuffle the order of the batches at\n the beginning of each epoch. Only used with instances\n of `Sequence` (`keras.utils.Sequence`).\n Has no effect when `steps_per_epoch` is not `None`.\n initial_epoch: Integer.\n Epoch at which to start training\n (useful for resuming a previous training run).\n\n # Returns\n A `History` object. 
Its `History.history` attribute is\n a record of training loss values and metrics values\n at successive epochs, as well as validation loss values\n and validation metrics values (if applicable).\n\n # Raises\n ValueError: In case the generator yields data in an invalid format.\n\n # Example\n\n ```python\n def generate_arrays_from_file(path):\n while True:\n with open(path) as f:\n for line in f:\n # create numpy arrays of input data\n # and labels, from each line in the file\n x1, x2, y = process_line(line)\n yield ({'input_1': x1, 'input_2': x2}, {'output': y})\n\n model.fit_generator(generate_arrays_from_file('/my_file.txt'),\n steps_per_epoch=10000, epochs=10)\n ```\n \"\"\"\n return training_generator.fit_generator(\n self, generator,\n steps_per_epoch=steps_per_epoch,\n epochs=epochs,\n verbose=verbose,\n callbacks=callbacks,\n validation_data=validation_data,\n validation_steps=validation_steps,\n validation_freq=validation_freq,\n class_weight=class_weight,\n max_queue_size=max_queue_size,\n workers=workers,\n use_multiprocessing=use_multiprocessing,\n shuffle=shuffle,\n initial_epoch=initial_epoch)\n\n @interfaces.legacy_generator_methods_support\n def evaluate_generator(self, generator,\n steps=None,\n callbacks=None,\n max_queue_size=10,\n workers=1,\n use_multiprocessing=False,\n verbose=0):\n \"\"\"Evaluates the model on a data generator.\n\n The generator should return the same kind of data\n as accepted by `test_on_batch`.\n\n # Arguments\n generator: Generator yielding tuples (inputs, targets)\n or (inputs, targets, sample_weights)\n or an instance of Sequence (keras.utils.Sequence)\n object in order to avoid duplicate data\n when using multiprocessing.\n steps: Total number of steps (batches of samples)\n to yield from `generator` before stopping.\n Optional for `Sequence`: if unspecified, will use\n the `len(generator)` as a number of steps.\n callbacks: List of `keras.callbacks.Callback` instances.\n List of callbacks to apply during training.\n See [callbacks](/callbacks).\n max_queue_size: maximum size for the generator queue\n workers: Integer. Maximum number of processes to spin up\n when using process based threading.\n If unspecified, `workers` will default to 1. If 0, will\n execute the generator on the main thread.\n use_multiprocessing: if True, use process based threading.\n Note that because\n this implementation relies on multiprocessing,\n you should not pass\n non picklable arguments to the generator\n as they can't be passed\n easily to children processes.\n verbose: verbosity mode, 0 or 1.\n\n # Returns\n Scalar test loss (if the model has a single output and no metrics)\n or list of scalars (if the model has multiple outputs\n and/or metrics). 
The attribute `model.metrics_names` will give you\n the display labels for the scalar outputs.\n\n # Raises\n ValueError: In case the generator yields\n data in an invalid format.\n \"\"\"\n return training_generator.evaluate_generator(\n self, generator,\n steps=steps,\n callbacks=callbacks,\n max_queue_size=max_queue_size,\n workers=workers,\n use_multiprocessing=use_multiprocessing,\n verbose=verbose)\n\n @interfaces.legacy_generator_methods_support\n def predict_generator(self, generator,\n steps=None,\n callbacks=None,\n max_queue_size=10,\n workers=1,\n use_multiprocessing=False,\n verbose=0):\n \"\"\"Generates predictions for the input samples from a data generator.\n\n The generator should return the same kind of data as accepted by\n `predict_on_batch`.\n\n # Arguments\n generator: Generator yielding batches of input samples\n or an instance of Sequence (keras.utils.Sequence)\n object in order to avoid duplicate data\n when using multiprocessing.\n steps: Total number of steps (batches of samples)\n to yield from `generator` before stopping.\n Optional for `Sequence`: if unspecified, will use\n the `len(generator)` as a number of steps.\n callbacks: List of `keras.callbacks.Callback` instances.\n List of callbacks to apply during training.\n See [callbacks](/callbacks).\n max_queue_size: Maximum size for the generator queue.\n workers: Integer. Maximum number of processes to spin up\n when using process based threading.\n If unspecified, `workers` will default to 1. If 0, will\n execute the generator on the main thread.\n use_multiprocessing: If `True`, use process based threading.\n Note that because\n this implementation relies on multiprocessing,\n you should not pass\n non picklable arguments to the generator\n as they can't be passed\n easily to children processes.\n verbose: verbosity mode, 0 or 1.\n\n # Returns\n Numpy array(s) of predictions.\n\n # Raises\n ValueError: In case the generator yields\n data in an invalid format.\n \"\"\"\n return training_generator.predict_generator(\n self, generator,\n steps=steps,\n callbacks=callbacks,\n max_queue_size=max_queue_size,\n workers=workers,\n use_multiprocessing=use_multiprocessing,\n verbose=verbose)\n" ]
[ [ "numpy.asarray", "numpy.expand_dims" ] ]
gpleiss/uncertainty-baselines
[ "60b08e50e8d64cbd4d09689a35d5be81ed15e624" ]
[ "baselines/cifar/sngp_ensemble.py" ]
[ "# coding=utf-8\n# Copyright 2020 The Uncertainty Baselines Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Ensemble of SNGP models on CIFAR.\n\nThis script only performs evaluation, not training. We recommend training\nensembles by launching independent runs of `sngp.py` over different\nseeds.\n\"\"\"\n\nimport functools\nimport os\n\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\n\nimport edward2 as ed\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_datasets as tfds\nimport uncertainty_baselines as ub\nimport utils # local file import\nimport uncertainty_metrics as um\n\nflags.DEFINE_string('checkpoint_dir', None,\n 'The directory where the model weights are stored.')\nflags.mark_flag_as_required('checkpoint_dir')\nflags.DEFINE_integer('seed', 42, 'Random seed.')\nflags.DEFINE_integer('per_core_batch_size', 64, 'Batch size per TPU core/GPU.')\nflags.DEFINE_enum('dataset', 'cifar10',\n enum_values=['cifar10', 'cifar100'],\n help='Dataset.')\n# TODO(ghassen): consider adding CIFAR-100-C to TFDS.\nflags.DEFINE_string('cifar100_c_path', None,\n 'Path to the TFRecords files for CIFAR-100-C. Only valid '\n '(and required) if dataset is cifar100 and corruptions.')\nflags.DEFINE_integer('num_bins', 15, 'Number of bins for ECE.')\nflags.DEFINE_string('output_dir', '/tmp/cifar', 'Output directory.')\n\n# SNGP ensemble flags\nflags.DEFINE_float(\n 'gp_mean_field_factor_ensemble', 0.0005,\n 'The tunable multiplicative factor used in the mean-field approximation '\n 'for the posterior mean of softmax Gaussian process. If -1 then use '\n 'posterior mode instead of posterior mean.')\n\n# Dropout flags\nflags.DEFINE_bool('use_filterwise_dropout', True,\n 'Whether to use filterwise dropout for the hidden layers.')\nflags.DEFINE_bool('use_mc_dropout', False,\n 'Whether to use Monte Carlo dropout for the hidden layers.')\nflags.DEFINE_float('dropout_rate', 0.1, 'Dropout rate.')\n\n# SNGP flags.\nflags.DEFINE_bool('use_spec_norm', True,\n 'Whether to apply spectral normalization.')\nflags.DEFINE_bool('use_gp_layer', True,\n 'Whether to use Gaussian process as the output layer.')\n\n# Spectral normalization flags.\nflags.DEFINE_integer(\n 'spec_norm_iteration', 1,\n 'Number of power iterations to perform for estimating '\n 'the spectral norm of weight matrices.')\nflags.DEFINE_float('spec_norm_bound', 6.,\n 'Upper bound to spectral norm of weight matrices.')\n\n# Gaussian process flags.\nflags.DEFINE_float('gp_bias', 0., 'The bias term for GP layer.')\nflags.DEFINE_float(\n 'gp_scale', 2.,\n 'The length-scale parameter for the RBF kernel of the GP layer.')\nflags.DEFINE_integer(\n 'gp_input_dim', 128,\n 'The dimension to reduce the neural network input for the GP layer '\n '(via random Gaussian projection which preserves distance by the '\n ' Johnson-Lindenstrauss lemma). 
If -1, no dimension reduction.')\nflags.DEFINE_integer(\n 'gp_hidden_dim', 1024,\n 'The hidden dimension of the GP layer, which corresponds to the number of '\n 'random features used for the approximation.')\nflags.DEFINE_bool(\n 'gp_input_normalization', True,\n 'Whether to normalize the input using LayerNorm for GP layer.'\n 'This is similar to automatic relevance determination (ARD) in the classic '\n 'GP learning.')\nflags.DEFINE_float('gp_cov_ridge_penalty', 1e-3,\n 'Ridge penalty parameter for GP posterior covariance.')\nflags.DEFINE_float(\n 'gp_cov_discount_factor', 0.999,\n 'The discount factor to compute the moving average of precision matrix.')\n\n# Accelerator flags.\nflags.DEFINE_bool('use_gpu', True, 'Whether to run on GPU or otherwise TPU.')\nflags.DEFINE_bool('use_bfloat16', False, 'Whether to use mixed precision.')\nflags.DEFINE_integer('num_cores', 1, 'Number of TPU cores or number of GPUs.')\nFLAGS = flags.FLAGS\n\n\ndef main(argv):\n del argv # unused arg\n if not FLAGS.use_gpu:\n raise ValueError('Only GPU is currently supported.')\n if FLAGS.num_cores > 1:\n raise ValueError('Only a single accelerator is currently supported.')\n tf.random.set_seed(FLAGS.seed)\n tf.io.gfile.makedirs(FLAGS.output_dir)\n\n ds_info = tfds.builder(FLAGS.dataset).info\n batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores\n steps_per_eval = ds_info.splits['test'].num_examples // batch_size\n num_classes = ds_info.features['label'].num_classes\n\n dataset = utils.load_dataset(\n split=tfds.Split.TEST,\n name=FLAGS.dataset,\n batch_size=batch_size,\n use_bfloat16=FLAGS.use_bfloat16)\n test_datasets = {'clean': dataset}\n corruption_types, max_intensity = utils.load_corrupted_test_info(\n FLAGS.dataset)\n for name in corruption_types:\n for intensity in range(1, max_intensity + 1):\n dataset_name = '{0}_{1}'.format(name, intensity)\n if FLAGS.dataset == 'cifar10':\n load_c_dataset = utils.load_cifar10_c\n else:\n load_c_dataset = functools.partial(utils.load_cifar100_c,\n path=FLAGS.cifar100_c_path)\n dataset = load_c_dataset(\n corruption_name=name,\n corruption_intensity=intensity,\n batch_size=batch_size,\n use_bfloat16=FLAGS.use_bfloat16)\n test_datasets[dataset_name] = dataset\n\n model = ub.models.wide_resnet_sngp(\n input_shape=ds_info.features['image'].shape,\n batch_size=FLAGS.per_core_batch_size,\n depth=28,\n width_multiplier=10,\n num_classes=num_classes,\n l2=0.,\n use_mc_dropout=FLAGS.use_mc_dropout,\n use_filterwise_dropout=FLAGS.use_filterwise_dropout,\n dropout_rate=FLAGS.dropout_rate,\n use_gp_layer=FLAGS.use_gp_layer,\n gp_input_dim=FLAGS.gp_input_dim,\n gp_hidden_dim=FLAGS.gp_hidden_dim,\n gp_scale=FLAGS.gp_scale,\n gp_bias=FLAGS.gp_bias,\n gp_input_normalization=FLAGS.gp_input_normalization,\n gp_cov_discount_factor=FLAGS.gp_cov_discount_factor,\n gp_cov_ridge_penalty=FLAGS.gp_cov_ridge_penalty,\n use_spec_norm=FLAGS.use_spec_norm,\n spec_norm_iteration=FLAGS.spec_norm_iteration,\n spec_norm_bound=FLAGS.spec_norm_bound)\n logging.info('Model input shape: %s', model.input_shape)\n logging.info('Model output shape: %s', model.output_shape)\n logging.info('Model number of weights: %s', model.count_params())\n\n # Search for checkpoints from their index file; then remove the index suffix.\n ensemble_filenames = tf.io.gfile.glob(os.path.join(FLAGS.checkpoint_dir,\n '**/*.index'))\n ensemble_filenames = [filename[:-6] for filename in ensemble_filenames]\n ensemble_size = len(ensemble_filenames)\n logging.info('Ensemble size: %s', ensemble_size)\n logging.info('Ensemble 
number of weights: %s',\n ensemble_size * model.count_params())\n logging.info('Ensemble filenames: %s', str(ensemble_filenames))\n checkpoint = tf.train.Checkpoint(model=model)\n\n # Write model predictions to files.\n num_datasets = len(test_datasets)\n for m, ensemble_filename in enumerate(ensemble_filenames):\n checkpoint.restore(ensemble_filename)\n for n, (name, test_dataset) in enumerate(test_datasets.items()):\n filename = '{dataset}_{member}.npy'.format(dataset=name, member=m)\n filename = os.path.join(FLAGS.output_dir, filename)\n if not tf.io.gfile.exists(filename):\n logits = []\n test_iterator = iter(test_dataset)\n for _ in range(steps_per_eval):\n features, _ = next(test_iterator) # pytype: disable=attribute-error\n logits_member = model(features, training=False)\n if isinstance(logits_member, tuple):\n # If model returns a tuple of (logits, covmat), extract both\n logits_member, covmat_member = logits_member\n else:\n covmat_member = tf.eye(FLAGS.per_core_batch_size)\n logits_member = ed.layers.utils.mean_field_logits(\n logits_member, covmat_member, FLAGS.gp_mean_field_factor_ensemble)\n logits.append(logits_member)\n\n logits = tf.concat(logits, axis=0)\n with tf.io.gfile.GFile(filename, 'w') as f:\n np.save(f, logits.numpy())\n percent = (m * num_datasets + (n + 1)) / (ensemble_size * num_datasets)\n message = ('{:.1%} completion for prediction: ensemble member {:d}/{:d}. '\n 'Dataset {:d}/{:d}'.format(percent,\n m + 1,\n ensemble_size,\n n + 1,\n num_datasets))\n logging.info(message)\n\n metrics = {\n 'test/negative_log_likelihood': tf.keras.metrics.Mean(),\n 'test/gibbs_cross_entropy': tf.keras.metrics.Mean(),\n 'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),\n 'test/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),\n }\n corrupt_metrics = {}\n for name in test_datasets:\n corrupt_metrics['test/nll_{}'.format(name)] = tf.keras.metrics.Mean()\n corrupt_metrics['test/accuracy_{}'.format(name)] = (\n tf.keras.metrics.SparseCategoricalAccuracy())\n corrupt_metrics['test/ece_{}'.format(name)] = (\n um.ExpectedCalibrationError(num_bins=FLAGS.num_bins))\n\n # Evaluate model predictions.\n for n, (name, test_dataset) in enumerate(test_datasets.items()):\n logits_dataset = []\n for m in range(ensemble_size):\n filename = '{dataset}_{member}.npy'.format(dataset=name, member=m)\n filename = os.path.join(FLAGS.output_dir, filename)\n with tf.io.gfile.GFile(filename, 'rb') as f:\n logits_dataset.append(np.load(f))\n\n logits_dataset = tf.convert_to_tensor(logits_dataset)\n test_iterator = iter(test_dataset)\n for step in range(steps_per_eval):\n _, labels = next(test_iterator) # pytype: disable=attribute-error\n logits = logits_dataset[:, (step*batch_size):((step+1)*batch_size)]\n labels = tf.cast(labels, tf.int32)\n negative_log_likelihood = um.ensemble_cross_entropy(labels, logits)\n per_probs = tf.nn.softmax(logits)\n probs = tf.reduce_mean(per_probs, axis=0)\n if name == 'clean':\n gibbs_ce = um.gibbs_cross_entropy(labels, logits)\n metrics['test/negative_log_likelihood'].update_state(\n negative_log_likelihood)\n metrics['test/gibbs_cross_entropy'].update_state(gibbs_ce)\n metrics['test/accuracy'].update_state(labels, probs)\n metrics['test/ece'].update_state(labels, probs)\n else:\n corrupt_metrics['test/nll_{}'.format(name)].update_state(\n negative_log_likelihood)\n corrupt_metrics['test/accuracy_{}'.format(name)].update_state(\n labels, probs)\n corrupt_metrics['test/ece_{}'.format(name)].update_state(\n labels, probs)\n\n message = ('{:.1%} 
completion for evaluation: dataset {:d}/{:d}'.format(\n (n + 1) / num_datasets, n + 1, num_datasets))\n logging.info(message)\n\n corrupt_results = utils.aggregate_corrupt_metrics(corrupt_metrics,\n corruption_types,\n max_intensity)\n total_results = {name: metric.result() for name, metric in metrics.items()}\n total_results.update(corrupt_results)\n logging.info('Metrics: %s', total_results)\n\n\nif __name__ == '__main__':\n app.run(main)\n" ]
[ [ "tensorflow.convert_to_tensor", "tensorflow.nn.softmax", "tensorflow.concat", "tensorflow.random.set_seed", "tensorflow.reduce_mean", "tensorflow.train.Checkpoint", "tensorflow.io.gfile.exists", "tensorflow.cast", "tensorflow.io.gfile.makedirs", "tensorflow.io.gfile.GFile", "numpy.load", "tensorflow.eye", "tensorflow.keras.metrics.SparseCategoricalAccuracy", "tensorflow.keras.metrics.Mean" ] ]
zurk/hmr2.0
[ "3ce454ce7a94f9c4786d2ccb7095940be5e31582" ]
[ "keypoint_annotation_tool/loader/smpl_loader.py" ]
[ "import pickle\nfrom glob import glob\n\nimport cv2\nimport numpy as np\n\n\nclass SmplLoader:\n\n def __init__(self):\n super(SmplLoader, self).__init__()\n\n def init_model(self, file_name):\n with open(file_name, \"rb\") as f:\n model = pickle.load(f)\n\n self.faces = model['f']\n self.v_template = model[\"v_template\"]\n self.pose_dirs = model[\"posedirs\"]\n self.shape_dirs = model[\"shapedirs\"]\n self.j_regressor = model[\"J_regressor\"]\n self.weights = model['weights']\n self.parent_id = model['kintree_table'][0].astype(np.int32)\n self.identity = np.eye(3)\n\n if 'cocoplus_regressor' in model:\n self.coco_regressor = model[\"cocoplus_regressor\"]\n\n self.custom_regressor = None\n\n def init_custom_regressors(self, path):\n files = glob(path)\n if len(files) > 0:\n regressors = []\n for file in files:\n regressor = np.load(file)\n regressors.append(regressor)\n\n self.custom_regressor = np.concatenate(regressors, 1).T\n\n def load_vertices(self, pose=None, shape=None, trans=None):\n\n if pose is None:\n pose = np.zeros([len(self.parent_id), 3])\n\n if shape is None:\n shape = np.zeros(self.shape_dirs.shape[-1])\n\n if trans is None:\n trans = np.zeros([1, 3])\n\n v_shaped = self.shape_dirs.dot(shape) + self.v_template\n\n x = np.matmul(self.j_regressor, v_shaped[:, 0])\n y = np.matmul(self.j_regressor, v_shaped[:, 1])\n z = np.matmul(self.j_regressor, v_shaped[:, 2])\n joints = np.vstack((x, y, z)).T\n\n rotation = self.relative_rotation(pose)\n v_posed = v_shaped + self.pose_dirs.dot(rotation)\n\n joints = self.global_rigid_transform(pose, joints)\n joints = joints.dot(self.weights.T)\n\n rest_shape_h = np.vstack((v_posed.T, np.ones((1, v_posed.shape[0]))))\n\n verts = (joints[:, 0, :] * rest_shape_h[0, :].reshape((1, -1)) +\n joints[:, 1, :] * rest_shape_h[1, :].reshape((1, -1)) +\n joints[:, 2, :] * rest_shape_h[2, :].reshape((1, -1)) +\n joints[:, 3, :] * rest_shape_h[3, :].reshape((1, -1))).T\n\n verts = verts[:, :3] + trans\n return verts\n\n def relative_rotation(self, pose):\n pose = pose[1:, :] # ignore global rotation\n pose = [cv2.Rodrigues(p)[0] - self.identity for p in pose]\n return np.concatenate(pose).ravel()\n\n def global_rigid_transform(self, pose, joints):\n homogeneous = np.array([[0.0, 0.0, 0.0, 1.0]])\n zeros = np.zeros([4, 3])\n\n def rotate_joint(pose_vec, joint):\n rot_joint = np.hstack([cv2.Rodrigues(pose_vec)[0], joint.reshape([3, 1])])\n return np.vstack([rot_joint, homogeneous])\n\n # create result list with root rotation\n result = [rotate_joint(pose[0, :], joints[0, :])]\n\n # joint rotations\n for i in range(1, len(self.parent_id)):\n joint = (joints[i, :] - joints[self.parent_id[i], :])\n rot_joint = rotate_joint(pose[i, :], joint)\n result.append(result[self.parent_id[i]].dot(rot_joint))\n\n # Skinning based on final_bone - init_bone\n for i in range(len(result)):\n joint = result[i].dot(np.concatenate([joints[i, :], [0]]))\n joint = np.hstack([zeros, joint.reshape([4, 1])])\n result[i] = result[i] - joint\n\n return np.dstack(result)\n" ]
[ [ "numpy.eye", "numpy.matmul", "numpy.dstack", "numpy.ones", "numpy.concatenate", "numpy.load", "numpy.array", "numpy.zeros", "numpy.vstack" ] ]
Luma-1994/lama
[ "60d802e2e4cce789f03eea11b038212ba5f7fd1b" ]
[ "FactorNet/CEBPB/meta_Unique35_DGF/dataloader.py" ]
[ "\"\"\"Basenji dataloader\n\"\"\"\n# python2, 3 compatibility\nfrom __future__ import absolute_import, division, print_function\n\nimport os\nimport numpy as np\nimport pandas as pd\nimport pybedtools\nfrom pybedtools import BedTool\nfrom genomelake.extractors import FastaExtractor, BigwigExtractor\nfrom kipoi.data import Dataset\nfrom kipoi.metadata import GenomicRanges\nfrom kipoi.specs import RemoteFile\nfrom kipoi.external.torchvision.dataset_utils import download_url\nfrom kipoi.utils import makedir_exist_ok\nimport linecache\nfrom six.moves.urllib.request import urlretrieve\n\n# Get the local path\nimport inspect\n\nfilename = inspect.getframeinfo(inspect.currentframe()).filename\nthis_dir = os.path.dirname(os.path.abspath(filename))\n# --------------------------------------------\n\n# TODO - include this files also on Zenodo\ndef download_gencode_dir(output_dir):\n \"\"\"Download all the required gencode files\n \"\"\"\n makedir_exist_ok(output_dir)\n\n url_template = (\"https://s3.eu-central-1.amazonaws.com/kipoi-models/\"\n \"dataloader_files/FactorNet/dataloader_files/gencode_features/{}\")\n\n # url_template = \"https://github.com/uci-cbcl/FactorNet/blob/master/resources/{}?raw=true\"\n fnames = [('cpgisland.bed.gz', 'ac7dc007d7019c05adb7a331d1d6721d'),\n ('wgEncodeGencodeBasicV19.cds.merged.bed.gz', '4ec9883932932efe87e4adc6c84ced1c'),\n ('wgEncodeGencodeBasicV19.intron.merged.bed.gz', 'd2db7e3255323d2b5b04e1c0c59ecd2d'),\n ('wgEncodeGencodeBasicV19.promoter.merged.bed.gz', '48fe1ab3aa0e9f5d11f3e5dfedbd47b6'),\n ('wgEncodeGencodeBasicV19.utr5.merged.bed.gz', 'de87c14d4ff055226afeb01446aba6e6'),\n ('wgEncodeGencodeBasicV19.utr3.merged.bed.gz', '8bbe08f5fba86306dfbef56d756856f1')]\n for fname, md5 in fnames:\n output_file = os.path.join(output_dir, fname)\n rf = RemoteFile(url=url_template.format(fname), md5=md5)\n if not os.path.exists(output_file) or not rf.validate(output_file):\n rf.get_file(output_file)\n\n\nclass BedToolLinecache(BedTool):\n \"\"\"Faster BedTool accessor by Ziga Avsec\n Normal BedTools loops through the whole file to get the\n line of interest. 
Hence the access it o(n)\n Note: this might load the whole bedfile into memory\n \"\"\"\n\n def __getitem__(self, idx):\n line = linecache.getline(self.fn, idx + 1)\n return pybedtools.create_interval_from_list(line.strip().split(\"\\t\"))\n\n\nclass SeqDataset(Dataset):\n \"\"\"\n Args:\n intervals_file: bed3 file containing intervals\n fasta_file: file path; Genome sequence\n target_file: file path; path to the targets in the csv format\n \"\"\"\n\n SEQ_WIDTH = 1002\n\n def __init__(self,\n intervals_file,\n fasta_file,\n dnase_file,\n cell_line=None,\n RNAseq_PC_file=None,\n mappability_file=None,\n use_linecache=True):\n\n # intervals\n if use_linecache:\n linecache.clearcache()\n BT = BedToolLinecache\n else:\n BT = BedTool\n\n self.bt = BT(intervals_file)\n\n # Fasta\n self.fasta_file = fasta_file\n self.fasta_extractor = None # initialize later\n # DNase\n self.dnase_file = dnase_file\n self.dnase_extractor = None\n # mappability\n if mappability_file is None:\n # download the mappability file if not existing\n common_dl_dir = os.path.join(this_dir, \"../../template/downloaded/dataloader_files\")\n makedir_exist_ok(common_dl_dir)\n rf = RemoteFile(url=\"http://hgdownload.cse.ucsc.edu/goldenPath/hg19/encodeDCC/wgEncodeMapability/wgEncodeDukeMapabilityUniqueness35bp.bigWig\",\n md5=\"1d15ddafe2c8df51cf08495db96679e7\")\n mappability_file = os.path.join(common_dl_dir, \"wgEncodeDukeMapabilityUniqueness35bp.bigWig\")\n if not os.path.exists(mappability_file) or not rf.validate(mappability_file):\n # download the path\n rf.get_file(mappability_file)\n self.mappability_file = mappability_file\n self.mappability_extractor = None\n # Get the metadata features\n if cell_line is None:\n if RNAseq_PC_file is None:\n raise ValueError(\"RNAseq_PC_file has to be specified when cell_line=None\")\n assert os.path.exists(RNAseq_PC_file)\n else:\n # Using the pre-defined cell-line\n output_dir = os.path.join(this_dir, \"../../template/downloaded/dataloader_files/RNAseq_features/\")\n makedir_exist_ok(output_dir)\n RNAseq_PC_file = os.path.join(output_dir, cell_line, \"meta.txt\")\n url_template = ('https://s3.eu-central-1.amazonaws.com/kipoi-models/dataloader_files/'\n 'FactorNet/dataloader_files/RNAseq_features/{}/meta.txt')\n # rf = RemoteFile(url=url_template.format(cell_line))\n if not os.path.exists(RNAseq_PC_file): # or not rf.validate(mappability_file):\n # download the path\n download_url(url_template.format(cell_line), os.path.join(output_dir, cell_line), \"meta.txt\")\n # rf.get_file(RNAseq_PC_file)\n\n self.meta_feat = pd.read_csv(RNAseq_PC_file,\n sep=\"\\t\", header=None)[0].values\n\n def __len__(self):\n return len(self.bt)\n\n def __getitem__(self, idx):\n if self.fasta_extractor is None:\n # Fasta\n self.fasta_extractor = FastaExtractor(self.fasta_file)\n # DNase\n self.dnase_extractor = BigwigExtractor(self.dnase_file)\n self.mappability_extractor = BigwigExtractor(self.mappability_file)\n\n # Get the interval\n interval = self.bt[idx]\n if interval.stop - interval.start != self.SEQ_WIDTH:\n center = (interval.start + interval.stop) // 2\n interval.start = center - self.SEQ_WIDTH // 2\n interval.end = center + self.SEQ_WIDTH // 2 + self.SEQ_WIDTH % 2\n\n # Run the fasta extractor\n seq = np.squeeze(self.fasta_extractor([interval]), axis=0)\n seq_rc = seq[::-1, ::-1]\n\n # Dnase\n dnase = np.squeeze(self.dnase_extractor([interval], axis=0))[:, np.newaxis]\n dnase[np.isnan(dnase)] = 0 # NA fill\n dnase_rc = dnase[::-1]\n\n bigwig_list = [seq]\n bigwig_rc_list = [seq_rc]\n mappability 
= np.squeeze(self.mappability_extractor([interval], axis=0))[:, np.newaxis]\n mappability[np.isnan(mappability)] = 0 # NA fill\n mappability_rc = mappability[::-1]\n bigwig_list.append(mappability)\n bigwig_rc_list.append(mappability_rc)\n bigwig_list.append(dnase)\n bigwig_rc_list.append(dnase_rc)\n\n ranges = GenomicRanges.from_interval(interval)\n ranges_rc = GenomicRanges.from_interval(interval)\n ranges_rc.strand = \"-\"\n\n return {\n \"inputs\": [\n np.concatenate(bigwig_list, axis=-1), # stack along the last axis\n np.concatenate(bigwig_rc_list, axis=-1), # RC version\n self.meta_feat\n ],\n \"targets\": {}, # No Targets\n \"metadata\": {\n \"ranges\": ranges,\n \"ranges_rc\": ranges_rc\n }\n }\n" ]
[ [ "numpy.isnan", "pandas.read_csv", "numpy.concatenate" ] ]
sayef/coref
[ "6c2d99886189000b44bf4cc5c1f65dd06267d348" ]
[ "util.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport errno\nimport codecs\nimport collections\nimport shutil\nimport sys\n\nimport numpy as np\nimport tensorflow as tf\nimport pyhocon\n\nfrom . import independent\nfrom . import overlap\n\n_dir = os.path.dirname(os.path.abspath(__file__))\n\ndef get_model(config):\n if config['model_type'] == 'independent':\n return independent.CorefModel(config)\n elif config['model_type'] == 'overlap':\n return overlap.CorefModel(config)\n else:\n raise NotImplementedError('Undefined model type')\n\ndef initialize_from_env(eval_test=False):\n if \"GPU\" in os.environ:\n set_gpus(int(os.environ[\"GPU\"]))\n\n name = os.environ['model_name']\n print(\"Running experiment: {}\".format(name))\n\n if eval_test:\n config = pyhocon.ConfigFactory.parse_file(os.path.join(_dir, \"test.experiments.conf\"))[name]\n else:\n config = pyhocon.ConfigFactory.parse_file(os.path.join(_dir, \"experiments.conf\"))[name]\n config[\"log_dir\"] = mkdirs(os.path.join(config[\"log_root\"], name))\n\n print(pyhocon.HOCONConverter.convert(config, \"hocon\"))\n return config\n\ndef copy_checkpoint(source, target):\n for ext in (\".index\", \".data-00000-of-00001\"):\n shutil.copyfile(source + ext, target + ext)\n\ndef make_summary(value_dict):\n return tf.Summary(value=[tf.Summary.Value(tag=k, simple_value=v) for k,v in value_dict.items()])\n\ndef flatten(l):\n return [item for sublist in l for item in sublist]\n\ndef set_gpus(*gpus):\n # pass\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \",\".join(str(g) for g in gpus)\n print(\"Setting CUDA_VISIBLE_DEVICES to: {}\".format(os.environ[\"CUDA_VISIBLE_DEVICES\"]))\n\ndef mkdirs(path):\n try:\n os.makedirs(path)\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n raise\n return path\n\ndef load_char_dict(char_vocab_path):\n vocab = [u\"<unk>\"]\n with codecs.open(char_vocab_path, encoding=\"utf-8\") as f:\n vocab.extend(l.strip() for l in f.readlines())\n char_dict = collections.defaultdict(int)\n char_dict.update({c:i for i, c in enumerate(vocab)})\n return char_dict\n\ndef maybe_divide(x, y):\n return 0 if y == 0 else x / float(y)\n\ndef projection(inputs, output_size, initializer=tf.truncated_normal_initializer(stddev=0.02)):\n return ffnn(inputs, 0, -1, output_size, dropout=None, output_weights_initializer=initializer)\n\ndef highway(inputs, num_layers, dropout):\n for i in range(num_layers):\n with tf.variable_scope(\"highway_{}\".format(i)):\n j, f = tf.split(projection(inputs, 2 * shape(inputs, -1)), 2, -1)\n f = tf.sigmoid(f)\n j = tf.nn.relu(j)\n if dropout is not None:\n j = tf.nn.dropout(j, dropout)\n inputs = f * j + (1 - f) * inputs\n return inputs\n\ndef shape(x, dim):\n return x.get_shape()[dim].value or tf.shape(x)[dim]\n\ndef ffnn(inputs, num_hidden_layers, hidden_size, output_size, dropout, output_weights_initializer=tf.truncated_normal_initializer(stddev=0.02), hidden_initializer=tf.truncated_normal_initializer(stddev=0.02)):\n if len(inputs.get_shape()) > 3:\n raise ValueError(\"FFNN with rank {} not supported\".format(len(inputs.get_shape())))\n\n if len(inputs.get_shape()) == 3:\n batch_size = shape(inputs, 0)\n seqlen = shape(inputs, 1)\n emb_size = shape(inputs, 2)\n current_inputs = tf.reshape(inputs, [batch_size * seqlen, emb_size])\n else:\n current_inputs = inputs\n\n for i in range(num_hidden_layers):\n hidden_weights = tf.get_variable(\"hidden_weights_{}\".format(i), [shape(current_inputs, 1), hidden_size], 
initializer=hidden_initializer)\n hidden_bias = tf.get_variable(\"hidden_bias_{}\".format(i), [hidden_size], initializer=tf.zeros_initializer())\n current_outputs = tf.nn.relu(tf.nn.xw_plus_b(current_inputs, hidden_weights, hidden_bias))\n\n if dropout is not None:\n current_outputs = tf.nn.dropout(current_outputs, dropout)\n current_inputs = current_outputs\n\n output_weights = tf.get_variable(\"output_weights\", [shape(current_inputs, 1), output_size], initializer=output_weights_initializer)\n output_bias = tf.get_variable(\"output_bias\", [output_size], initializer=tf.zeros_initializer())\n outputs = tf.nn.xw_plus_b(current_inputs, output_weights, output_bias)\n\n if len(inputs.get_shape()) == 3:\n outputs = tf.reshape(outputs, [batch_size, seqlen, output_size])\n return outputs\n\ndef linear(inputs, output_size):\n if len(inputs.get_shape()) == 3:\n batch_size = shape(inputs, 0)\n seqlen = shape(inputs, 1)\n emb_size = shape(inputs, 2)\n current_inputs = tf.reshape(inputs, [batch_size * seqlen, emb_size])\n else:\n current_inputs = inputs\n hidden_weights = tf.get_variable(\"linear_w\", [shape(current_inputs, 1), output_size])\n hidden_bias = tf.get_variable(\"bias\", [output_size])\n current_outputs = tf.nn.xw_plus_b(current_inputs, hidden_weights, hidden_bias)\n return current_outputs\n\ndef cnn(inputs, filter_sizes, num_filters):\n num_words = shape(inputs, 0)\n num_chars = shape(inputs, 1)\n input_size = shape(inputs, 2)\n outputs = []\n for i, filter_size in enumerate(filter_sizes):\n with tf.variable_scope(\"conv_{}\".format(i)):\n w = tf.get_variable(\"w\", [filter_size, input_size, num_filters])\n b = tf.get_variable(\"b\", [num_filters])\n conv = tf.nn.conv1d(inputs, w, stride=1, padding=\"VALID\") # [num_words, num_chars - filter_size, num_filters]\n h = tf.nn.relu(tf.nn.bias_add(conv, b)) # [num_words, num_chars - filter_size, num_filters]\n pooled = tf.reduce_max(h, 1) # [num_words, num_filters]\n outputs.append(pooled)\n return tf.concat(outputs, 1) # [num_words, num_filters * len(filter_sizes)]\n\ndef batch_gather(emb, indices):\n batch_size = shape(emb, 0)\n seqlen = shape(emb, 1)\n if len(emb.get_shape()) > 2:\n emb_size = shape(emb, 2)\n else:\n emb_size = 1\n flattened_emb = tf.reshape(emb, [batch_size * seqlen, emb_size]) # [batch_size * seqlen, emb]\n offset = tf.expand_dims(tf.range(batch_size) * seqlen, 1) # [batch_size, 1]\n gathered = tf.gather(flattened_emb, indices + offset) # [batch_size, num_indices, emb]\n if len(emb.get_shape()) == 2:\n gathered = tf.squeeze(gathered, 2) # [batch_size, num_indices]\n return gathered\n\nclass RetrievalEvaluator(object):\n def __init__(self):\n self._num_correct = 0\n self._num_gold = 0\n self._num_predicted = 0\n\n def update(self, gold_set, predicted_set):\n self._num_correct += len(gold_set & predicted_set)\n self._num_gold += len(gold_set)\n self._num_predicted += len(predicted_set)\n\n def recall(self):\n return maybe_divide(self._num_correct, self._num_gold)\n\n def precision(self):\n return maybe_divide(self._num_correct, self._num_predicted)\n\n def metrics(self):\n recall = self.recall()\n precision = self.precision()\n f1 = maybe_divide(2 * recall * precision, precision + recall)\n return recall, precision, f1\n\nclass EmbeddingDictionary(object):\n def __init__(self, info, normalize=True, maybe_cache=None):\n self._size = info[\"size\"]\n self._normalize = normalize\n self._path = info[\"path\"]\n if maybe_cache is not None and maybe_cache._path == self._path:\n assert self._size == maybe_cache._size\n 
self._embeddings = maybe_cache._embeddings\n else:\n self._embeddings = self.load_embedding_dict(self._path)\n\n @property\n def size(self):\n return self._size\n\n def load_embedding_dict(self, path):\n print(\"Loading word embeddings from {}...\".format(path))\n default_embedding = np.zeros(self.size)\n embedding_dict = collections.defaultdict(lambda:default_embedding)\n if len(path) > 0:\n vocab_size = None\n with open(path) as f:\n for i, line in enumerate(f.readlines()):\n word_end = line.find(\" \")\n word = line[:word_end]\n embedding = np.fromstring(line[word_end + 1:], np.float32, sep=\" \")\n assert len(embedding) == self.size\n embedding_dict[word] = embedding\n if vocab_size is not None:\n assert vocab_size == len(embedding_dict)\n print(\"Done loading word embeddings.\")\n return embedding_dict\n\n def __getitem__(self, key):\n embedding = self._embeddings[key]\n if self._normalize:\n embedding = self.normalize(embedding)\n return embedding\n\n def normalize(self, v):\n norm = np.linalg.norm(v)\n if norm > 0:\n return v / norm\n else:\n return v\n\nclass CustomLSTMCell(tf.contrib.rnn.RNNCell):\n def __init__(self, num_units, batch_size, dropout):\n self._num_units = num_units\n self._dropout = dropout\n self._dropout_mask = tf.nn.dropout(tf.ones([batch_size, self.output_size]), dropout)\n self._initializer = self._block_orthonormal_initializer([self.output_size] * 3)\n initial_cell_state = tf.get_variable(\"lstm_initial_cell_state\", [1, self.output_size])\n initial_hidden_state = tf.get_variable(\"lstm_initial_hidden_state\", [1, self.output_size])\n self._initial_state = tf.contrib.rnn.LSTMStateTuple(initial_cell_state, initial_hidden_state)\n\n @property\n def state_size(self):\n return tf.contrib.rnn.LSTMStateTuple(self.output_size, self.output_size)\n\n @property\n def output_size(self):\n return self._num_units\n\n @property\n def initial_state(self):\n return self._initial_state\n\n def __call__(self, inputs, state, scope=None):\n \"\"\"Long short-term memory cell (LSTM).\"\"\"\n with tf.variable_scope(scope or type(self).__name__): # \"CustomLSTMCell\"\n c, h = state\n h *= self._dropout_mask\n concat = projection(tf.concat([inputs, h], 1), 3 * self.output_size, initializer=self._initializer)\n i, j, o = tf.split(concat, num_or_size_splits=3, axis=1)\n i = tf.sigmoid(i)\n new_c = (1 - i) * c + i * tf.tanh(j)\n new_h = tf.tanh(new_c) * tf.sigmoid(o)\n new_state = tf.contrib.rnn.LSTMStateTuple(new_c, new_h)\n return new_h, new_state\n\n def _orthonormal_initializer(self, scale=1.0):\n def _initializer(shape, dtype=tf.float32, partition_info=None):\n M1 = np.random.randn(shape[0], shape[0]).astype(np.float32)\n M2 = np.random.randn(shape[1], shape[1]).astype(np.float32)\n Q1, R1 = np.linalg.qr(M1)\n Q2, R2 = np.linalg.qr(M2)\n Q1 = Q1 * np.sign(np.diag(R1))\n Q2 = Q2 * np.sign(np.diag(R2))\n n_min = min(shape[0], shape[1])\n params = np.dot(Q1[:, :n_min], Q2[:n_min, :]) * scale\n return params\n return _initializer\n\n def _block_orthonormal_initializer(self, output_sizes):\n def _initializer(shape, dtype=np.float32, partition_info=None):\n assert len(shape) == 2\n assert sum(output_sizes) == shape[1]\n initializer = self._orthonormal_initializer()\n params = np.concatenate([initializer([shape[0], o], dtype, partition_info) for o in output_sizes], 1)\n return params\n return _initializer\n" ]
[ [ "numpy.diag", "numpy.dot", "tensorflow.get_variable", "tensorflow.concat", "tensorflow.tanh", "numpy.random.randn", "numpy.linalg.qr", "tensorflow.nn.conv1d", "tensorflow.truncated_normal_initializer", "tensorflow.squeeze", "tensorflow.gather", "numpy.zeros", "tensorflow.nn.dropout", "tensorflow.nn.xw_plus_b", "tensorflow.shape", "tensorflow.zeros_initializer", "tensorflow.contrib.rnn.LSTMStateTuple", "tensorflow.split", "tensorflow.nn.bias_add", "tensorflow.nn.relu", "tensorflow.reduce_max", "tensorflow.range", "tensorflow.reshape", "tensorflow.sigmoid", "numpy.linalg.norm", "tensorflow.ones", "tensorflow.Summary.Value", "numpy.fromstring" ] ]
kangzh015/radiomics
[ "1291d918272e46daeb98dd819800a7f9f89fffa6" ]
[ "skradiomics/feature_extraction/firstorder.py" ]
[ "# !/usr/bin/env python\n# -*- coding:utf-8 -*-\n# author: jeremy.zhang(szujeremy@gmail.com, Shenzhen University, China)\n\nimport copy\nimport collections\nimport numpy as np\nimport SimpleITK as sitk\n\nfrom .base import RadiomicsBase, bin_image\nfrom skradiomics.utils.modules import FEATURE_EXTRACTORS\n\n#\n# @FEATURE_EXTRACTORS.register_module(name='firstorder')\n# class RadiomicsFirstorder(RadiomicsBase):\n# def __init__(self, **kwargs):\n# super(RadiomicsFirstorder, self).__init__(**kwargs)\n\n\n@FEATURE_EXTRACTORS.register_module(name='firstorder')\ndef radiomics_firstorder(image, mask, settings, **kwargs):\n feature_vector = collections.OrderedDict()\n\n spacing = image.GetSpacing()\n np_image = sitk.GetArrayFromImage(image)\n np_mask = sitk.GetArrayFromImage(mask)\n\n np_discretized_image, _ = bin_image(image, mask, settings, **kwargs)\n _, p_i = np.unique(np_discretized_image[np_mask != 0], return_counts=True)\n p_i = p_i.reshape((1, -1))\n\n sum_bins = np.sum(p_i, axis=1, keepdims=True)\n sum_bins[sum_bins == 0] = 1\n p_i = p_i / sum_bins\n\n np_roi_array = np_image[np_mask != 0]\n voxel_shift = settings.get('voxelArrayShift', 0)\n\n energy = np.nansum((np_roi_array + voxel_shift)**2)\n feature_vector['Energy'] = energy\n\n total_energy = energy * np.prod(spacing)\n feature_vector['TotalEnergy'] = total_energy\n\n epsilon = np.spacing(1)\n entropy = -1 * np.sum(p_i * np.log2(p_i + epsilon))\n feature_vector['Entropy'] = entropy\n\n minimum = np.nanmin(np_roi_array)\n feature_vector['Minimum'] = minimum\n\n maximum = np.nanmax(np_roi_array)\n feature_vector['Maximum'] = maximum\n\n mean_value = np.nanmean(np_roi_array)\n feature_vector['Mean'] = mean_value\n\n median_value = np.nanmedian(np_roi_array)\n feature_vector['Median'] = median_value\n\n percentile10 = np.nanpercentile(np_roi_array, 10)\n feature_vector['Percentile10'] = percentile10\n\n percentile90 = np.nanpercentile(np_roi_array, 90)\n feature_vector['Percentile90'] = percentile90\n\n interquartile_range = np.nanpercentile(np_roi_array, 75) - np.nanpercentile(np_roi_array, 25)\n feature_vector['InterquartileRange'] = interquartile_range\n\n voxel_range = maximum - minimum\n feature_vector['Range'] = voxel_range\n\n mean_absolute_deviation = np.nanmean(np.absolute(np_roi_array - mean_value))\n feature_vector['MeanAbsoluteDeviation'] = mean_absolute_deviation\n\n percentile_array = copy.deepcopy(np_roi_array)\n percentile_mask = ~np.isnan(percentile_array)\n percentile_mask[percentile_mask] = ((percentile_array - percentile10)[percentile_mask] < 0) | ((percentile_array - percentile90)[percentile_mask] > 0)\n percentile_array[percentile_mask] = np.nan\n robust_mean_absolute_deviation = np.nanmean(np.absolute(percentile_array - np.nanmean(percentile_array)))\n feature_vector['RobustMeanAbsoluteDeviation'] = robust_mean_absolute_deviation\n\n n_voxel = np.sum(~np.isnan(np_roi_array))\n root_mean_squared = np.sqrt(energy / n_voxel)\n feature_vector['RootMeanSquared'] = root_mean_squared\n\n standard_deviation = np.nanstd(np_roi_array)\n feature_vector['StandardDeviation'] = standard_deviation\n\n m2 = np.nanmean(np.power(np_roi_array - mean_value, 2))\n m3 = np.nanmean(np.power(np_roi_array - mean_value, 3))\n skewness = m3 / (m2 + epsilon) ** 1.5\n feature_vector['Skewness'] = skewness\n\n m4 = np.nanmean(np.power(np_roi_array - mean_value, 4))\n kurtosis = m4 / (m2 + epsilon) ** 2\n feature_vector['Kurtosis'] = kurtosis\n\n variance_value = standard_deviation ** 2\n feature_vector['Variance'] = variance_value\n\n 
uniformity = np.nansum(p_i ** 2)\n feature_vector['Uniformity'] = uniformity\n\n return feature_vector\n\n\n" ]
[ [ "numpy.nanmax", "numpy.nanpercentile", "numpy.nanmedian", "numpy.spacing", "numpy.sqrt", "numpy.absolute", "numpy.unique", "numpy.isnan", "numpy.power", "numpy.log2", "numpy.nanmin", "numpy.nansum", "numpy.nanmean", "numpy.prod", "numpy.nanstd", "numpy.sum" ] ]
riga/tensorfunk
[ "f607c554911cc39795c3ca696cdb0a747e291e04" ]
[ "tests/models/simple2.py" ]
[ "# -*- coding: utf-8 -*-\n\n\nimport tensorflow as tf\nimport tfdeploy as td\n\n\nsess = tf.Session()\n\nx = tf.placeholder(tf.float32, shape=[None, 10], name=\"input\")\nkeep_prob = tf.placeholder(tf.float32, name=\"keep_prob\")\n\nW = tf.Variable(tf.truncated_normal([10, 5], stddev=0.05))\nb = tf.Variable(tf.zeros([5]))\n\nW_drop = tf.nn.dropout(W, keep_prob)\n\ny = tf.nn.softmax(tf.matmul(x, W_drop) + b, name=\"output\")\n\nif td._tf_version[:3] < (0, 12, 0):\n sess.run(tf.initialize_all_variables())\nelse:\n sess.run(tf.global_variables_initializer())\n" ]
[ [ "tensorflow.matmul", "tensorflow.truncated_normal", "tensorflow.zeros", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.initialize_all_variables", "tensorflow.Session", "tensorflow.nn.dropout" ] ]
cvtower/InsightFace_Pytorch
[ "51b15c52527a521ff15b703d93b15feb788d456f" ]
[ "config.py" ]
[ "from easydict import EasyDict as edict\nfrom pathlib import Path\nimport torch\nfrom torch.nn import CrossEntropyLoss\nfrom torchvision import transforms as trans\n\ndef get_config(training = True):\n conf = edict()\n conf.data_path = Path('data')\n conf.work_path = Path('work_space/')\n conf.model_path = conf.work_path/'models'\n conf.log_path = conf.work_path/'log'\n conf.save_path = conf.work_path/'save'\n conf.input_size = [112,112]\n conf.embedding_size = 512\n conf.use_mobilfacenet = False\n conf.net_depth = 50\n conf.drop_ratio = 0.6\n conf.net_mode = 'ir_se' # or 'ir'\n conf.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n conf.test_transform = trans.Compose([\n trans.ToTensor(),\n trans.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])\n ])\n conf.data_mode = 'emore'\n conf.vgg_folder = conf.data_path/'faces_vgg_112x112'\n conf.ms1m_folder = conf.data_path/'faces_ms1m_112x112'\n conf.emore_folder = conf.data_path/'faces_emore'\n conf.batch_size = 100 # irse net depth 50 \n# conf.batch_size = 200 # mobilefacenet\n#--------------------Training Config ------------------------ \n if training: \n conf.log_path = conf.work_path/'log'\n conf.save_path = conf.work_path/'save'\n # conf.weight_decay = 5e-4\n conf.lr = 1e-3\n# conf.milestones = [3,4,5] # mobildefacenet\n conf.milestones = [4,6,7] # arcface\n conf.momentum = 0.9\n conf.pin_memory = True\n# conf.num_workers = 4 # when batchsize is 200\n conf.num_workers = 3\n conf.ce_loss = CrossEntropyLoss() \n#--------------------Inference Config ------------------------\n else:\n conf.facebank_path = conf.data_path/'facebank'\n conf.threshold = 1.5\n conf.face_limit = 10 \n #when inference, at maximum detect 10 faces in one image, my laptop is slow\n conf.min_face_size = 30 \n # the larger this value, the faster deduction, comes with tradeoff in small faces\n return conf" ]
[ [ "torch.nn.CrossEntropyLoss", "torch.cuda.is_available" ] ]
diane-wagner/dino
[ "94175993abde84179449d79e22eab7ea28dec14b" ]
[ "eval_video_segmentation.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nSome parts are taken from https://github.com/Liusifei/UVC\n\"\"\"\nimport os\nimport copy\nimport glob\nimport queue\nfrom urllib.request import urlopen\nimport argparse\nimport numpy as np\nfrom tqdm import tqdm\n\nimport cv2\nimport torch\nimport torch.nn as nn\nfrom torch.nn import functional as F\nfrom PIL import Image\nfrom torchvision import transforms\n\nimport utils\nimport vision_transformer as vits\n\n\n@torch.no_grad()\ndef eval_video_tracking_davis(args, model, frame_list, video_dir, first_seg, seg_ori, color_palette):\n \"\"\"\n Evaluate tracking on a video given first frame & segmentation\n \"\"\"\n video_folder = os.path.join(args.output_dir, video_dir.split('/')[-1])\n os.makedirs(video_folder, exist_ok=True)\n\n # The queue stores the n preceeding frames\n que = queue.Queue(args.n_last_frames)\n\n # first frame\n frame1, ori_h, ori_w = read_frame(frame_list[0])\n # extract first frame feature\n frame1_feat = extract_feature(model, frame1).T # dim x h*w\n\n # saving first segmentation\n out_path = os.path.join(video_folder, \"00000.png\")\n imwrite_indexed(out_path, seg_ori, color_palette)\n mask_neighborhood = None\n for cnt in tqdm(range(1, len(frame_list))):\n frame_tar = read_frame(frame_list[cnt])[0]\n\n # we use the first segmentation and the n previous ones\n used_frame_feats = [frame1_feat] + [pair[0] for pair in list(que.queue)]\n used_segs = [first_seg] + [pair[1] for pair in list(que.queue)]\n\n frame_tar_avg, feat_tar, mask_neighborhood = label_propagation(args, model, frame_tar, used_frame_feats, used_segs, mask_neighborhood)\n\n # pop out oldest frame if neccessary\n if que.qsize() == args.n_last_frames:\n que.get()\n # push current results into queue\n seg = copy.deepcopy(frame_tar_avg)\n que.put([feat_tar, seg])\n\n # upsampling & argmax\n frame_tar_avg = F.interpolate(frame_tar_avg, scale_factor=args.patch_size, mode='bilinear', align_corners=False, recompute_scale_factor=False)[0]\n frame_tar_avg = norm_mask(frame_tar_avg)\n _, frame_tar_seg = torch.max(frame_tar_avg, dim=0)\n\n # saving to disk\n frame_tar_seg = np.array(frame_tar_seg.squeeze().cpu(), dtype=np.uint8)\n frame_tar_seg = np.array(Image.fromarray(frame_tar_seg).resize((ori_w, ori_h), 0))\n frame_nm = frame_list[cnt].split('/')[-1].replace(\".jpg\", \".png\")\n imwrite_indexed(os.path.join(video_folder, frame_nm), frame_tar_seg, color_palette)\n\n\ndef restrict_neighborhood(h, w):\n # We restrict the set of source nodes considered to a spatial neighborhood of the query node (i.e. 
``local attention'')\n mask = torch.zeros(h, w, h, w)\n for i in range(h):\n for j in range(w):\n for p in range(2 * args.size_mask_neighborhood + 1):\n for q in range(2 * args.size_mask_neighborhood + 1):\n if i - args.size_mask_neighborhood + p < 0 or i - args.size_mask_neighborhood + p >= h:\n continue\n if j - args.size_mask_neighborhood + q < 0 or j - args.size_mask_neighborhood + q >= w:\n continue\n mask[i, j, i - args.size_mask_neighborhood + p, j - args.size_mask_neighborhood + q] = 1\n\n mask = mask.reshape(h * w, h * w)\n return mask.cuda(non_blocking=True)\n\n\ndef norm_mask(mask):\n c, h, w = mask.size()\n for cnt in range(c):\n mask_cnt = mask[cnt,:,:]\n if(mask_cnt.max() > 0):\n mask_cnt = (mask_cnt - mask_cnt.min())\n mask_cnt = mask_cnt/mask_cnt.max()\n mask[cnt,:,:] = mask_cnt\n return mask\n\n\ndef label_propagation(args, model, frame_tar, list_frame_feats, list_segs, mask_neighborhood=None):\n \"\"\"\n propagate segs of frames in list_frames to frame_tar\n \"\"\"\n ## we only need to extract feature of the target frame\n feat_tar, h, w = extract_feature(model, frame_tar, return_h_w=True)\n\n return_feat_tar = feat_tar.T # dim x h*w\n\n ncontext = len(list_frame_feats)\n feat_sources = torch.stack(list_frame_feats) # nmb_context x dim x h*w\n\n feat_tar = F.normalize(feat_tar, dim=1, p=2)\n feat_sources = F.normalize(feat_sources, dim=1, p=2)\n\n feat_tar = feat_tar.unsqueeze(0).repeat(ncontext, 1, 1)\n aff = torch.exp(torch.bmm(feat_tar, feat_sources) / 0.1) # nmb_context x h*w (tar: query) x h*w (source: keys)\n\n if args.size_mask_neighborhood > 0:\n if mask_neighborhood is None:\n mask_neighborhood = restrict_neighborhood(h, w)\n mask_neighborhood = mask_neighborhood.unsqueeze(0).repeat(ncontext, 1, 1)\n aff *= mask_neighborhood\n\n aff = aff.transpose(2, 1).reshape(-1, h * w) # nmb_context*h*w (source: keys) x h*w (tar: queries)\n tk_val, _ = torch.topk(aff, dim=0, k=args.topk)\n tk_val_min, _ = torch.min(tk_val, dim=0)\n aff[aff < tk_val_min] = 0\n\n aff = aff / torch.sum(aff, keepdim=True, axis=0)\n\n list_segs = [s.cuda() for s in list_segs]\n segs = torch.cat(list_segs)\n nmb_context, C, h, w = segs.shape\n segs = segs.reshape(nmb_context, C, -1).transpose(2, 1).reshape(-1, C).T # C x nmb_context*h*w\n seg_tar = torch.mm(segs, aff)\n seg_tar = seg_tar.reshape(1, C, h, w)\n return seg_tar, return_feat_tar, mask_neighborhood\n \n\ndef extract_feature(model, frame, return_h_w=False):\n \"\"\"Extract one frame feature everytime.\"\"\"\n out = model.get_intermediate_layers(frame.unsqueeze(0).cuda(), n=1)[0]\n out = out[:, 1:, :] # we discard the [CLS] token\n h, w = int(frame.shape[1] / model.patch_embed.patch_size), int(frame.shape[2] / model.patch_embed.patch_size)\n dim = out.shape[-1]\n out = out[0].reshape(h, w, dim)\n out = out.reshape(-1, dim)\n if return_h_w:\n return out, h, w\n return out\n\n\ndef imwrite_indexed(filename, array, color_palette):\n \"\"\" Save indexed png for DAVIS.\"\"\"\n if np.atleast_3d(array).shape[2] != 1:\n raise Exception(\"Saving indexed PNGs requires 2D array.\")\n\n im = Image.fromarray(array)\n im.putpalette(color_palette.ravel())\n im.save(filename, format='PNG')\n\n\ndef to_one_hot(y_tensor, n_dims=None):\n \"\"\"\n Take integer y (tensor or variable) with n dims &\n convert it to 1-hot representation with n+1 dims.\n \"\"\"\n if(n_dims is None):\n n_dims = int(y_tensor.max()+ 1)\n _,h,w = y_tensor.size()\n y_tensor = y_tensor.type(torch.LongTensor).view(-1, 1)\n n_dims = n_dims if n_dims is not None else 
int(torch.max(y_tensor)) + 1\n y_one_hot = torch.zeros(y_tensor.size()[0], n_dims).scatter_(1, y_tensor, 1)\n y_one_hot = y_one_hot.view(h,w,n_dims)\n return y_one_hot.permute(2, 0, 1).unsqueeze(0)\n\n\ndef read_frame_list(video_dir):\n frame_list = [img for img in glob.glob(os.path.join(video_dir,\"*.jpg\"))]\n frame_list = sorted(frame_list)\n return frame_list\n\n\ndef read_frame(frame_dir, scale_size=[480]):\n \"\"\"\n read a single frame & preprocess\n \"\"\"\n img = cv2.imread(frame_dir)\n ori_h, ori_w, _ = img.shape\n if len(scale_size) == 1:\n if(ori_h > ori_w):\n tw = scale_size[0]\n th = (tw * ori_h) / ori_w\n th = int((th // 64) * 64)\n else:\n th = scale_size[0]\n tw = (th * ori_w) / ori_h\n tw = int((tw // 64) * 64)\n else:\n th, tw = scale_size\n img = cv2.resize(img, (tw, th))\n img = img.astype(np.float32)\n img = img / 255.0\n img = img[:, :, ::-1]\n img = np.transpose(img.copy(), (2, 0, 1))\n img = torch.from_numpy(img).float()\n img = color_normalize(img)\n return img, ori_h, ori_w\n\n\ndef read_seg(seg_dir, factor, scale_size=[480]):\n seg = Image.open(seg_dir)\n _w, _h = seg.size # note PIL.Image.Image's size is (w, h)\n if len(scale_size) == 1:\n if(_w > _h):\n _th = scale_size[0]\n _tw = (_th * _w) / _h\n _tw = int((_tw // 64) * 64)\n else:\n _tw = scale_size[0]\n _th = (_tw * _h) / _w\n _th = int((_th // 64) * 64)\n else:\n _th = scale_size[1]\n _tw = scale_size[0]\n small_seg = np.array(seg.resize((_tw // factor, _th // factor), 0))\n small_seg = torch.from_numpy(small_seg.copy()).contiguous().float().unsqueeze(0)\n return to_one_hot(small_seg), np.asarray(seg)\n\n\ndef color_normalize(x, mean=[0.485, 0.456, 0.406], std=[0.228, 0.224, 0.225]):\n for t, m, s in zip(x, mean, std):\n t.sub_(m)\n t.div_(s)\n return x\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser('Evaluation with video object segmentation on DAVIS 2017')\n parser.add_argument('--pretrained_weights', default='', type=str, help=\"Path to pretrained weights to evaluate.\")\n parser.add_argument('--arch', default='vit_small', type=str,\n choices=['vit_tiny', 'vit_small', 'vit_base'], help='Architecture (support only ViT atm).')\n parser.add_argument('--patch_size', default=16, type=int, help='Patch resolution of the model.')\n parser.add_argument(\"--checkpoint_key\", default=\"teacher\", type=str, help='Key to use in the checkpoint (example: \"teacher\")')\n parser.add_argument('--output_dir', default=\".\", help='Path where to save segmentations')\n parser.add_argument('--data_path', default='/path/to/davis/', type=str)\n parser.add_argument(\"--n_last_frames\", type=int, default=7, help=\"number of preceeding frames\")\n parser.add_argument(\"--size_mask_neighborhood\", default=12, type=int,\n help=\"We restrict the set of source nodes considered to a spatial neighborhood of the query node\")\n parser.add_argument(\"--topk\", type=int, default=5, help=\"accumulate label from top k neighbors\")\n parser.add_argument(\"--bs\", type=int, default=6, help=\"Batch size, try to reduce if OOM\")\n args = parser.parse_args()\n\n print(\"git:\\n {}\\n\".format(utils.get_sha()))\n print(\"\\n\".join(\"%s: %s\" % (k, str(v)) for k, v in sorted(dict(vars(args)).items())))\n\n # building network\n model = vits.__dict__[args.arch](patch_size=args.patch_size, num_classes=0)\n print(f\"Model {args.arch} {args.patch_size}x{args.patch_size} built.\")\n model.cuda()\n utils.load_pretrained_weights(model, args.pretrained_weights, args.checkpoint_key, args.arch, args.patch_size)\n for param in 
model.parameters():\n param.requires_grad = False\n model.eval()\n\n color_palette = []\n for line in urlopen(\"https://raw.githubusercontent.com/Liusifei/UVC/master/libs/data/palette.txt\"):\n color_palette.append([int(i) for i in line.decode(\"utf-8\").split('\\n')[0].split(\" \")])\n color_palette = np.asarray(color_palette, dtype=np.uint8).reshape(-1,3)\n\n video_list = open(os.path.join(args.data_path, \"ImageSets/2017/val.txt\")).readlines()\n for i, video_name in enumerate(video_list):\n video_name = video_name.strip()\n print(f'[{i}/{len(video_list)}] Begin to segmentate video {video_name}.')\n video_dir = os.path.join(args.data_path, \"JPEGImages/480p/\", video_name)\n frame_list = read_frame_list(video_dir)\n seg_path = frame_list[0].replace(\"JPEGImages\", \"Annotations\").replace(\"jpg\", \"png\")\n first_seg, seg_ori = read_seg(seg_path, args.patch_size)\n eval_video_tracking_davis(args, model, frame_list, video_dir, first_seg, seg_ori, color_palette)\n" ]
[ [ "torch.nn.functional.normalize", "torch.mm", "torch.max", "torch.zeros", "torch.cat", "torch.topk", "torch.min", "numpy.asarray", "torch.sum", "torch.from_numpy", "numpy.atleast_3d", "torch.no_grad", "torch.bmm", "torch.nn.functional.interpolate", "torch.stack" ] ]
danielhrisca/vedo
[ "487568b7956a67b87752e3d518ba3f7e87b327a6", "487568b7956a67b87752e3d518ba3f7e87b327a6", "487568b7956a67b87752e3d518ba3f7e87b327a6" ]
[ "examples/pyplot/plot3_pip.py", "examples/pyplot/plot7_stream.py", "examples/pyplot/plot0_multi.py" ]
[ "\"\"\"Picture in picture plotting\"\"\"\nfrom vedo import show\nfrom vedo.pyplot import plot, settings\nimport numpy as np\n\nsettings.defaultFont = 'Theemim'\n\nx = np.arange(0, 4, 0.1)\ny1 = 3*np.exp(-x)\ny2 = 3*np.exp(-x)*np.cos(2*x)**2\n\naxes_opts = dict(numberOfDivisions=3, xyPlaneColor='lavender', xyAlpha=1)\n\n# Build first plot and its axes:\nplt1 = plot(x, y1,\n title=__doc__,\n xtitle='time in seconds',\n ytitle='some function [a.u.]',\n)\n\n# Build second plot and its axes:\nplt2 = plot(x, y2,\n title='my second plot',\n xtitle='time in seconds',\n ytitle='some other function',\n lc='red',\n pad=0, # no margins\n axes=axes_opts,\n)\n\n# Scale the plot2 to make it small\n# and position it anywhere in the scene:\nplt2.scale(0.5).pos(2, 1.4, 0.01)\n\nshow(plt1, plt2, zoom=1.1)\n\n", "\"\"\"Plot streamlines of the 2D field:\n\nu(x,y) = -1 - x\\^2 + y\nv(x,y) = 1 + x - y\\^2\n\"\"\"\nfrom vedo import Points, show\nfrom vedo.pyplot import streamplot\nimport numpy as np\n\n# a grid with a vector field (U,V):\nX, Y = np.mgrid[-5:5 :15j, -4:4 :15j]\nU = -1 - X**2 + Y\nV = 1 + X - Y**2\n\n# optionally, pick some random points as seeds:\nprob_pts = np.random.rand(200, 2)*8 - [4,4]\n\nsp = streamplot(X,Y, U,V,\n lw=0.001, # line width in abs. units\n direction='forward', # 'both' or 'backward'\n probes=prob_pts,\n )\n\npts = Points(prob_pts, r=5, c='white')\n\nshow(sp, pts, __doc__, axes=1, bg='bb')\n", "\"\"\"Use of plot() function analogous to matplotlib\"\"\"\nimport numpy as np, vtk\nfrom vedo import *\nfrom vedo.pyplot import plot\n\nx = np.linspace(0, 5, 10)\n\nplt1 = plot(x, x*x, 'sg-', title='Plot1: y=x*x')\nplt2 = plot(x, cos(x), 'pr--', title='Plot2: y=cos(x)')\nplt3 = plot(x, sqrt(x),'Db-', title='Plot3: y=sqrt(x)')\nplt4 = plot(x, sin(x), '*t--', title='Plot4: y=sin(x)')\n\n# window shape can be expressed as \"n/m\" or \"n|m\"\nshow(plt1, plt2, plt3, plt4, shape=\"3|1\", sharecam=False, size=(1300,900))\n\nprintc('plt1 is vtkAssembly?', isinstance(plt1, vtk.vtkAssembly))" ]
[ [ "numpy.arange", "numpy.exp", "numpy.cos" ], [ "numpy.random.rand" ], [ "numpy.linspace" ] ]
bisite/TwitterProfiling
[ "60148b7eaa93c2e5437af373c323601597a843ad" ]
[ "twitterprofiling/__init__.py" ]
[ "import twitterprofiling.twitter_manager as tm\nfrom twitterprofiling.User import User\nfrom twitterprofiling.auxiliar import *\nfrom twitterprofiling.analysis import *\nimport pandas as pd\nimport pickle\nimport pkg_resources\n\nname = \"twitterprofiling\"\n\n\nclass TwitterProfiling:\n\n \"\"\"\n The main class of the package. It offers methods to manage all the functionality\n\n :param user_name: User's Twitter name (@name).\n :param consumer_key: consumer key generated by creating Twitter application.\n :param consumer_secret: consumer secret key generated by creating Twitter application.\n :param access_token: access token key generated by creating Twitter application.\n :param access_token_secret: access token secret key generated by creating Twitter application.\n :param number_of_tweets: Number of tweets to retrieve\n \"\"\"\n\n # Class attribute configured via class method.\n api = None\n name = None\n user_name = None\n image = None\n description = None\n tweets = None\n analysis_results = None\n tweets_terms = None\n hashtags_terms = None\n topics_top_terms = None\n nmf = None\n tfidf = None\n tfidf_vectorizer = None\n\n def __init__(self, user_name, consumer_key, consumer_secret, access_token, access_token_secret, number_of_tweets=1200):\n self.api = tm.do_authentication(consumer_key, consumer_secret, access_token, access_token_secret)\n result = tm.search_user_tweets(self.api, user_name, number_of_tweets)\n self.user_name = user_name\n self.image = result[1]\n self.name = result[2]\n self.description = result[3]\n self.tweets = pd.DataFrame(result[0])\n self.analysis_results = {'nmf': None, 'kmeans': None}\n\n # Classify using NMF with the best hyperparameter configuration acquired in training phase.\n def get_user_classification(self):\n\n \"\"\"\n This function launch the classification. 
First of all, it gets the model variables.\n\n :return: analysis dictionary.\n \"\"\"\n\n if self.nmf is None:\n # Create routes\n resource_package = __name__\n resource_path = '/'.join(('data', 'models', 'nmf', 'nmf.pickle'))\n resource_path2 = '/'.join(('data', 'models', 'nmf', 'tfidf.pickle'))\n resource_path3 = '/'.join(('data', 'models', 'nmf', 'tfidf_vectorizer.pickle'))\n\n # If exists, load the models\n if pkg_resources.resource_exists(resource_package, resource_path) and \\\n pkg_resources.resource_exists(resource_package, resource_path2) and \\\n pkg_resources.resource_exists(resource_package, resource_path3):\n\n with open(pkg_resources.resource_filename(resource_package, resource_path), 'rb') as f:\n self.nmf = pickle.load(f)\n\n with open(pkg_resources.resource_filename(resource_package, resource_path2), 'rb') as f:\n self.tfidf = pickle.load(f)\n\n with open(pkg_resources.resource_filename(resource_package, resource_path3), 'rb') as f:\n self.tfidf_vectorizer = pickle.load(f)\n\n if self.analysis_results['nmf'] is None:\n doc = \" \".join(self.tweets['preprocessed_tweet'])\n self.analysis_results['nmf'] = apply_nmf(self.nmf, self.tfidf, self.tfidf_vectorizer, doc)\n\n return self.analysis_results['nmf']\n\n def get_user_name(self):\n\n \"\"\"\n This function returns user name.\n\n :return: name of the user.\n \"\"\"\n return self.name\n\n def get_user_username(self):\n\n \"\"\"\n This function returns user username.\n\n :return: username of the user.\n \"\"\"\n return self.user_name\n\n def get_user_description(self):\n\n \"\"\"\n This function returns user description.\n\n :return: user description.\n \"\"\"\n return self.description\n\n def get_user_image(self):\n\n \"\"\"\n This function returns user image path.\n\n :return: user image path.\n \"\"\"\n return self.image\n\n # Returns a dict with shape {name_of_category: [{text: term_i, count: 21}, {text: term_j, count: 15} ...], ...}\n def get_topics_top_terms(self, nterms=30):\n\n \"\"\"\n This function gets model variables and launch the construction of topics top terms.\n\n :param nterms: Number of top terms per topic.\n :return: dict containing top terms per topic.\n \"\"\"\n if self.nmf is None:\n # Create routes\n resource_package = __name__\n resource_path = '/'.join(('data', 'models', 'nmf', 'nmf.pickle'))\n resource_path2 = '/'.join(('data', 'models', 'nmf', 'tfidf.pickle'))\n resource_path3 = '/'.join(('data', 'models', 'nmf', 'tfidf_vectorizer.pickle'))\n\n # If exists, load the models\n if pkg_resources.resource_exists(resource_package, resource_path) and \\\n pkg_resources.resource_exists(resource_package, resource_path2) and \\\n pkg_resources.resource_exists(resource_package, resource_path3):\n with open(pkg_resources.resource_filename(resource_package, resource_path), 'rb') as f:\n self.nmf = pickle.load(f)\n\n with open(pkg_resources.resource_filename(resource_package, resource_path2), 'rb') as f:\n self.tfidf = pickle.load(f)\n\n with open(pkg_resources.resource_filename(resource_package, resource_path3), 'rb') as f:\n self.tfidf_vectorizer = pickle.load(f)\n\n if self.topics_top_terms is None:\n self.topics_top_terms = generate_top_terms_dictionary(self.nmf, self.tfidf_vectorizer, nterms)\n\n return self.topics_top_terms\n\n # Returns a list of dictionaries with shape {text: #hashtag, count: 12}\n def get_hashtags_terms_count(self):\n\n \"\"\"\n This function launch the construction of a term count array for hashtags terms.\n\n :return: list containing hashtags terms and its occurrences values.\n 
\"\"\"\n\n if self.tweets is None:\n return \"error, user tweets have not been searched yet.\"\n\n if self.hashtags_terms is None:\n self.hashtags_terms = generate_occurences_dictionay([l for l in self.tweets['hashtags'] if l])\n return self.hashtags_terms\n\n # Returns a list of dictionaries with shape {text: term, count: 12}\n def get_tweets_terms_count(self):\n\n \"\"\"\n This function launch the construction of a term count array for tweets terms.\n\n :return: list containing tweets terms and its occurrences values.\n \"\"\"\n if self.tweets is None:\n return \"error, user tweets have not been searched yet.\"\n\n if self.tweets_terms is None:\n self.tweets_terms = generate_occurences_dictionay([l for l in self.tweets['preprocessed_tokens'] if l])\n return self.tweets_terms\n\n\n" ]
[ [ "pandas.DataFrame" ] ]
EstebM/QRevPy
[ "3f788d1f84dfd075bbf25cf8d4b47943f9bfe682", "3f788d1f84dfd075bbf25cf8d4b47943f9bfe682" ]
[ "UI/CrossSection.py", "UI/BeamDepths.py" ]
[ "import numpy as np\r\nfrom PyQt5 import QtCore\r\n\r\n\r\nclass CrossSection(object):\r\n \"\"\"Class to generate final cross sections using the user settings.\r\n What cross sections are plotted are controlled by the user through checkboxes.\r\n\r\n Attributes\r\n ----------\r\n canvas: MplCanvas\r\n Object of MplCanvas a FigureCanvas\r\n fig: Object\r\n Figure object of the canvas\r\n units: dict\r\n Dictionary of units conversions\r\n cb_beam_cs: QCheckBox\r\n Checkbox to plot cross section based on 4 beam average\r\n cb_vert_cs: QCheckBox\r\n Checkbox to plot cross section based on vertical beam\r\n cb_ds_cs: QCheckBox\r\n Checkbox to plot cross section based on depth sounder\r\n cb_final_cs: QCheckBox\r\n Checkbox to plot final cross section based on user selections\r\n beam_cs: list\r\n Plot reference for 4 beam average cross section\r\n vb_cs: list\r\n Plot reference for vertical beam cross section\r\n ds_cs: list\r\n Plot reference for depth sounder cross section\r\n final_cs: list\r\n Plot reference for final cross section\r\n hover_connection: int\r\n Index to data cursor connection\r\n annot: Annotation\r\n Annotation object for data cursor\r\n \"\"\"\r\n\r\n def __init__(self, canvas):\r\n \"\"\"Initialize object using the specified canvas.\r\n\r\n Parameters\r\n ----------\r\n canvas: MplCanvas\r\n Object of MplCanvas\r\n \"\"\"\r\n\r\n # Initialize attributes\r\n self.canvas = canvas\r\n self.fig = canvas.fig\r\n self.units = None\r\n self.cb_beam_cs = None\r\n self.cb_vert_cs = None\r\n self.cb_ds_cs = None\r\n self.cb_final_cs = None\r\n self.beam_cs = None\r\n self.vb_cs = None\r\n self.ds_cs = None\r\n self.final_cs = None\r\n self.hover_connection = None\r\n self.annot = None\r\n\r\n def create(self, transect, units, cb_beam_cs=None, cb_vert_cs=None, cb_ds_cs=None, cb_final_cs=None):\r\n\r\n \"\"\"Create the axes and lines for the figure.\r\n\r\n Parameters\r\n ----------\r\n transect: TransectData\r\n Object of TransectData containing boat speeds to be plotted\r\n units: dict\r\n Dictionary of units conversions\r\n cb_beam_cs: QCheckBox\r\n Checkbox to plot cross section based on 4 beam average\r\n cb_vert_cs: QCheckBox\r\n Checkbox to plot cross section based on vertical beam\r\n cb_ds_cs: QCheckBox\r\n Checkbox to plot cross section based on depth sounder\r\n cb_final_cs: QCheckBox\r\n Checkbox to plot final cross section based on user selections\r\n \"\"\"\r\n\r\n # Assign and save parameters\r\n self.cb_beam_cs = cb_beam_cs\r\n self.cb_vert_cs = cb_vert_cs\r\n self.cb_ds_cs = cb_ds_cs\r\n self.cb_final_cs = cb_final_cs\r\n\r\n # Clear the plot\r\n self.fig.clear()\r\n\r\n # Configure axis\r\n self.fig.ax = self.fig.add_subplot(1, 1, 1)\r\n\r\n # Set margins and padding for figure\r\n self.fig.subplots_adjust(left=0.08, bottom=0.2, right=0.98, top=0.98, wspace=0.1, hspace=0)\r\n self.fig.ax.set_xlabel(self.canvas.tr('Length' + units['label_L']))\r\n self.fig.ax.set_ylabel(self.canvas.tr('Depth' + units['label_L']))\r\n self.fig.ax.grid()\r\n self.fig.ax.xaxis.label.set_fontsize(12)\r\n self.fig.ax.yaxis.label.set_fontsize(12)\r\n self.fig.ax.tick_params(axis='both', direction='in', bottom=True, top=True, left=True, right=True)\r\n\r\n # Initialize max trackers\r\n max_vb = np.nan\r\n max_ds = np.nan\r\n\r\n # Compute x axis data\r\n boat_track = transect.boat_vel.compute_boat_track(transect=transect)\r\n x = boat_track['distance_m']\r\n if not np.alltrue(np.isnan(boat_track['track_x_m'])):\r\n depth_selected = getattr(transect.depths, transect.depths.selected)\r\n 
beam_depths = depth_selected.depth_processed_m\r\n\r\n # Plot Final\r\n self.final_cs = self.fig.ax.plot(x * units['L'],\r\n beam_depths * units['L'],\r\n 'k-')\r\n max_final = np.nanmax(beam_depths)\r\n\r\n # Plot 4 beam average\r\n beam_depths = transect.depths.bt_depths.depth_processed_m\r\n self.beam_cs = self.fig.ax.plot(x * units['L'],\r\n beam_depths * units['L'],\r\n 'r-')\r\n max_beam = np.nanmax(beam_depths)\r\n\r\n # Plot vertical beam\r\n if transect.depths.vb_depths is not None:\r\n beam_depths = transect.depths.vb_depths.depth_processed_m\r\n self.vb_cs = self.fig.ax.plot(x * units['L'],\r\n beam_depths * units['L'],\r\n color='#aa00ff',\r\n linestyle='-')\r\n max_vb = np.nanmax(beam_depths)\r\n\r\n # Plot depth sounder\r\n if transect.depths.ds_depths is not None:\r\n beam_depths = transect.depths.ds_depths.depth_processed_m\r\n self.ds_cs = self.fig.ax.plot(x * units['L'],\r\n beam_depths * units['L'],\r\n color='#00aaff',\r\n linestyle='-')\r\n max_ds = np.nanmax(beam_depths)\r\n\r\n # Based on checkbox control make cross sections visible or not\r\n if cb_beam_cs.checkState() == QtCore.Qt.Checked:\r\n for item in self.beam_cs:\r\n item.set_visible(True)\r\n else:\r\n for item in self.beam_cs:\r\n item.set_visible(False)\r\n\r\n if cb_vert_cs.isEnabled():\r\n if cb_vert_cs.checkState() == QtCore.Qt.Checked:\r\n for item in self.vb_cs:\r\n item.set_visible(True)\r\n elif self.vb_cs is not None:\r\n for item in self.vb_cs:\r\n item.set_visible(False)\r\n\r\n if cb_ds_cs.isEnabled():\r\n if cb_ds_cs.checkState() == QtCore.Qt.Checked:\r\n for item in self.ds_cs:\r\n item.set_visible(True)\r\n elif self.ds_cs is not None:\r\n for item in self.ds_cs:\r\n item.set_visible(False)\r\n\r\n # Set axis limits\r\n max_y = np.nanmax([max_beam, max_vb, max_ds, max_final]) * 1.1\r\n self.fig.ax.invert_yaxis()\r\n self.fig.ax.set_ylim(bottom=np.ceil(max_y * units['L']), top=0)\r\n self.fig.ax.set_xlim(left=-1 * x[-1] * 0.02 * units['L'], right=x[-1] * 1.02 * units['L'])\r\n\r\n if transect.start_edge == 'Right':\r\n self.fig.ax.invert_xaxis()\r\n self.fig.ax.set_xlim(right=-1 * x[-1] * 0.02 * units['L'], left=x[-1] * 1.02 * units['L'])\r\n\r\n # Initialize annotation for data cursor\r\n self.annot = self.fig.ax.annotate(\"\", xy=(0, 0), xytext=(-20, 20), textcoords=\"offset points\",\r\n bbox=dict(boxstyle=\"round\", fc=\"w\"),\r\n arrowprops=dict(arrowstyle=\"->\"))\r\n\r\n self.annot.set_visible(False)\r\n\r\n self.canvas.draw()\r\n\r\n def change(self):\r\n \"\"\"Changes the visibility of the available beams based on user input via checkboxes.\r\n \"\"\"\r\n\r\n # Set visibility of beams based on user input\r\n if self.cb_beam_cs.checkState() == QtCore.Qt.Checked:\r\n for item in self.beam_cs:\r\n item.set_visible(True)\r\n else:\r\n for item in self.beam_cs:\r\n item.set_visible(False)\r\n\r\n if self.cb_vert_cs.isEnabled():\r\n if self.cb_vert_cs.checkState() == QtCore.Qt.Checked:\r\n for item in self.vb_cs:\r\n item.set_visible(True)\r\n else:\r\n for item in self.vb_cs:\r\n item.set_visible(False)\r\n\r\n if self.cb_ds_cs.isEnabled():\r\n if self.cb_ds_cs.checkState() == QtCore.Qt.Checked:\r\n for item in self.ds_cs:\r\n item.set_visible(True)\r\n else:\r\n for item in self.ds_cs:\r\n item.set_visible(False)\r\n\r\n if self.cb_final_cs.checkState() == QtCore.Qt.Checked:\r\n for item in self.final_cs:\r\n item.set_visible(True)\r\n else:\r\n for item in self.final_cs:\r\n item.set_visible(False)\r\n\r\n # Draw canvas\r\n self.canvas.draw()\r\n\r\n def update_annot(self, ind, plt_ref, 
ref_label):\r\n \"\"\"Updates the location and text and makes visible the previously initialized and hidden annotation.\r\n\r\n Parameters\r\n ----------\r\n ind: dict\r\n Contains data selected.\r\n plt_ref: Line2D\r\n Reference containing plotted data\r\n ref_label: str\r\n Label used to ID data type in annotation\r\n \"\"\"\r\n\r\n # Get selected data coordinates\r\n pos = plt_ref._xy[ind[\"ind\"][0]]\r\n\r\n # Shift annotation box left or right depending on which half of the axis the pos x is located and the\r\n # direction of x increasing.\r\n if plt_ref.axes.viewLim.intervalx[0] < plt_ref.axes.viewLim.intervalx[1]:\r\n if pos[0] < (plt_ref.axes.viewLim.intervalx[0] + plt_ref.axes.viewLim.intervalx[1]) / 2:\r\n self.annot._x = -20\r\n else:\r\n self.annot._x = -80\r\n else:\r\n if pos[0] < (plt_ref.axes.viewLim.intervalx[0] + plt_ref.axes.viewLim.intervalx[1]) / 2:\r\n self.annot._x = -80\r\n else:\r\n self.annot._x = -20\r\n\r\n # Shift annotation box up or down depending on which half of the axis the pos y is located and the\r\n # direction of y increasing.\r\n if plt_ref.axes.viewLim.intervaly[0] < plt_ref.axes.viewLim.intervaly[1]:\r\n if pos[1] > (plt_ref.axes.viewLim.intervaly[0] + plt_ref.axes.viewLim.intervaly[1]) / 2:\r\n self.annot._y = -40\r\n else:\r\n self.annot._y = 20\r\n else:\r\n if pos[1] > (plt_ref.axes.viewLim.intervaly[0] + plt_ref.axes.viewLim.intervaly[1]) / 2:\r\n self.annot._y = 20\r\n else:\r\n self.annot._y = -40\r\n self.annot.xy = pos\r\n\r\n # Format and display text\r\n text = 'x: {:.2f}, {}: {:.2f}'.format(pos[0], ref_label, pos[1])\r\n self.annot.set_text(text)\r\n\r\n def hover(self, event):\r\n \"\"\"Determines if the user has selected a location with data and makes\r\n annotation visible and calls method to update the text of the annotation. 
If the\r\n location is not valid the existing annotation is hidden.\r\n\r\n Parameters\r\n ----------\r\n event: MouseEvent\r\n Triggered when mouse button is pressed.\r\n \"\"\"\r\n\r\n # Set annotation to visible\r\n vis = self.annot.get_visible()\r\n\r\n # Determine if mouse location references a data point in the plot and update the annotation.\r\n if event.inaxes == self.fig.ax:\r\n cont_final = False\r\n cont_vb = False\r\n cont_ds = False\r\n cont_4b = False\r\n ind_final = None\r\n ind_vb = None\r\n ind_ds = None\r\n ind_4b = None\r\n\r\n if self.final_cs is not None:\r\n cont_final, ind_final = self.final_cs[0].contains(event)\r\n if self.vb_cs is not None:\r\n cont_vb, ind_vb = self.vb_cs[0].contains(event)\r\n if self.ds_cs is not None:\r\n cont_ds, ind_ds = self.ds_cs[0].contains(event)\r\n if self.beam_cs is not None:\r\n cont_4b, ind_4b = self.beam_cs[0].contains(event)\r\n\r\n if cont_final and self.final_cs[0].get_visible():\r\n self.update_annot(ind_final, self.final_cs[0], 'Final')\r\n self.annot.set_visible(True)\r\n self.canvas.draw_idle()\r\n elif cont_vb and self.vb_cs[0].get_visible():\r\n self.update_annot(ind_vb, self.vb_cs[0], 'VB')\r\n self.annot.set_visible(True)\r\n self.canvas.draw_idle()\r\n elif cont_ds and self.ds_cs[0].get_visible():\r\n self.update_annot(ind_ds, self.ds_cs[0], 'DS')\r\n self.annot.set_visible(True)\r\n self.canvas.draw_idle()\r\n elif cont_4b and self.beam_cs[0].get_visible():\r\n self.update_annot(ind_4b, self.beam_cs[0], 'DS')\r\n self.annot.set_visible(True)\r\n self.canvas.draw_idle()\r\n else:\r\n # If the cursor location is not associated with the plotted data hide the annotation.\r\n if vis:\r\n self.annot.set_visible(False)\r\n self.canvas.draw_idle()\r\n\r\n def set_hover_connection(self, setting):\r\n \"\"\"Turns the connection to the mouse event on or off.\r\n\r\n Parameters\r\n ----------\r\n setting: bool\r\n Boolean to specify whether the connection for the mouse event is active or not.\r\n \"\"\"\r\n if setting and self.hover_connection is None:\r\n # self.hover_connection = self.canvas.mpl_connect(\"motion_notify_event\", self.hover)\r\n self.hover_connection = self.canvas.mpl_connect('button_press_event', self.hover)\r\n elif not setting:\r\n self.canvas.mpl_disconnect(self.hover_connection)\r\n self.hover_connection = None\r\n self.annot.set_visible(False)\r\n self.canvas.draw_idle()\r\n", "import numpy as np\r\nfrom PyQt5 import QtCore\r\n\r\n\r\nclass BeamDepths(object):\r\n \"\"\"Class to generate cross section using the depths of each beam available.\r\n What beams are plotted are controlled by the user through checkboxes.\r\n\r\n Attributes\r\n ----------\r\n canvas: MplCanvas\r\n Object of MplCanvas a FigureCanvas\r\n fig: Object\r\n Figure object of the canvas\r\n units: dict\r\n Dictionary of units conversions\r\n cb_beam1: QCheckBox\r\n Checkbox to plot beam 1\r\n cb_beam2: QCheckBox\r\n Checkbox to plot beam 2\r\n cb_beam3: QCheckBox\r\n Checkbox to plot beam 3\r\n cb_beam4: QCheckBox\r\n Checkbox to plot beam 4\r\n cb_vert: QCheckBox\r\n Checkbox to plot vertical beam\r\n cb_ds: QCheckBox\r\n Checkbox to plot depth sounder\r\n beam1: list\r\n Plot reference for beam 1\r\n beam2: list\r\n Plot reference for beam 2\r\n beam3: list\r\n Plot reference for beam 3\r\n beam4: list\r\n Plot reference for beam 4\r\n vb: list\r\n Plot reference for vertical beam\r\n ds: list\r\n Plot reference for depth sounder\r\n hover_connection: int\r\n Index to data cursor connection\r\n annot: Annotation\r\n Annotation for data 
cursor\r\n \"\"\"\r\n\r\n def __init__(self, canvas):\r\n \"\"\"Initialize object using the specified canvas.\r\n\r\n Parameters\r\n ----------\r\n canvas: MplCanvas\r\n Object of MplCanvas\r\n \"\"\"\r\n\r\n # Initialize attributes\r\n self.canvas = canvas\r\n self.fig = canvas.fig\r\n self.units = None\r\n self.cb_beam1 = None\r\n self.cb_beam2 = None\r\n self.cb_beam3 = None\r\n self.cb_beam4 = None\r\n self.cb_vert = None\r\n self.cb_ds = None\r\n self.beam1 = None\r\n self.beam2 = None\r\n self.beam3 = None\r\n self.beam4 = None\r\n self.vb = None\r\n self.ds = None\r\n self.hover_connection = None\r\n self.annot = None\r\n\r\n def create(self, transect, units, cb_beam1=None, cb_beam2=None, cb_beam3=None, cb_beam4=None,\r\n cb_vert=None, cb_ds=None):\r\n\r\n \"\"\"Create the axes and lines for the figure.\r\n\r\n Parameters\r\n ----------\r\n transect: TransectData\r\n Object of TransectData containing boat speeds to be plotted\r\n units: dict\r\n Dictionary of units conversions\r\n cb_beam1: QCheckBox\r\n Checkbox to plot beam 1\r\n cb_beam2: QCheckBox\r\n Checkbox to plot beam 2\r\n cb_beam3: QCheckBox\r\n Checkbox to plot beam 3\r\n cb_beam4: QCheckBox\r\n Checkbox to plot beam 4\r\n cb_vert: QCheckBox\r\n Checkbox to plot vertical beam\r\n cb_ds: QCheckBox\r\n Checkbox to plot depth sounder\r\n \"\"\"\r\n\r\n # Assign and save parameters\r\n self.cb_beam1 = cb_beam1\r\n self.cb_beam2 = cb_beam2\r\n self.cb_beam3 = cb_beam3\r\n self.cb_beam4 = cb_beam4\r\n self.cb_vert = cb_vert\r\n self.cb_ds = cb_ds\r\n\r\n # Clear the plot\r\n self.fig.clear()\r\n\r\n # Configure axis\r\n self.fig.ax = self.fig.add_subplot(1, 1, 1)\r\n\r\n # Set margins and padding for figure\r\n self.fig.subplots_adjust(left=0.08, bottom=0.2, right=0.98, top=0.98, wspace=0.1, hspace=0)\r\n\r\n # Configure axes\r\n self.fig.ax.set_xlabel(self.canvas.tr('Length' + units['label_L']))\r\n self.fig.ax.set_ylabel(self.canvas.tr('Depth' + units['label_L']))\r\n self.fig.ax.grid()\r\n self.fig.ax.xaxis.label.set_fontsize(12)\r\n self.fig.ax.yaxis.label.set_fontsize(12)\r\n self.fig.ax.tick_params(axis='both', direction='in', bottom=True, top=True, left=True, right=True)\r\n\r\n # Initialize max trackers\r\n max_vert = np.nan\r\n max_ds = np.nan\r\n\r\n # Compute x axis data\r\n boat_track = transect.boat_vel.compute_boat_track(transect=transect)\r\n\r\n # Check to make sure there is valib boat track data\r\n if not np.alltrue(np.isnan(boat_track['track_x_m'])):\r\n x = boat_track['distance_m']\r\n invalid_beams = np.logical_not(transect.depths.bt_depths.valid_beams)\r\n beam_depths = transect.depths.bt_depths.depth_beams_m\r\n\r\n # Plot beams\r\n self.beam1 = self.fig.ax.plot(x * units['L'],\r\n beam_depths[0, :] * units['L'],\r\n 'r-')\r\n self.beam1.append(self.fig.ax.plot(x[invalid_beams[0, :]] * units['L'],\r\n beam_depths[0, invalid_beams[0, :]] * units['L'],\r\n 'r', linestyle='',\r\n marker='$O$')[0])\r\n\r\n self.beam2 = self.fig.ax.plot(x * units['L'],\r\n beam_depths[1, :] * units['L'],\r\n color='#005500')\r\n self.beam2.append(self.fig.ax.plot(x[invalid_beams[1, :]] * units['L'],\r\n beam_depths[1, invalid_beams[1, :]] * units['L'],\r\n color='#005500',\r\n linestyle='',\r\n marker='$O$')[0])\r\n\r\n self.beam3 = self.fig.ax.plot(x * units['L'],\r\n beam_depths[2, :] * units['L'],\r\n 'b-')\r\n self.beam3.append(self.fig.ax.plot(x[invalid_beams[2, :]] * units['L'],\r\n beam_depths[2, invalid_beams[2, :]] * units['L'],\r\n 'b',\r\n linestyle='',\r\n marker='$O$')[0])\r\n\r\n self.beam4 = self.fig.ax.plot(x 
* units['L'],\r\n beam_depths[3, :] * units['L'],\r\n color='#aa5500',\r\n linestyle='-')\r\n self.beam4.append(self.fig.ax.plot(x[invalid_beams[3, :]] * units['L'],\r\n beam_depths[3, invalid_beams[3, :]] * units['L'],\r\n color='#aa5500',\r\n linestyle='',\r\n marker='$O$')[0])\r\n # Compute max depth from beams\r\n max_beams = np.nanmax(np.nanmax(transect.depths.bt_depths.depth_beams_m))\r\n\r\n # Based on checkbox control make beams visible\r\n if cb_beam1.checkState() == QtCore.Qt.Checked:\r\n for item in self.beam1:\r\n item.set_visible(True)\r\n else:\r\n for item in self.beam1:\r\n item.set_visible(False)\r\n\r\n if cb_beam2.checkState() == QtCore.Qt.Checked:\r\n for item in self.beam2:\r\n item.set_visible(True)\r\n else:\r\n for item in self.beam2:\r\n item.set_visible(False)\r\n\r\n if cb_beam3.checkState() == QtCore.Qt.Checked:\r\n for item in self.beam3:\r\n item.set_visible(True)\r\n else:\r\n for item in self.beam3:\r\n item.set_visible(False)\r\n\r\n if cb_beam4.checkState() == QtCore.Qt.Checked:\r\n for item in self.beam4:\r\n item.set_visible(True)\r\n else:\r\n for item in self.beam4:\r\n item.set_visible(False)\r\n\r\n # Plot vertical beam\r\n if transect.depths.vb_depths is not None:\r\n invalid_beams = np.logical_not(transect.depths.vb_depths.valid_beams[0, :])\r\n beam_depths = transect.depths.vb_depths.depth_beams_m[0, :]\r\n self.vb = self.fig.ax.plot(x * units['L'],\r\n beam_depths * units['L'],\r\n color='#aa00ff',\r\n linestyle='-')\r\n self.vb.append(self.fig.ax.plot(x[invalid_beams] * units['L'],\r\n beam_depths[invalid_beams] * units['L'],\r\n color='#aa00ff',\r\n linestyle='',\r\n marker='$O$')[0])\r\n\r\n if cb_vert.checkState() == QtCore.Qt.Checked:\r\n for item in self.vb:\r\n item.set_visible(True)\r\n else:\r\n for item in self.vb:\r\n item.set_visible(False)\r\n\r\n max_vert = np.nanmax(beam_depths)\r\n\r\n # Plot depth sounder\r\n if transect.depths.ds_depths is not None:\r\n invalid_beams = np.logical_not(transect.depths.ds_depths.valid_beams[0, :])\r\n beam_depths = transect.depths.ds_depths.depth_beams_m[0, :]\r\n self.ds = self.fig.ax.plot(x * units['L'],\r\n beam_depths * units['L'],\r\n color='#00aaff')\r\n self.ds.append(self.fig.ax.plot(x[invalid_beams] * units['L'],\r\n beam_depths[invalid_beams] * units['L'],\r\n color='#00aaff',\r\n linestyle='',\r\n marker='$O$')[0])\r\n\r\n if cb_ds.checkState() == QtCore.Qt.Checked:\r\n for item in self.ds:\r\n item.set_visible(True)\r\n else:\r\n for item in self.ds:\r\n item.set_visible(False)\r\n\r\n max_ds = np.nanmax(beam_depths)\r\n\r\n # Set axis limits\r\n max_y = np.nanmax([max_beams, max_vert, max_ds]) * 1.1\r\n self.fig.ax.invert_yaxis()\r\n self.fig.ax.set_ylim(bottom=np.ceil(max_y * units['L']), top=0)\r\n self.fig.ax.set_xlim(left=-1 * x[-1] * 0.02 * units['L'], right=x[-1] * 1.02 * units['L'])\r\n\r\n # Plot all transects from left to right\r\n if transect.start_edge == 'Right':\r\n self.fig.ax.invert_xaxis()\r\n self.fig.ax.set_xlim(right=-1 * x[-1] * 0.02 * units['L'], left=x[-1] * 1.02 * units['L'])\r\n\r\n self.annot = self.fig.ax.annotate(\"\", xy=(0, 0), xytext=(-20, 20), textcoords=\"offset points\",\r\n bbox=dict(boxstyle=\"round\", fc=\"w\"),\r\n arrowprops=dict(arrowstyle=\"->\"))\r\n\r\n self.annot.set_visible(False)\r\n\r\n self.canvas.draw()\r\n\r\n def change(self):\r\n \"\"\"Changes the visibility of the available beams based on user input via checkboxes.\r\n \"\"\"\r\n\r\n # Set visibility of beams based on user input\r\n if self.cb_beam1.checkState() == 
QtCore.Qt.Checked:\r\n for item in self.beam1:\r\n item.set_visible(True)\r\n else:\r\n for item in self.beam1:\r\n item.set_visible(False)\r\n\r\n if self.cb_beam2.checkState() == QtCore.Qt.Checked:\r\n for item in self.beam2:\r\n item.set_visible(True)\r\n else:\r\n for item in self.beam2:\r\n item.set_visible(False)\r\n\r\n if self.cb_beam3.checkState() == QtCore.Qt.Checked:\r\n for item in self.beam3:\r\n item.set_visible(True)\r\n else:\r\n for item in self.beam3:\r\n item.set_visible(False)\r\n\r\n if self.cb_beam4.checkState() == QtCore.Qt.Checked:\r\n for item in self.beam4:\r\n item.set_visible(True)\r\n else:\r\n for item in self.beam4:\r\n item.set_visible(False)\r\n\r\n if self.cb_vert.isEnabled():\r\n if self.cb_vert.checkState() == QtCore.Qt.Checked:\r\n for item in self.vb:\r\n item.set_visible(True)\r\n else:\r\n for item in self.vb:\r\n item.set_visible(False)\r\n\r\n if self.cb_ds.isEnabled():\r\n if self.cb_ds.checkState() == QtCore.Qt.Checked:\r\n for item in self.ds:\r\n item.set_visible(True)\r\n else:\r\n for item in self.ds:\r\n item.set_visible(False)\r\n\r\n # Draw canvas\r\n self.canvas.draw()\r\n\r\n def update_annot(self, ind, plt_ref, ref_label):\r\n \"\"\"Creates annotation for data cursor event.\r\n\r\n Parameters\r\n ----------\r\n ind: dict\r\n Contains array of position data from mouse click.\r\n plt_ref: Line2D\r\n Line on graph that was clicked\r\n ref_label: str\r\n Label for line that was clicked\r\n\r\n \"\"\"\r\n\r\n pos = plt_ref._xy[ind[\"ind\"][0]]\r\n\r\n # Shift annotation box left or right depending on which half of the axis the pos x is located and the\r\n # direction of x increasing.\r\n if plt_ref.axes.viewLim.intervalx[0] < plt_ref.axes.viewLim.intervalx[1]:\r\n if pos[0] < (plt_ref.axes.viewLim.intervalx[0] + plt_ref.axes.viewLim.intervalx[1]) / 2:\r\n self.annot._x = -20\r\n else:\r\n self.annot._x = -80\r\n else:\r\n if pos[0] < (plt_ref.axes.viewLim.intervalx[0] + plt_ref.axes.viewLim.intervalx[1]) / 2:\r\n self.annot._x = -80\r\n else:\r\n self.annot._x = -20\r\n\r\n # Shift annotation box up or down depending on which half of the axis the pos y is located and the\r\n # direction of y increasing.\r\n if plt_ref.axes.viewLim.intervaly[0] < plt_ref.axes.viewLim.intervaly[1]:\r\n if pos[1] > (plt_ref.axes.viewLim.intervaly[0] + plt_ref.axes.viewLim.intervaly[1]) / 2:\r\n self.annot._y = -40\r\n else:\r\n self.annot._y = 20\r\n else:\r\n if pos[1] > (plt_ref.axes.viewLim.intervaly[0] + plt_ref.axes.viewLim.intervaly[1]) / 2:\r\n self.annot._y = 20\r\n else:\r\n self.annot._y = -40\r\n\r\n # Create annotation box\r\n self.annot.xy = pos\r\n text = 'x: {:.2f}, {}: {:.2f}'.format(pos[0], ref_label, pos[1])\r\n self.annot.set_text(text)\r\n\r\n def hover(self, event):\r\n \"\"\"Handles data cursor events.\r\n\r\n Parameters\r\n ----------\r\n event: MouseEvent\r\n Results of mouse click\r\n \"\"\"\r\n\r\n vis = self.annot.get_visible()\r\n\r\n # Check to see if event is associated with the figure\r\n if event.inaxes == self.fig.ax:\r\n # Intialize variables\r\n cont_beam1 = False\r\n cont_beam2 = False\r\n cont_beam3 = False\r\n cont_beam4 = False\r\n cont_vb = False\r\n cont_ds = False\r\n ind_beam1 = None\r\n ind_beam2 = None\r\n ind_beam3 = None\r\n ind_beam4 = None\r\n ind_vb = None\r\n ind_ds = None\r\n\r\n # Identify beam selected\r\n if self.beam1 is not None:\r\n cont_beam1, ind_beam1 = self.beam1[0].contains(event)\r\n if self.beam2 is not None:\r\n cont_beam2, ind_beam2 = self.beam2[0].contains(event)\r\n if self.beam3 is not 
None:\r\n cont_beam3, ind_beam3 = self.beam3[0].contains(event)\r\n if self.beam4 is not None:\r\n cont_beam4, ind_beam4 = self.beam4[0].contains(event)\r\n if self.vb is not None:\r\n cont_vb, ind_vb = self.vb[0].contains(event)\r\n if self.ds is not None:\r\n cont_ds, ind_ds = self.ds[0].contains(event)\r\n\r\n # Display result\r\n if cont_beam1 and self.beam1[0].get_visible():\r\n self.update_annot(ind_beam1, self.beam1[0], 'B1')\r\n self.annot.set_visible(True)\r\n self.canvas.draw_idle()\r\n elif cont_beam2 and self.beam2[0].get_visible():\r\n self.update_annot(ind_beam2, self.beam2[0], 'B2')\r\n self.annot.set_visible(True)\r\n self.canvas.draw_idle()\r\n elif cont_beam3 and self.beam3[0].get_visible():\r\n self.update_annot(ind_beam3, self.beam3[0], 'B3')\r\n self.annot.set_visible(True)\r\n self.canvas.draw_idle()\r\n if cont_beam4 and self.beam4[0].get_visible():\r\n self.update_annot(ind_beam4, self.beam4[0], 'B4')\r\n self.annot.set_visible(True)\r\n self.canvas.draw_idle()\r\n elif cont_vb and self.vb[0].get_visible():\r\n self.update_annot(ind_vb, self.vb[0], 'VB')\r\n self.annot.set_visible(True)\r\n self.canvas.draw_idle()\r\n elif cont_ds and self.ds[0].get_visible():\r\n self.update_annot(ind_ds, self.ds[0], 'VTG')\r\n self.annot.set_visible(True)\r\n self.canvas.draw_idle()\r\n else:\r\n if vis:\r\n self.annot.set_visible(False)\r\n self.canvas.draw_idle()\r\n\r\n def set_hover_connection(self, setting):\r\n \"\"\"Provides connection between data cursor and canvas.\r\n Parameters\r\n ----------\r\n setting: bool\r\n Identifies if the data cursor is selected.\r\n \"\"\"\r\n\r\n if setting and self.hover_connection is None:\r\n self.hover_connection = self.canvas.mpl_connect('button_press_event', self.hover)\r\n elif not setting:\r\n self.canvas.mpl_disconnect(self.hover_connection)\r\n self.hover_connection = None\r\n self.annot.set_visible(False)\r\n self.canvas.draw_idle()\r\n" ]
[ [ "numpy.nanmax", "numpy.ceil", "numpy.isnan" ], [ "numpy.isnan", "numpy.logical_not", "numpy.ceil", "numpy.nanmax" ] ]
Neuraxio/NeuraAxle
[ "93699871e475ef3b275498488b6243390e18596c" ]
[ "testing/steps/test_numpy_steps.py" ]
[ "\"\"\"\nTests for NumPy Steps\n========================================\n\n..\n Copyright 2019, Neuraxio Inc.\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\n\"\"\"\n\nimport numpy as np\nfrom neuraxle.steps.numpy import (NumpyConcatenateInnerFeatures,\n NumpyFlattenDatum, NumpyShapePrinter,\n NumpyTranspose, NumpyFFT, NumpyRavel)\n\n\ndef test_flatten_datum():\n flat = NumpyFlattenDatum()\n data = np.random.random((10, 4, 5, 2)) # 4D array (could be ND with N>=2).\n expected_data = np.copy(data).reshape(10, 4 * 5 * 2) # 2D array.\n\n flat, received_data = flat.fit_transform(data)\n\n assert (received_data == expected_data).all()\n\n\ndef test_concat_features():\n concat = NumpyConcatenateInnerFeatures()\n # ND arrays\n data1 = np.random.random((10, 4, 5, 2))\n data2 = np.random.random((10, 4, 5, 10))\n expected_all_data = np.concatenate([data1, data2], axis=-1)\n\n concat, received_all_data = concat.fit_transform([data1, data2])\n\n assert tuple(received_all_data.shape) == tuple(expected_all_data.shape)\n assert (received_all_data == expected_all_data).all()\n\n\ndef test_numpy_transpose():\n tr = NumpyTranspose()\n data = np.random.random((10, 7))\n expected_data = np.copy(data).transpose()\n\n tr, received_data = tr.fit_transform(data)\n\n assert (received_data == expected_data).all()\n\n\ndef test_numpy_shape_printer():\n pr = NumpyShapePrinter()\n pr.fit_transform(np.ones((10, 11)))\n\n\ndef test_numpy_fft():\n fft = NumpyFFT()\n fft.fit_transform(np.ones((10, 11)))\n\n\ndef test_numpy_ravel():\n nr = NumpyRavel()\n nr, out = nr.fit_transform(np.ones((10, 11)))\n assert out.shape == (110,)\n" ]
[ [ "numpy.concatenate", "numpy.copy", "numpy.random.random", "numpy.ones" ] ]
biemann/rl-testbed-for-energyplus
[ "a01be4d12eda970b352729ff6cb4a3eea8ddee6a" ]
[ "gym_energyplus/envs/energyplus_model.py" ]
[ "# Copyright (c) IBM Corp. 2018. All Rights Reserved.\n# Project name: Reinforcement Learning Testbed for Power Consumption Optimization\n# This project is licensed under the MIT License, see LICENSE\n\nimport json\nimport math\nimport os\nimport sys\nimport time\nfrom abc import ABCMeta, abstractmethod\nfrom datetime import datetime, timedelta\nfrom glob import glob\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom matplotlib.widgets import Slider, Button\n\n\nclass EnergyPlusModel(metaclass=ABCMeta):\n\n def __init__(self, model_file, log_dir=None, elec_price_case_study=True, verbose=False):\n\n self.action_space = None\n self.observation_space = None\n self.log_dir = log_dir\n self.monitor_file = None\n self.model_basename = os.path.splitext(os.path.basename(model_file))[0]\n self.low_action = None\n self.high_action = None\n self.low_obs = None\n self.high_obs = None\n self.elec_price_case_study = elec_price_case_study\n self.setup_spaces()\n self.action = 0.5 * (self.action_space.low + self.action_space.high)\n self.action_prev = self.action\n self.raw_state = None\n self.verbose = verbose\n self.timestamp_csv = None\n self.sl_episode = None\n self.fig = None\n self.axprogress = None\n self.axslider = None\n\n\n # Progress data\n self.num_episodes = 0\n self.num_episodes_last = 0\n\n self.reward = None\n self.reward_mean = None\n\n def reset(self):\n pass\n\n # Parse date/time format from EnergyPlus and return datetime object with correction for 24:00 case\n @staticmethod\n def _parse_datetime(dstr):\n # ' MM/DD HH:MM:SS' or 'MM/DD HH:MM:SS'\n # Dirty hack\n if dstr[0] != ' ':\n dstr = ' ' + dstr\n # year = 2017\n year = 2013 # for CHICAGO_IL_USA TMY2-94846\n month = int(dstr[1:3])\n day = int(dstr[4:6])\n hour = int(dstr[8:10])\n minute = int(dstr[11:13])\n sec = 0\n msec = 0\n if hour == 24:\n hour = 0\n dt = datetime(year, month, day, hour, minute, sec, msec) + timedelta(days=1)\n else:\n dt = datetime(year, month, day, hour, minute, sec, msec)\n return dt\n\n # Convert list of date/time string to list of datetime objects\n def _convert_datetime24(self, dates):\n # ' MM/DD HH:MM:SS'\n dates_new = []\n for d in dates:\n # year = 2017\n # month = int(d[1:3])\n # day = int(d[4:6])\n # hour = int(d[8:10])\n # minute = int(d[11:13])\n # sec = 0\n # msec = 0\n # if hour == 24:\n # hour = 0\n # d_new = datetime(year, month, day, hour, minute, sec, msec) + dt.timedelta(days=1)\n # else:\n # d_new = datetime(year, month, day, hour, minute, sec, msec)\n # dates_new.append(d_new)\n dates_new.append(self._parse_datetime(d))\n return dates_new\n\n # Generate x_pos and x_labels\n def generate_x_pos_x_labels(self, dates):\n time_delta = self._parse_datetime(dates[1]) - self._parse_datetime(dates[0])\n x_pos = []\n x_labels = []\n for i, d in enumerate(dates):\n dt = self._parse_datetime(d) - time_delta\n if dt.hour == 0 and dt.minute == 0:\n x_pos.append(i)\n x_labels.append(dt.strftime('%m/%d'))\n return x_pos, x_labels\n\n def set_action(self, normalized_action):\n # In Stable Baselines, the action seems to be normalized to [-1.0, 1.0].\n # So it must be scaled back into action_space by the environment.\n\n self.action_prev = self.action\n self.action = np.clip(self.action, self.action_space.low, self.action_space.high)\n self.action = self.low_action + (normalized_action + 1.) * 0.5 * (self.high_action - self.low_action)\n\n # self.action_prev = self.action\n # self.action = self.action_space.low + (normalized_action + 1.) 
* 0.5 * (\n # self.action_space.high - self.action_space.low)\n # self.action = np.clip(self.action, self.action_space.low, self.action_space.high)\n\n\n @abstractmethod\n def setup_spaces(self):\n pass\n\n # Need to handle the case that raw_state is None\n @abstractmethod\n def set_raw_state(self, raw_state):\n pass\n\n def elec_get_state(self, elec_price):\n return self.elec_format_state(self.raw_state, elec_price)\n\n def get_state(self):\n return self.format_state(self.raw_state)\n\n @abstractmethod\n def compute_reward(self, observation, elec_price):\n pass\n\n @abstractmethod\n def elec_format_state(self, raw_state, elec_price):\n pass\n\n @abstractmethod\n def format_state(self, raw_state):\n pass\n\n # --------------------------------------------------\n # Plotting staffs follow\n # --------------------------------------------------\n def plot(self, log_dir='', csv_file='', **kwargs):\n if log_dir is not '':\n if not os.path.isdir(log_dir):\n print('energyplus_model.plot: {} is not a directory'.format(log_dir))\n return\n print('energyplus_plot.plot log={}'.format(log_dir))\n self.log_dir = log_dir\n self.monitor_file = self.log_dir + '/monitor.csv'\n self.show_progress()\n else:\n if not os.path.isfile(csv_file):\n print('energyplus_model.plot: {} is not a file'.format(csv_file))\n return\n print('energyplus_model.plot csv={}'.format(csv_file))\n self.read_episode(csv_file)\n plt.rcdefaults()\n plt.rcParams['font.size'] = 6\n plt.rcParams['lines.linewidth'] = 1.0\n plt.rcParams['legend.loc'] = 'lower right'\n self.fig = plt.figure(1, figsize=(16, 10))\n self.plot_episode(csv_file)\n plt.show()\n\n # Corresponds to the bottom graph (convergence):\n\n def show_progress(self):\n\n # Read progress file\n if not self.read_monitor_file():\n print('Progress data is missing')\n sys.exit(1)\n\n # Initialize graph (Parameters for all plots)\n plt.rcdefaults()\n plt.rcParams['font.size'] = 6\n plt.rcParams['lines.linewidth'] = 1.0\n plt.rcParams['legend.loc'] = 'lower right'\n\n self.fig = plt.figure(1, figsize=(16, 10))\n\n # Show widgets\n axcolor = 'lightgoldenrodyellow'\n self.axprogress = self.fig.add_axes([0.15, 0.10, 0.70, 0.15], facecolor=axcolor)\n self.axslider = self.fig.add_axes([0.15, 0.04, 0.70, 0.02], facecolor=axcolor)\n\n # The following, we don really care about\n axfirst = self.fig.add_axes([0.15, 0.01, 0.03, 0.02])\n axlast = self.fig.add_axes([0.82, 0.01, 0.03, 0.02])\n axprev = self.fig.add_axes([0.46, 0.01, 0.03, 0.02])\n axnext = self.fig.add_axes([0.51, 0.01, 0.03, 0.02])\n\n # Slider is drawn in plot_progress()\n\n # First/Last button\n self.button_first = Button(axfirst, 'First', color=axcolor, hovercolor='0.975')\n self.button_first.on_clicked(self.first_episode_num)\n self.button_last = Button(axlast, 'Last', color=axcolor, hovercolor='0.975')\n self.button_last.on_clicked(self.last_episode_num)\n\n # Next/Prev button\n self.button_prev = Button(axprev, 'Prev', color=axcolor, hovercolor='0.975')\n self.button_prev.on_clicked(self.prev_episode_num)\n self.button_next = Button(axnext, 'Next', color=axcolor, hovercolor='0.975')\n self.button_next.on_clicked(self.next_episode_num)\n\n # Timer (No idea what this is about)\n self.timer = self.fig.canvas.new_timer(interval=1000)\n self.timer.add_callback(self.check_update)\n self.timer.start()\n\n # Progress data (The graph below)\n self.axprogress.set_xmargin(0)\n self.axprogress.set_xlabel('Episodes')\n self.axprogress.set_ylabel('Reward')\n self.axprogress.grid(True)\n self.plot_progress()\n\n # Plot latest episode 
(the various graphs above)\n self.update_episode(self.num_episodes - 1)\n\n plt.show()\n\n def check_update(self):\n if self.read_monitor_file():\n self.plot_progress()\n\n def plot_progress(self):\n # Redraw all lines\n self.axprogress.lines = []\n self.axprogress.plot(self.reward, color='#1f77b4', label='Reward')\n self.axprogress.legend()\n # Redraw slider\n if self.sl_episode is None or int(round(self.sl_episode.val)) == self.num_episodes - 2:\n cur_ep = self.num_episodes - 1\n else:\n cur_ep = int(round(self.sl_episode.val))\n self.axslider.clear()\n self.sl_episode = Slider(self.axslider, 'Episode (0..{})'.format(self.num_episodes - 1), 0,\n self.num_episodes - 1, valinit=cur_ep, valfmt='%6.0f')\n self.sl_episode.on_changed(self.set_episode_num)\n\n def read_monitor_file(self):\n # For the very first call, Wait until monitor.csv is created\n if self.timestamp_csv is None:\n while not os.path.isfile(self.monitor_file):\n time.sleep(1)\n self.timestamp_csv = os.stat(\n self.monitor_file).st_mtime - 1 # '-1' is a hack to prevent losing the first set of data\n\n num_ep = 0\n ts = os.stat(self.monitor_file).st_mtime\n if ts > self.timestamp_csv:\n # Monitor file is updated.\n self.timestamp_csv = ts\n f = open(self.monitor_file)\n firstline = f.readline()\n assert firstline.startswith('#')\n metadata = json.loads(firstline[1:])\n assert metadata['env_id'] == \"EnergyPlus-v0\"\n assert set(metadata.keys()) == {'env_id', 't_start'}, \"Incorrect keys in monitor metadata\"\n df = pd.read_csv(f, index_col=None)\n assert set(df.keys()) == {'l', 't', 'r'}, \"Incorrect keys in monitor logline\"\n f.close()\n\n self.reward = []\n self.reward_mean = []\n self.episode_dirs = []\n self.num_episodes = 0\n for rew, len, time_ in zip(df['r'], df['l'], df['t']):\n self.reward.append(rew / len)\n self.reward_mean.append(rew / len)\n self.episode_dirs.append(self.log_dir + '/output/episode-{:08d}'.format(self.num_episodes))\n self.num_episodes += 1\n if self.num_episodes > self.num_episodes_last:\n self.num_episodes_last = self.num_episodes\n return True\n else:\n return False\n\n def update_episode(self, ep):\n self.plot_episode(ep)\n\n def set_episode_num(self, val):\n ep = int(round(self.sl_episode.val))\n self.update_episode(ep)\n\n def first_episode_num(self, val):\n self.sl_episode.set_val(0)\n\n def last_episode_num(self, val):\n self.sl_episode.set_val(self.num_episodes - 1)\n\n def prev_episode_num(self, val):\n ep = int(round(self.sl_episode.val))\n if ep > 0:\n ep -= 1\n self.sl_episode.set_val(ep)\n\n def next_episode_num(self, val):\n ep = int(round(self.sl_episode.val))\n if ep < self.num_episodes - 1:\n ep += 1\n self.sl_episode.set_val(ep)\n\n # Prints data statistics (average, minimum, maximum and standard deviation)\n\n def show_statistics(self, title, series):\n print('{:25} ave={:5,.2f}, min={:5,.2f}, max={:5,.2f}, std={:5,.2f}'.format(title, np.average(series),\n np.min(series), np.max(series),\n np.std(series)))\n\n def get_statistics(self, series):\n return np.average(series), np.min(series), np.max(series), np.std(series)\n\n # Prints the distribution of the temperatures, between 17-28 degrees for each decimal\n\n def show_distrib(self, title, series):\n dist = [0 for i in range(1000)]\n for v in series:\n idx = int(math.floor(v * 10))\n if idx >= 1000:\n idx = 999\n if idx < 0:\n idx = 0\n dist[idx] += 1\n print(title)\n print(' degree 0.0-0.9 0.0 0.1 0.2 0.3 0.4 0.5 0.6 0.7 0.8 0.9')\n print(' -------------------------------------------------------------------------')\n for t in 
range(170, 280, 10):\n print(' {:4.1f}C {:5.1%} '.format(t / 10.0, sum(dist[t:(t + 10)]) / len(series)), end='')\n for tt in range(t, t + 10):\n print(' {:5.1%}'.format(dist[tt] / len(series)), end='')\n print('')\n\n def get_episode_list(self, log_dir='', csv_file=''):\n if (log_dir is not '' and csv_file is not '') or (log_dir is '' and csv_file is ''):\n print('Either one of log_dir or csv_file must be specified')\n quit()\n if log_dir is not '':\n if not os.path.isdir(log_dir):\n print('energyplus_model.dump: {} is not a directory'.format(log_dir))\n return\n print('energyplus_plot.dump: log={}'.format(log_dir))\n # self.log_dir = log_dir\n\n # Make a list of all episodes\n # Note: Somethimes csv file is missing in the episode directories\n # We accept gziped csv file also.\n csv_list = glob(log_dir + '/output/episode-????????/eplusout.csv') \\\n + glob(log_dir + '/output/episode-????????/eplusout.csv.gz')\n self.episode_dirs = list(set([os.path.dirname(i) for i in csv_list]))\n self.episode_dirs.sort()\n self.num_episodes = len(self.episode_dirs)\n else: # csv_file != ''\n self.episode_dirs = [os.path.dirname(csv_file)]\n self.num_episodes = len(self.episode_dirs)\n\n # Model dependent methods\n @abstractmethod\n def read_episode(self, ep):\n pass\n\n @abstractmethod\n def plot_episode(self, ep):\n pass\n\n @abstractmethod\n def dump_timesteps(self, log_dir='', csv_file='', **kwargs):\n pass\n\n @abstractmethod\n def dump_episodes(self, log_dir='', csv_file='', **kwargs):\n pass\n" ]
[ [ "pandas.read_csv", "numpy.clip", "matplotlib.widgets.Button", "numpy.min", "numpy.max", "numpy.std", "numpy.average", "matplotlib.pyplot.rcdefaults", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
anishjain18/Character-level-language-modeling
[ "deb544ba4ce726c1342fde98c0a2b9f7e13ea5d0" ]
[ "Character level language modeling/utils.py" ]
[ "import numpy as np\n\ndef softmax(x):\n e_x = np.exp(x - np.max(x))\n return e_x / e_x.sum(axis=0)\n\ndef smooth(loss, cur_loss):\n return loss * 0.999 + cur_loss * 0.001\n\ndef print_sample(sample_ix, ix_to_char):\n txt = ''.join(ix_to_char[ix] for ix in sample_ix)\n txt = txt[0].upper() + txt[1:] # capitalize first character \n print ('%s' % (txt, ), end='')\n \n\ndef get_sample(sample_ix, ix_to_char):\n txt = ''.join(ix_to_char[ix] for ix in sample_ix)\n txt = txt[0].upper() + txt[1:] # capitalize first character \n return txt\n\ndef get_initial_loss(vocab_size, seq_length):\n return -np.log(1.0/vocab_size)*seq_length\n\ndef softmax(x):\n e_x = np.exp(x - np.max(x))\n return e_x / e_x.sum(axis=0)\n\ndef initialize_parameters(n_a, n_x, n_y):\n \"\"\"\n Initialize parameters with small random values\n \n Returns:\n parameters -- python dictionary containing:\n Wax -- Weight matrix multiplying the input, numpy array of shape (n_a, n_x)\n Waa -- Weight matrix multiplying the hidden state, numpy array of shape (n_a, n_a)\n Wya -- Weight matrix relating the hidden-state to the output, numpy array of shape (n_y, n_a)\n b -- Bias, numpy array of shape (n_a, 1)\n by -- Bias relating the hidden-state to the output, numpy array of shape (n_y, 1)\n \"\"\"\n np.random.seed(1)\n Wax = np.random.randn(n_a, n_x)*0.01 # input to hidden\n Waa = np.random.randn(n_a, n_a)*0.01 # hidden to hidden\n Wya = np.random.randn(n_y, n_a)*0.01 # hidden to output\n b = np.zeros((n_a, 1)) # hidden bias\n by = np.zeros((n_y, 1)) # output bias\n \n parameters = {\"Wax\": Wax, \"Waa\": Waa, \"Wya\": Wya, \"b\": b,\"by\": by}\n \n return parameters\n\ndef rnn_step_forward(parameters, a_prev, x):\n \n Waa, Wax, Wya, by, b = parameters['Waa'], parameters['Wax'], parameters['Wya'], parameters['by'], parameters['b']\n a_next = np.tanh(np.dot(Wax, x) + np.dot(Waa, a_prev) + b) # hidden state\n p_t = softmax(np.dot(Wya, a_next) + by) # unnormalized log probabilities for next chars # probabilities for next chars \n \n return a_next, p_t\n\ndef rnn_step_backward(dy, gradients, parameters, x, a, a_prev):\n \n gradients['dWya'] += np.dot(dy, a.T)\n gradients['dby'] += dy\n da = np.dot(parameters['Wya'].T, dy) + gradients['da_next'] # backprop into h\n daraw = (1 - a * a) * da # backprop through tanh nonlinearity\n gradients['db'] += daraw\n gradients['dWax'] += np.dot(daraw, x.T)\n gradients['dWaa'] += np.dot(daraw, a_prev.T)\n gradients['da_next'] = np.dot(parameters['Waa'].T, daraw)\n return gradients\n\ndef update_parameters(parameters, gradients, lr):\n\n parameters['Wax'] += -lr * gradients['dWax']\n parameters['Waa'] += -lr * gradients['dWaa']\n parameters['Wya'] += -lr * gradients['dWya']\n parameters['b'] += -lr * gradients['db']\n parameters['by'] += -lr * gradients['dby']\n return parameters\n\ndef rnn_forward(X, Y, a0, parameters, vocab_size = 27):\n \n # Initialize x, a and y_hat as empty dictionaries\n x, a, y_hat = {}, {}, {}\n \n a[-1] = np.copy(a0)\n \n # initialize your loss to 0\n loss = 0\n \n for t in range(len(X)):\n \n # Set x[t] to be the one-hot vector representation of the t'th character in X.\n # if X[t] == None, we just have x[t]=0. This is used to set the input for the first timestep to the zero vector. 
\n x[t] = np.zeros((vocab_size,1)) \n if (X[t] != None):\n x[t][X[t]] = 1\n \n # Run one step forward of the RNN\n a[t], y_hat[t] = rnn_step_forward(parameters, a[t-1], x[t])\n \n # Update the loss by substracting the cross-entropy term of this time-step from it.\n loss -= np.log(y_hat[t][Y[t],0])\n \n cache = (y_hat, a, x)\n \n return loss, cache\n\ndef rnn_backward(X, Y, parameters, cache):\n # Initialize gradients as an empty dictionary\n gradients = {}\n \n # Retrieve from cache and parameters\n (y_hat, a, x) = cache\n Waa, Wax, Wya, by, b = parameters['Waa'], parameters['Wax'], parameters['Wya'], parameters['by'], parameters['b']\n \n # each one should be initialized to zeros of the same dimension as its corresponding parameter\n gradients['dWax'], gradients['dWaa'], gradients['dWya'] = np.zeros_like(Wax), np.zeros_like(Waa), np.zeros_like(Wya)\n gradients['db'], gradients['dby'] = np.zeros_like(b), np.zeros_like(by)\n gradients['da_next'] = np.zeros_like(a[0])\n \n ### START CODE HERE ###\n # Backpropagate through time\n for t in reversed(range(len(X))):\n dy = np.copy(y_hat[t])\n dy[Y[t]] -= 1\n gradients = rnn_step_backward(dy, gradients, parameters, x[t], a[t], a[t-1])\n ### END CODE HERE ###\n \n return gradients, a\n\n" ]
[ [ "numpy.dot", "numpy.log", "numpy.random.seed", "numpy.max", "numpy.copy", "numpy.zeros_like", "numpy.random.randn", "numpy.zeros" ] ]
LoniQin/tensorflow
[ "b632883f3a009a4fb2b7b3d2108f7d66971e3785" ]
[ "tensorflow/python/keras/distribute/custom_training_loop_metrics_test.py" ]
[ "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for custom training loops.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl.testing import parameterized\nimport numpy as np\n\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.distribute import combinations as ds_combinations\nfrom tensorflow.python.distribute import multi_process_runner\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import test_combinations as combinations\nfrom tensorflow.python.keras import metrics\nfrom tensorflow.python.keras.distribute import strategy_combinations\nfrom tensorflow.python.platform import test\n\n\nclass KerasMetricsTest(test.TestCase, parameterized.TestCase):\n\n @ds_combinations.generate(\n combinations.combine(\n distribution=strategy_combinations.all_strategies +\n strategy_combinations.multiworker_strategies,\n mode=[\"eager\"]\n ))\n def test_multiple_keras_metrics_experimental_run(self, distribution):\n with distribution.scope():\n loss_metric = metrics.Mean(\"loss\", dtype=np.float32)\n loss_metric_2 = metrics.Mean(\"loss_2\", dtype=np.float32)\n\n @def_function.function\n def train_step():\n def step_fn():\n loss = constant_op.constant(5.0, dtype=np.float32)\n loss_metric.update_state(loss)\n loss_metric_2.update_state(loss)\n\n distribution.run(step_fn)\n\n train_step()\n self.assertEqual(loss_metric.result().numpy(),\n loss_metric_2.result().numpy())\n self.assertEqual(loss_metric.result().numpy(), 5.0)\n\n @ds_combinations.generate(\n combinations.combine(\n distribution=strategy_combinations.all_strategies+\n strategy_combinations.multiworker_strategies,\n mode=[\"eager\"]\n ))\n def test_update_keras_metric_declared_in_strategy_scope(self, distribution):\n with distribution.scope():\n metric = metrics.Mean(\"test_metric\", dtype=np.float32)\n\n dataset = dataset_ops.Dataset.range(10).batch(2)\n dataset = distribution.experimental_distribute_dataset(dataset)\n\n @def_function.function\n def step_fn(i):\n metric.update_state(i)\n\n for i in dataset:\n distribution.run(step_fn, args=(i,))\n\n # This should be the mean of integers 0-9 which has a sum of 45 and a count\n # of 10 resulting in mean of 4.5.\n self.assertEqual(metric.result().numpy(), 4.5)\n\n @ds_combinations.generate(\n combinations.combine(\n distribution=strategy_combinations.all_strategies,\n mode=[\"eager\"]\n ))\n def test_update_keras_metric_outside_strategy_scope_cross_replica(\n self, distribution):\n metric = metrics.Mean(\"test_metric\", dtype=np.float32)\n\n with distribution.scope():\n for i in range(10):\n metric.update_state(i)\n\n # This should be the mean of integers 0-9 which has a sum of 45 and a count\n # of 10 resulting in mean of 4.5.\n 
self.assertEqual(metric.result().numpy(), 4.5)\n\n\nif __name__ == \"__main__\":\n multi_process_runner.test_main()\n" ]
[ [ "tensorflow.python.distribute.multi_process_runner.test_main", "tensorflow.python.keras.metrics.Mean", "tensorflow.python.data.ops.dataset_ops.Dataset.range", "tensorflow.python.framework.test_combinations.combine", "tensorflow.python.framework.constant_op.constant" ] ]
maromaSamsa/mobile-semantic-segmentation-master
[ "2cb772f0f17b3c810eae0751dcf3776b0442bb9b" ]
[ "src/mylib/torch/functional.py" ]
[ "import torch\nimport torch.nn.functional as F\n\n\n@torch.jit.script\ndef calculate_distances(p0: torch.Tensor, p1: torch.Tensor) -> torch.Tensor:\n # ReLU prevents negative numbers in sqrt\n Dij = torch.sqrt(F.relu(torch.sum((p0 - p1) ** 2, -1)))\n return Dij\n\n\ndef calculate_torsions(p0: torch.Tensor, p1: torch.Tensor, p2: torch.Tensor, p3: torch.Tensor) -> torch.Tensor:\n b0 = -1.0 * (p1 - p0)\n b1 = p2 - p1\n b2 = p3 - p2\n\n if p0.dim() == 1:\n b1 /= b1.norm()\n else:\n b1 /= b1.norm(dim=1)[:, None]\n\n v = b0 - torch.sum(b0 * b1, dim=-1, keepdim=True) * b1\n w = b2 - torch.sum(b2 * b1, dim=-1, keepdim=True) * b1\n\n x = torch.sum(v * w, dim=-1)\n y = torch.sum(torch.cross(b1, v) * w, dim=-1)\n\n return torch.atan2(y, x)\n\n\n# %%\nif __name__ == '__main__':\n # %%\n coords = torch.tensor([[10.396, 18.691, 19.127],\n [9.902, 18.231, 20.266],\n [8.736, 17.274, 20.226],\n [7.471, 18.048, 19.846]])\n coords2 = torch.tensor([[7.471, 18.048, 19.846],\n [6.67, 17.583, 18.852],\n [5.494, 18.412, 18.503],\n [4.59, 18.735, 19.711]])\n\n print(calculate_torsions(*coords))\n print(calculate_torsions(*coords2))\n # %%\n # calculate_torsions(*coords[:, None, :])\n a = torch.cat((coords, coords2), 1).reshape(4, -1, 3)\n print(calculate_torsions(*a))\n" ]
[ [ "torch.cat", "torch.sum", "torch.tensor", "torch.atan2", "torch.cross" ] ]
brandontrabucco/bvn
[ "ea8c3d3de590f4127505e33f41bb0da8bdfc27be" ]
[ "bvn/__init__.py" ]
[ "from bvn.bvn_step import bvn_step\r\nimport tensorflow as tf\r\nimport numpy as np\r\n\r\n\r\nTOLERANCE = np.finfo(np.float).eps * 10.\r\n\r\n\r\n@tf.function(input_signature=[\r\n tf.TensorSpec(shape=[None, None, None], dtype=tf.float32),\r\n tf.TensorSpec(shape=None, dtype=tf.int32)])\r\ndef bvn(x, max_iterations):\r\n \"\"\"Returns the Berkhoff-Von-Neumann decomposition of a permutation matrix\r\n using the greedy birkhoff heuristic\r\n\r\n Arguments:\r\n\r\n x: tf.Tensor\r\n a soft permutation matrix in the Birkhoff-Polytope whose shape is\r\n like [batch_dim, sequence_len, sequence_len]\r\n max_iterations: int\r\n the maximum number of matrices to compose to reconstruct\r\n the doubly stochastic matrix x\r\n\r\n Returns:\r\n\r\n permutations: tf.Tensor\r\n a tensor containing the Berkhoff-Von-Neumann perumtation matrices\r\n found using the Berkhoff-Von-Neumann decomposition\r\n shapes like [batch_dim, num_permutations, sequence_len, sequence_len]\r\n coefficients: tf.Tensor\r\n a tensor containing the Berkhoff-Von-Neumann coefficients\r\n found using the Berkhoff-Von-Neumann decomposition\r\n shapes like [batch_dim, num_permutations]\"\"\"\r\n b, n = tf.shape(x)[0], tf.cast(tf.shape(x)[2], tf.float32)\r\n x = x * n\r\n\r\n # keep track of a sequence of all permutations and coefficients\r\n coefficients = tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True)\r\n permutations = tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True)\r\n\r\n j = tf.constant(-1)\r\n d = tf.reduce_all(tf.equal(x, 0), axis=[1, 2])\r\n\r\n # all permutations with coefficient 0 are set to the identity matrix\r\n eye_matrix = tf.eye(tf.shape(x)[2], batch_shape=[b])\r\n\r\n while tf.logical_and(tf.logical_not(\r\n tf.reduce_all(d)), tf.less(j + 1, max_iterations)):\r\n j = j + 1\r\n\r\n # compute the permutation matrix whose coefficient is maximum\r\n # we are done if the coefficient is zero\r\n p, c = bvn_step(x)\r\n d = tf.logical_or(d, tf.equal(c, tf.zeros_like(c)))\r\n\r\n # when we are done set the permutation to the identity matrix and\r\n # the coefficient to zero\r\n p = tf.where(d[:, tf.newaxis, tf.newaxis], eye_matrix, p)\r\n c = tf.where(d, tf.zeros_like(c), c)\r\n\r\n # iteratively subtract from the source matrix x until that matrix\r\n # is approximately zero everywhere\r\n x = x - c[:, tf.newaxis, tf.newaxis] * p\r\n x = tf.where(tf.less(tf.abs(x), TOLERANCE), tf.zeros_like(x), x)\r\n d = tf.logical_or(d, tf.reduce_all(tf.equal(x, 0), axis=[1, 2]))\r\n\r\n permutations = permutations.write(j, p)\r\n coefficients = coefficients.write(j, c)\r\n\r\n # the num_permutations axis is first and needs to be transposed\r\n return (tf.transpose(permutations.stack(), [1, 0, 2, 3]),\r\n tf.transpose(coefficients.stack(), [1, 0]) / n)\r\n" ]
[ [ "tensorflow.constant", "tensorflow.less", "tensorflow.TensorArray", "tensorflow.shape", "tensorflow.equal", "numpy.finfo", "tensorflow.zeros_like", "tensorflow.where", "tensorflow.reduce_all", "tensorflow.TensorSpec", "tensorflow.abs" ] ]
juexinwang/scGNN
[ "09ac50ba0a2bbf87535613a2d284872074804d02" ]
[ "Preprocessing_benchmark.py" ]
[ "# For benchmark preprocessing usage:\n#\n# python Preprocessing_scFile.py --inputfile /home/wangjue/biodata/scData/allBench/9.Chung/T2000_UsingOriginalMatrix/T2000_expression.txt --outputfile /home/wangjue/biodata/scData/9.Chung.csv --cellcount 317 --genecount 2000 --split space --cellheadflag False\n# python Preprocessing_scFile.py --inputfile /home/wangjue/biodata/scData/allBench/11.Kolodziejczyk/T2000_UsingOriginalMatrix/T2000_expression.txt --outputfile /home/wangjue/biodata/scData/11.Kolodziejczyk.csv --cellcount 704 --genecount 2000 --split space --cellheadflag False\n# python Preprocessing_scFile.py --inputfile /home/wangjue/biodata/scData/allBench/12.Klein/T2000_UsingOriginalMatrix/T2000_expression.txt --outputfile /home/wangjue/biodata/scData/12.Klein.csv --cellcount 2717 --genecount 2000 --split space --cellheadflag False\n# python Preprocessing_scFile.py --inputfile /home/wangjue/biodata/scData/allBench/13.Zeisel/T2000_UsingOriginalMatrix/T2000_expression.txt --outputfile /home/wangjue/biodata/scData/13.Zeisel.csv --cellcount 3005 --genecount 2000 --split space --cellheadflag False\n\nimport numpy as np\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--inputfile', type=str, default='/home/wangjue/biodata/scData/scRNA_And_scATAC_Files/Processed_Data/GeneSymbolMat.dic',\n help='inputfile name')\nparser.add_argument('--outputfile', type=str, default='/home/wangjue/biodata/scData/sci-CAR_LTMG.csv',\n help='outputfile name')\nparser.add_argument('--cellcount', type=int, default=317,\n help='total cell count')\nparser.add_argument('--genecount', type=int, default=2000,\n help='total gene count')\nparser.add_argument('--split', type=str, default='space',\n help='comma/blank')\nparser.add_argument('--cellheadflag', type=bool, default=False,\n help='True/False')\nargs = parser.parse_args()\n\ninputfile = args.inputfile\noutputfile = args.outputfile\ncellcount = args.cellcount\ngenecount = args.genecount\nsplitChar = ''\nif args.split == 'space':\n splitChar = ''\nelif args.split == 'comma':\n splitChar = ',' \n\ngeneNamesLine = ''\n\n#cell as the row, col as the gene\ncontentArray = [[0.0] * genecount for i in range(cellcount)]\ncontentArray = np.asarray(contentArray)\n\ncount = -1\nwith open(inputfile, 'r') as f:\n lines = f.readlines()\n for line in lines:\n line = line.strip()\n if splitChar == '':\n words = line.split()\n else:\n words = line.split(splitChar)\n if count == -1:\n colcount = -1\n for word in words:\n colcount += 1\n else:\n colcount = -1\n for word in words:\n if colcount == -1:\n geneNamesLine = geneNamesLine + word + ','\n else:\n contentArray[colcount,count] = word\n colcount+=1\n count += 1\n f.close()\n\nwith open(outputfile, 'w') as fw:\n fw.write(geneNamesLine[:-1]+'\\n')\n for i in range(contentArray.shape[0]):\n tmpStr = ''\n for j in range(contentArray.shape[1]):\n tmpStr = tmpStr + str(contentArray[i][j])+','\n fw.write(tmpStr[:-1]+'\\n')\n fw.close()\n" ]
[ [ "numpy.asarray" ] ]
Imanflow/pandas
[ "c124e475d0c0798e3bf4c5e947ebac7bd2f1232c" ]
[ "pandas/core/strings.py" ]
[ "import numpy as np\n\nfrom pandas.compat import zip\nfrom pandas.core.dtypes.generic import ABCSeries, ABCIndex\nfrom pandas.core.dtypes.missing import isna, notna\nfrom pandas.core.dtypes.common import (\n is_bool_dtype,\n is_categorical_dtype,\n is_object_dtype,\n is_string_like,\n is_list_like,\n is_scalar,\n is_integer,\n is_re)\n\nimport pandas.core.common as com\nfrom pandas.core.algorithms import take_1d\nimport pandas.compat as compat\nfrom pandas.core.base import NoNewAttributesMixin\nfrom pandas.util._decorators import Appender\nimport re\nimport pandas._libs.lib as lib\nimport pandas._libs.ops as libops\nimport warnings\nimport textwrap\nimport codecs\n\n_cpython_optimized_encoders = (\n \"utf-8\", \"utf8\", \"latin-1\", \"latin1\", \"iso-8859-1\", \"mbcs\", \"ascii\"\n)\n_cpython_optimized_decoders = _cpython_optimized_encoders + (\n \"utf-16\", \"utf-32\"\n)\n\n_shared_docs = dict()\n\n\ndef _get_array_list(arr, others):\n from pandas.core.series import Series\n\n if len(others) and isinstance(com._values_from_object(others)[0],\n (list, np.ndarray, Series)):\n arrays = [arr] + list(others)\n else:\n arrays = [arr, others]\n\n return [np.asarray(x, dtype=object) for x in arrays]\n\n\ndef str_cat(arr, others=None, sep=None, na_rep=None):\n \"\"\"\n Concatenate strings in the Series/Index with given separator.\n\n If `others` is specified, this function concatenates the Series/Index\n and elements of `others` element-wise.\n If `others` is not being passed then all values in the Series are\n concatenated in a single string with a given `sep`.\n\n Parameters\n ----------\n others : list-like, or list of list-likes, optional\n List-likes (or a list of them) of the same length as calling object.\n If None, returns str concatenating strings of the Series.\n sep : string or None, default None\n If None, concatenates without any separator.\n na_rep : string or None, default None\n If None, NA in the series are ignored.\n\n Returns\n -------\n concat : Series/Index of objects or str\n\n See Also\n --------\n split : Split each string in the Series/Index\n\n Examples\n --------\n When not passing `other`, all values are concatenated into a single\n string:\n\n >>> s = pd.Series(['a', 'b', np.nan, 'c'])\n >>> s.str.cat(sep=' ')\n 'a b c'\n\n By default, NA values in the Series are ignored. Using `na_rep`, they\n can be given a representation:\n\n >>> pd.Series(['a', 'b', np.nan, 'c']).str.cat(sep=' ', na_rep='?')\n 'a b ? c'\n\n If `others` is specified, corresponding values are\n concatenated with the separator. 
Result will be a Series of strings.\n\n >>> pd.Series(['a', 'b', 'c']).str.cat(['A', 'B', 'C'], sep=',')\n 0 a,A\n 1 b,B\n 2 c,C\n dtype: object\n\n Also, you can pass a list of list-likes.\n\n >>> pd.Series(['a', 'b']).str.cat([['x', 'y'], ['1', '2']], sep=',')\n 0 a,x,1\n 1 b,y,2\n dtype: object\n \"\"\"\n if sep is None:\n sep = ''\n\n if others is not None:\n arrays = _get_array_list(arr, others)\n\n n = _length_check(arrays)\n masks = np.array([isna(x) for x in arrays])\n cats = None\n\n if na_rep is None:\n na_mask = np.logical_or.reduce(masks, axis=0)\n\n result = np.empty(n, dtype=object)\n np.putmask(result, na_mask, np.nan)\n\n notmask = ~na_mask\n\n tuples = zip(*[x[notmask] for x in arrays])\n cats = [sep.join(tup) for tup in tuples]\n\n result[notmask] = cats\n else:\n for i, x in enumerate(arrays):\n x = np.where(masks[i], na_rep, x)\n if cats is None:\n cats = x\n else:\n cats = cats + sep + x\n\n result = cats\n\n return result\n else:\n arr = np.asarray(arr, dtype=object)\n mask = isna(arr)\n if na_rep is None and mask.any():\n if sep == '':\n na_rep = ''\n else:\n return sep.join(arr[notna(arr)])\n return sep.join(np.where(mask, na_rep, arr))\n\n\ndef _length_check(others):\n n = None\n for x in others:\n try:\n if n is None:\n n = len(x)\n elif len(x) != n:\n raise ValueError('All arrays must be same length')\n except TypeError:\n raise ValueError(\"Did you mean to supply a `sep` keyword?\")\n return n\n\n\ndef _na_map(f, arr, na_result=np.nan, dtype=object):\n # should really _check_ for NA\n return _map(f, arr, na_mask=True, na_value=na_result, dtype=dtype)\n\n\ndef _map(f, arr, na_mask=False, na_value=np.nan, dtype=object):\n if not len(arr):\n return np.ndarray(0, dtype=dtype)\n\n if isinstance(arr, ABCSeries):\n arr = arr.values\n if not isinstance(arr, np.ndarray):\n arr = np.asarray(arr, dtype=object)\n if na_mask:\n mask = isna(arr)\n try:\n convert = not all(mask)\n result = lib.map_infer_mask(arr, f, mask.view(np.uint8), convert)\n except (TypeError, AttributeError) as e:\n # Reraise the exception if callable `f` got wrong number of args.\n # The user may want to be warned by this, instead of getting NaN\n if compat.PY2:\n p_err = r'takes (no|(exactly|at (least|most)) ?\\d+) arguments?'\n else:\n p_err = (r'((takes)|(missing)) (?(2)from \\d+ to )?\\d+ '\n r'(?(3)required )positional arguments?')\n\n if len(e.args) >= 1 and re.search(p_err, e.args[0]):\n raise e\n\n def g(x):\n try:\n return f(x)\n except (TypeError, AttributeError):\n return na_value\n\n return _map(g, arr, dtype=dtype)\n if na_value is not np.nan:\n np.putmask(result, mask, na_value)\n if result.dtype == object:\n result = lib.maybe_convert_objects(result)\n return result\n else:\n return lib.map_infer(arr, f)\n\n\ndef str_count(arr, pat, flags=0):\n \"\"\"\n Count occurrences of pattern in each string of the Series/Index.\n\n This function is used to count the number of times a particular regex\n pattern is repeated in each of the string elements of the\n :class:`~pandas.Series`.\n\n Parameters\n ----------\n pat : str\n Valid regular expression.\n flags : int, default 0, meaning no flags\n Flags for the `re` module. For a complete list, `see here\n <https://docs.python.org/3/howto/regex.html#compilation-flags>`_.\n **kwargs\n For compatability with other string methods. Not used.\n\n Returns\n -------\n counts : Series or Index\n Same type as the calling object containing the integer counts.\n\n Notes\n -----\n Some characters need to be escaped when passing in `pat`.\n eg. 
``'$'`` has a special meaning in regex and must be escaped when\n finding this literal character.\n\n See Also\n --------\n re : Standard library module for regular expressions.\n str.count : Standard library version, without regular expression support.\n\n Examples\n --------\n >>> s = pd.Series(['A', 'B', 'Aaba', 'Baca', np.nan, 'CABA', 'cat'])\n >>> s.str.count('a')\n 0 0.0\n 1 0.0\n 2 2.0\n 3 2.0\n 4 NaN\n 5 0.0\n 6 1.0\n dtype: float64\n\n Escape ``'$'`` to find the literal dollar sign.\n\n >>> s = pd.Series(['$', 'B', 'Aab$', '$$ca', 'C$B$', 'cat'])\n >>> s.str.count('\\$')\n 0 1\n 1 0\n 2 1\n 3 2\n 4 2\n 5 0\n dtype: int64\n\n This is also available on Index\n\n >>> pd.Index(['A', 'A', 'Aaba', 'cat']).str.count('a')\n Int64Index([0, 0, 2, 1], dtype='int64')\n \"\"\"\n regex = re.compile(pat, flags=flags)\n f = lambda x: len(regex.findall(x))\n return _na_map(f, arr, dtype=int)\n\n\ndef str_contains(arr, pat, case=True, flags=0, na=np.nan, regex=True):\n \"\"\"\n Return boolean Series/``array`` whether given pattern/regex is\n contained in each string in the Series/Index.\n\n Parameters\n ----------\n pat : string\n Character sequence or regular expression\n case : boolean, default True\n If True, case sensitive\n flags : int, default 0 (no flags)\n re module flags, e.g. re.IGNORECASE\n na : default NaN, fill value for missing values.\n regex : bool, default True\n If True use re.search, otherwise use Python in operator\n\n Returns\n -------\n contained : Series/array of boolean values\n\n See Also\n --------\n match : analogous, but stricter, relying on re.match instead of re.search\n\n \"\"\"\n if regex:\n if not case:\n flags |= re.IGNORECASE\n\n regex = re.compile(pat, flags=flags)\n\n if regex.groups > 0:\n warnings.warn(\"This pattern has match groups. To actually get the\"\n \" groups, use str.extract.\", UserWarning,\n stacklevel=3)\n\n f = lambda x: bool(regex.search(x))\n else:\n if case:\n f = lambda x: pat in x\n else:\n upper_pat = pat.upper()\n f = lambda x: upper_pat in x\n uppered = _na_map(lambda x: x.upper(), arr)\n return _na_map(f, uppered, na, dtype=bool)\n return _na_map(f, arr, na, dtype=bool)\n\n\ndef str_startswith(arr, pat, na=np.nan):\n \"\"\"\n Return boolean Series/``array`` indicating whether each string in the\n Series/Index starts with passed pattern. Equivalent to\n :meth:`str.startswith`.\n\n Parameters\n ----------\n pat : string\n Character sequence\n na : bool, default NaN\n\n Returns\n -------\n startswith : Series/array of boolean values\n \"\"\"\n f = lambda x: x.startswith(pat)\n return _na_map(f, arr, na, dtype=bool)\n\n\ndef str_endswith(arr, pat, na=np.nan):\n \"\"\"\n Return boolean Series indicating whether each string in the\n Series/Index ends with passed pattern. Equivalent to\n :meth:`str.endswith`.\n\n Parameters\n ----------\n pat : string\n Character sequence\n na : bool, default NaN\n\n Returns\n -------\n endswith : Series/array of boolean values\n \"\"\"\n f = lambda x: x.endswith(pat)\n return _na_map(f, arr, na, dtype=bool)\n\n\ndef str_replace(arr, pat, repl, n=-1, case=None, flags=0, regex=True):\n r\"\"\"\n Replace occurrences of pattern/regex in the Series/Index with\n some other string. Equivalent to :meth:`str.replace` or\n :func:`re.sub`.\n\n Parameters\n ----------\n pat : string or compiled regex\n String can be a character sequence or regular expression.\n\n .. versionadded:: 0.20.0\n `pat` also accepts a compiled regex.\n\n repl : string or callable\n Replacement string or a callable. 
The callable is passed the regex\n match object and must return a replacement string to be used.\n See :func:`re.sub`.\n\n .. versionadded:: 0.20.0\n `repl` also accepts a callable.\n\n n : int, default -1 (all)\n Number of replacements to make from start\n case : boolean, default None\n - If True, case sensitive (the default if `pat` is a string)\n - Set to False for case insensitive\n - Cannot be set if `pat` is a compiled regex\n flags : int, default 0 (no flags)\n - re module flags, e.g. re.IGNORECASE\n - Cannot be set if `pat` is a compiled regex\n regex : boolean, default True\n - If True, assumes the passed-in pattern is a regular expression.\n - If False, treats the pattern as a literal string\n - Cannot be set to False if `pat` is a compiled regex or `repl` is\n a callable.\n\n .. versionadded:: 0.23.0\n\n Returns\n -------\n replaced : Series/Index of objects\n\n Raises\n ------\n ValueError\n * if `regex` is False and `repl` is a callable or `pat` is a compiled\n regex\n * if `pat` is a compiled regex and `case` or `flags` is set\n\n Notes\n -----\n When `pat` is a compiled regex, all flags should be included in the\n compiled regex. Use of `case`, `flags`, or `regex=False` with a compiled\n regex will raise an error.\n\n Examples\n --------\n When `pat` is a string and `regex` is True (the default), the given `pat`\n is compiled as a regex. When `repl` is a string, it replaces matching\n regex patterns as with :meth:`re.sub`. NaN value(s) in the Series are\n left as is:\n\n >>> pd.Series(['foo', 'fuz', np.nan]).str.replace('f.', 'ba', regex=True)\n 0 bao\n 1 baz\n 2 NaN\n dtype: object\n\n When `pat` is a string and `regex` is False, every `pat` is replaced with\n `repl` as with :meth:`str.replace`:\n\n >>> pd.Series(['f.o', 'fuz', np.nan]).str.replace('f.', 'ba', regex=False)\n 0 bao\n 1 fuz\n 2 NaN\n dtype: object\n\n When `repl` is a callable, it is called on every `pat` using\n :func:`re.sub`. 
The callable should expect one positional argument\n (a regex object) and return a string.\n\n To get the idea:\n\n >>> pd.Series(['foo', 'fuz', np.nan]).str.replace('f', repr)\n 0 <_sre.SRE_Match object; span=(0, 1), match='f'>oo\n 1 <_sre.SRE_Match object; span=(0, 1), match='f'>uz\n 2 NaN\n dtype: object\n\n Reverse every lowercase alphabetic word:\n\n >>> repl = lambda m: m.group(0)[::-1]\n >>> pd.Series(['foo 123', 'bar baz', np.nan]).str.replace(r'[a-z]+', repl)\n 0 oof 123\n 1 rab zab\n 2 NaN\n dtype: object\n\n Using regex groups (extract second group and swap case):\n\n >>> pat = r\"(?P<one>\\w+) (?P<two>\\w+) (?P<three>\\w+)\"\n >>> repl = lambda m: m.group('two').swapcase()\n >>> pd.Series(['One Two Three', 'Foo Bar Baz']).str.replace(pat, repl)\n 0 tWO\n 1 bAR\n dtype: object\n\n Using a compiled regex with flags\n\n >>> regex_pat = re.compile(r'FUZ', flags=re.IGNORECASE)\n >>> pd.Series(['foo', 'fuz', np.nan]).str.replace(regex_pat, 'bar')\n 0 foo\n 1 bar\n 2 NaN\n dtype: object\n\n \"\"\"\n\n # Check whether repl is valid (GH 13438, GH 15055)\n if not (is_string_like(repl) or callable(repl)):\n raise TypeError(\"repl must be a string or callable\")\n\n is_compiled_re = is_re(pat)\n if regex:\n if is_compiled_re:\n if (case is not None) or (flags != 0):\n raise ValueError(\"case and flags cannot be set\"\n \" when pat is a compiled regex\")\n else:\n # not a compiled regex\n # set default case\n if case is None:\n case = True\n\n # add case flag, if provided\n if case is False:\n flags |= re.IGNORECASE\n if is_compiled_re or len(pat) > 1 or flags or callable(repl):\n n = n if n >= 0 else 0\n compiled = re.compile(pat, flags=flags)\n f = lambda x: compiled.sub(repl=repl, string=x, count=n)\n else:\n f = lambda x: x.replace(pat, repl, n)\n else:\n if is_compiled_re:\n raise ValueError(\"Cannot use a compiled regex as replacement \"\n \"pattern with regex=False\")\n if callable(repl):\n raise ValueError(\"Cannot use a callable replacement when \"\n \"regex=False\")\n f = lambda x: x.replace(pat, repl, n)\n\n return _na_map(f, arr)\n\n\ndef str_repeat(arr, repeats):\n \"\"\"\n Duplicate each string in the Series/Index by indicated number\n of times.\n\n Parameters\n ----------\n repeats : int or array\n Same value for all (int) or different value per (array)\n\n Returns\n -------\n repeated : Series/Index of objects\n \"\"\"\n if is_scalar(repeats):\n\n def rep(x):\n try:\n return compat.binary_type.__mul__(x, repeats)\n except TypeError:\n return compat.text_type.__mul__(x, repeats)\n\n return _na_map(rep, arr)\n else:\n\n def rep(x, r):\n try:\n return compat.binary_type.__mul__(x, r)\n except TypeError:\n return compat.text_type.__mul__(x, r)\n\n repeats = np.asarray(repeats, dtype=object)\n result = libops.vec_binop(com._values_from_object(arr), repeats, rep)\n return result\n\n\ndef str_match(arr, pat, case=True, flags=0, na=np.nan, as_indexer=None):\n \"\"\"\n Determine if each string matches a regular expression.\n\n Parameters\n ----------\n pat : string\n Character sequence or regular expression\n case : boolean, default True\n If True, case sensitive\n flags : int, default 0 (no flags)\n re module flags, e.g. re.IGNORECASE\n na : default NaN, fill value for missing values.\n as_indexer\n .. 
deprecated:: 0.21.0\n\n Returns\n -------\n Series/array of boolean values\n\n See Also\n --------\n contains : analogous, but less strict, relying on re.search instead of\n re.match\n extract : extract matched groups\n\n \"\"\"\n if not case:\n flags |= re.IGNORECASE\n\n regex = re.compile(pat, flags=flags)\n\n if (as_indexer is False) and (regex.groups > 0):\n raise ValueError(\"as_indexer=False with a pattern with groups is no \"\n \"longer supported. Use '.str.extract(pat)' instead\")\n elif as_indexer is not None:\n # Previously, this keyword was used for changing the default but\n # deprecated behaviour. This keyword is now no longer needed.\n warnings.warn(\"'as_indexer' keyword was specified but is ignored \"\n \"(match now returns a boolean indexer by default), \"\n \"and will be removed in a future version.\",\n FutureWarning, stacklevel=3)\n\n dtype = bool\n f = lambda x: bool(regex.match(x))\n\n return _na_map(f, arr, na, dtype=dtype)\n\n\ndef _get_single_group_name(rx):\n try:\n return list(rx.groupindex.keys()).pop()\n except IndexError:\n return None\n\n\ndef _groups_or_na_fun(regex):\n \"\"\"Used in both extract_noexpand and extract_frame\"\"\"\n if regex.groups == 0:\n raise ValueError(\"pattern contains no capture groups\")\n empty_row = [np.nan] * regex.groups\n\n def f(x):\n if not isinstance(x, compat.string_types):\n return empty_row\n m = regex.search(x)\n if m:\n return [np.nan if item is None else item for item in m.groups()]\n else:\n return empty_row\n return f\n\n\ndef _str_extract_noexpand(arr, pat, flags=0):\n \"\"\"\n Find groups in each string in the Series using passed regular\n expression. This function is called from\n str_extract(expand=False), and can return Series, DataFrame, or\n Index.\n\n \"\"\"\n from pandas import DataFrame, Index\n\n regex = re.compile(pat, flags=flags)\n groups_or_na = _groups_or_na_fun(regex)\n\n if regex.groups == 1:\n result = np.array([groups_or_na(val)[0] for val in arr], dtype=object)\n name = _get_single_group_name(regex)\n else:\n if isinstance(arr, Index):\n raise ValueError(\"only one regex group is supported with Index\")\n name = None\n names = dict(zip(regex.groupindex.values(), regex.groupindex.keys()))\n columns = [names.get(1 + i, i) for i in range(regex.groups)]\n if arr.empty:\n result = DataFrame(columns=columns, dtype=object)\n else:\n result = DataFrame(\n [groups_or_na(val) for val in arr],\n columns=columns,\n index=arr.index,\n dtype=object)\n return result, name\n\n\ndef _str_extract_frame(arr, pat, flags=0):\n \"\"\"\n For each subject string in the Series, extract groups from the\n first match of regular expression pat. 
This function is called from\n str_extract(expand=True), and always returns a DataFrame.\n\n \"\"\"\n from pandas import DataFrame\n\n regex = re.compile(pat, flags=flags)\n groups_or_na = _groups_or_na_fun(regex)\n names = dict(zip(regex.groupindex.values(), regex.groupindex.keys()))\n columns = [names.get(1 + i, i) for i in range(regex.groups)]\n\n if len(arr) == 0:\n return DataFrame(columns=columns, dtype=object)\n try:\n result_index = arr.index\n except AttributeError:\n result_index = None\n return DataFrame(\n [groups_or_na(val) for val in arr],\n columns=columns,\n index=result_index,\n dtype=object)\n\n\ndef str_extract(arr, pat, flags=0, expand=True):\n r\"\"\"\n For each subject string in the Series, extract groups from the\n first match of regular expression pat.\n\n Parameters\n ----------\n pat : string\n Regular expression pattern with capturing groups\n flags : int, default 0 (no flags)\n re module flags, e.g. re.IGNORECASE\n\n expand : bool, default True\n * If True, return DataFrame.\n * If False, return Series/Index/DataFrame.\n\n .. versionadded:: 0.18.0\n\n Returns\n -------\n DataFrame with one row for each subject string, and one column for\n each group. Any capture group names in regular expression pat will\n be used for column names; otherwise capture group numbers will be\n used. The dtype of each result column is always object, even when\n no match is found. If expand=False and pat has only one capture group,\n then return a Series (if subject is a Series) or Index (if subject\n is an Index).\n\n See Also\n --------\n extractall : returns all matches (not just the first match)\n\n Examples\n --------\n A pattern with two groups will return a DataFrame with two columns.\n Non-matches will be NaN.\n\n >>> s = Series(['a1', 'b2', 'c3'])\n >>> s.str.extract(r'([ab])(\\d)')\n 0 1\n 0 a 1\n 1 b 2\n 2 NaN NaN\n\n A pattern may contain optional groups.\n\n >>> s.str.extract(r'([ab])?(\\d)')\n 0 1\n 0 a 1\n 1 b 2\n 2 NaN 3\n\n Named groups will become column names in the result.\n\n >>> s.str.extract(r'(?P<letter>[ab])(?P<digit>\\d)')\n letter digit\n 0 a 1\n 1 b 2\n 2 NaN NaN\n\n A pattern with one group will return a DataFrame with one column\n if expand=True.\n\n >>> s.str.extract(r'[ab](\\d)', expand=True)\n 0\n 0 1\n 1 2\n 2 NaN\n\n A pattern with one group will return a Series if expand=False.\n\n >>> s.str.extract(r'[ab](\\d)', expand=False)\n 0 1\n 1 2\n 2 NaN\n dtype: object\n\n \"\"\"\n if not isinstance(expand, bool):\n raise ValueError(\"expand must be True or False\")\n if expand:\n return _str_extract_frame(arr._orig, pat, flags=flags)\n else:\n result, name = _str_extract_noexpand(arr._data, pat, flags=flags)\n return arr._wrap_result(result, name=name, expand=expand)\n\n\ndef str_extractall(arr, pat, flags=0):\n r\"\"\"\n For each subject string in the Series, extract groups from all\n matches of regular expression pat. When each subject string in the\n Series has exactly one match, extractall(pat).xs(0, level='match')\n is the same as extract(pat).\n\n .. versionadded:: 0.18.0\n\n Parameters\n ----------\n pat : string\n Regular expression pattern with capturing groups\n flags : int, default 0 (no flags)\n re module flags, e.g. re.IGNORECASE\n\n Returns\n -------\n A DataFrame with one row for each match, and one column for each\n group. Its rows have a MultiIndex with first levels that come from\n the subject Series. The last level is named 'match' and indicates\n the order in the subject. 
Any capture group names in regular\n expression pat will be used for column names; otherwise capture\n group numbers will be used.\n\n See Also\n --------\n extract : returns first match only (not all matches)\n\n Examples\n --------\n A pattern with one group will return a DataFrame with one column.\n Indices with no matches will not appear in the result.\n\n >>> s = Series([\"a1a2\", \"b1\", \"c1\"], index=[\"A\", \"B\", \"C\"])\n >>> s.str.extractall(r\"[ab](\\d)\")\n 0\n match\n A 0 1\n 1 2\n B 0 1\n\n Capture group names are used for column names of the result.\n\n >>> s.str.extractall(r\"[ab](?P<digit>\\d)\")\n digit\n match\n A 0 1\n 1 2\n B 0 1\n\n A pattern with two groups will return a DataFrame with two columns.\n\n >>> s.str.extractall(r\"(?P<letter>[ab])(?P<digit>\\d)\")\n letter digit\n match\n A 0 a 1\n 1 a 2\n B 0 b 1\n\n Optional groups that do not match are NaN in the result.\n\n >>> s.str.extractall(r\"(?P<letter>[ab])?(?P<digit>\\d)\")\n letter digit\n match\n A 0 a 1\n 1 a 2\n B 0 b 1\n C 0 NaN 1\n\n \"\"\"\n\n regex = re.compile(pat, flags=flags)\n # the regex must contain capture groups.\n if regex.groups == 0:\n raise ValueError(\"pattern contains no capture groups\")\n\n if isinstance(arr, ABCIndex):\n arr = arr.to_series().reset_index(drop=True)\n\n names = dict(zip(regex.groupindex.values(), regex.groupindex.keys()))\n columns = [names.get(1 + i, i) for i in range(regex.groups)]\n match_list = []\n index_list = []\n is_mi = arr.index.nlevels > 1\n\n for subject_key, subject in arr.iteritems():\n if isinstance(subject, compat.string_types):\n\n if not is_mi:\n subject_key = (subject_key, )\n\n for match_i, match_tuple in enumerate(regex.findall(subject)):\n if isinstance(match_tuple, compat.string_types):\n match_tuple = (match_tuple,)\n na_tuple = [np.NaN if group == \"\" else group\n for group in match_tuple]\n match_list.append(na_tuple)\n result_key = tuple(subject_key + (match_i, ))\n index_list.append(result_key)\n\n from pandas import MultiIndex\n index = MultiIndex.from_tuples(\n index_list, names=arr.index.names + [\"match\"])\n\n result = arr._constructor_expanddim(match_list, index=index,\n columns=columns)\n return result\n\n\ndef str_get_dummies(arr, sep='|'):\n \"\"\"\n Split each string in the Series by sep and return a frame of\n dummy/indicator variables.\n\n Parameters\n ----------\n sep : string, default \"|\"\n String to split on.\n\n Returns\n -------\n dummies : DataFrame\n\n Examples\n --------\n >>> Series(['a|b', 'a', 'a|c']).str.get_dummies()\n a b c\n 0 1 1 0\n 1 1 0 0\n 2 1 0 1\n\n >>> Series(['a|b', np.nan, 'a|c']).str.get_dummies()\n a b c\n 0 1 1 0\n 1 0 0 0\n 2 1 0 1\n\n See Also\n --------\n pandas.get_dummies\n \"\"\"\n arr = arr.fillna('')\n try:\n arr = sep + arr + sep\n except TypeError:\n arr = sep + arr.astype(str) + sep\n\n tags = set()\n for ts in arr.str.split(sep):\n tags.update(ts)\n tags = sorted(tags - set([\"\"]))\n\n dummies = np.empty((len(arr), len(tags)), dtype=np.int64)\n\n for i, t in enumerate(tags):\n pat = sep + t + sep\n dummies[:, i] = lib.map_infer(arr.values, lambda x: pat in x)\n return dummies, tags\n\n\ndef str_join(arr, sep):\n \"\"\"\n Join lists contained as elements in the Series/Index with\n passed delimiter. 
Equivalent to :meth:`str.join`.\n\n Parameters\n ----------\n sep : string\n Delimiter\n\n Returns\n -------\n joined : Series/Index of objects\n \"\"\"\n return _na_map(sep.join, arr)\n\n\ndef str_findall(arr, pat, flags=0):\n \"\"\"\n Find all occurrences of pattern or regular expression in the Series/Index.\n\n Equivalent to applying :func:`re.findall` to all the elements in the\n Series/Index.\n\n Parameters\n ----------\n pat : string\n Pattern or regular expression.\n flags : int, default 0\n ``re`` module flags, e.g. `re.IGNORECASE` (default is 0, which means\n no flags).\n\n Returns\n -------\n Series/Index of lists of strings\n All non-overlapping matches of pattern or regular expression in each\n string of this Series/Index.\n\n See Also\n --------\n count : Count occurrences of pattern or regular expression in each string\n of the Series/Index.\n extractall : For each string in the Series, extract groups from all matches\n of regular expression and return a DataFrame with one row for each\n match and one column for each group.\n re.findall : The equivalent ``re`` function to all non-overlapping matches\n of pattern or regular expression in string, as a list of strings.\n\n Examples\n --------\n\n >>> s = pd.Series(['Lion', 'Monkey', 'Rabbit'])\n\n The search for the pattern 'Monkey' returns one match:\n\n >>> s.str.findall('Monkey')\n 0 []\n 1 [Monkey]\n 2 []\n dtype: object\n\n On the other hand, the search for the pattern 'MONKEY' doesn't return any\n match:\n\n >>> s.str.findall('MONKEY')\n 0 []\n 1 []\n 2 []\n dtype: object\n\n Flags can be added to the pattern or regular expression. For instance,\n to find the pattern 'MONKEY' ignoring the case:\n\n >>> import re\n >>> s.str.findall('MONKEY', flags=re.IGNORECASE)\n 0 []\n 1 [Monkey]\n 2 []\n dtype: object\n\n When the pattern matches more than one string in the Series, all matches\n are returned:\n\n >>> s.str.findall('on')\n 0 [on]\n 1 [on]\n 2 []\n dtype: object\n\n Regular expressions are supported too. For instance, the search for all the\n strings ending with the word 'on' is shown next:\n\n >>> s.str.findall('on$')\n 0 [on]\n 1 []\n 2 []\n dtype: object\n\n If the pattern is found more than once in the same string, then a list of\n multiple strings is returned:\n\n >>> s.str.findall('b')\n 0 []\n 1 []\n 2 [b, b]\n dtype: object\n\n \"\"\"\n regex = re.compile(pat, flags=flags)\n return _na_map(regex.findall, arr)\n\n\ndef str_find(arr, sub, start=0, end=None, side='left'):\n \"\"\"\n Return indexes in each strings in the Series/Index where the\n substring is fully contained between [start:end]. 
Return -1 on failure.\n\n Parameters\n ----------\n sub : str\n Substring being searched\n start : int\n Left edge index\n end : int\n Right edge index\n side : {'left', 'right'}, default 'left'\n Specifies a starting side, equivalent to ``find`` or ``rfind``\n\n Returns\n -------\n found : Series/Index of integer values\n \"\"\"\n\n if not isinstance(sub, compat.string_types):\n msg = 'expected a string object, not {0}'\n raise TypeError(msg.format(type(sub).__name__))\n\n if side == 'left':\n method = 'find'\n elif side == 'right':\n method = 'rfind'\n else: # pragma: no cover\n raise ValueError('Invalid side')\n\n if end is None:\n f = lambda x: getattr(x, method)(sub, start)\n else:\n f = lambda x: getattr(x, method)(sub, start, end)\n\n return _na_map(f, arr, dtype=int)\n\n\ndef str_index(arr, sub, start=0, end=None, side='left'):\n if not isinstance(sub, compat.string_types):\n msg = 'expected a string object, not {0}'\n raise TypeError(msg.format(type(sub).__name__))\n\n if side == 'left':\n method = 'index'\n elif side == 'right':\n method = 'rindex'\n else: # pragma: no cover\n raise ValueError('Invalid side')\n\n if end is None:\n f = lambda x: getattr(x, method)(sub, start)\n else:\n f = lambda x: getattr(x, method)(sub, start, end)\n\n return _na_map(f, arr, dtype=int)\n\n\ndef str_pad(arr, width, side='left', fillchar=' '):\n \"\"\"\n Pad strings in the Series/Index with an additional character to\n specified side.\n\n Parameters\n ----------\n width : int\n Minimum width of resulting string; additional characters will be filled\n with spaces\n side : {'left', 'right', 'both'}, default 'left'\n fillchar : str\n Additional character for filling, default is whitespace\n\n Returns\n -------\n padded : Series/Index of objects\n \"\"\"\n\n if not isinstance(fillchar, compat.string_types):\n msg = 'fillchar must be a character, not {0}'\n raise TypeError(msg.format(type(fillchar).__name__))\n\n if len(fillchar) != 1:\n raise TypeError('fillchar must be a character, not str')\n\n if not is_integer(width):\n msg = 'width must be of integer type, not {0}'\n raise TypeError(msg.format(type(width).__name__))\n\n if side == 'left':\n f = lambda x: x.rjust(width, fillchar)\n elif side == 'right':\n f = lambda x: x.ljust(width, fillchar)\n elif side == 'both':\n f = lambda x: x.center(width, fillchar)\n else: # pragma: no cover\n raise ValueError('Invalid side')\n\n return _na_map(f, arr)\n\n\ndef str_split(arr, pat=None, n=None):\n \"\"\"\n Split strings around given separator/delimiter.\n\n Split each string in the caller's values by given\n pattern, propagating NaN values. 
Equivalent to :meth:`str.split`.\n\n Parameters\n ----------\n pat : str, optional\n String or regular expression to split on.\n If not specified, split on whitespace.\n n : int, default -1 (all)\n Limit number of splits in output.\n ``None``, 0 and -1 will be interpreted as return all splits.\n expand : bool, default False\n Expand the splitted strings into separate columns.\n\n * If ``True``, return DataFrame/MultiIndex expanding dimensionality.\n * If ``False``, return Series/Index, containing lists of strings.\n\n Returns\n -------\n Series, Index, DataFrame or MultiIndex\n Type matches caller unless ``expand=True`` (see Notes).\n\n Notes\n -----\n The handling of the `n` keyword depends on the number of found splits:\n\n - If found splits > `n`, make first `n` splits only\n - If found splits <= `n`, make all splits\n - If for a certain row the number of found splits < `n`,\n append `None` for padding up to `n` if ``expand=True``\n\n If using ``expand=True``, Series and Index callers return DataFrame and\n MultiIndex objects, respectively.\n\n See Also\n --------\n str.split : Standard library version of this method.\n Series.str.get_dummies : Split each string into dummy variables.\n Series.str.partition : Split string on a separator, returning\n the before, separator, and after components.\n\n Examples\n --------\n >>> s = pd.Series([\"this is good text\", \"but this is even better\"])\n\n By default, split will return an object of the same size\n having lists containing the split elements\n\n >>> s.str.split()\n 0 [this, is, good, text]\n 1 [but, this, is, even, better]\n dtype: object\n >>> s.str.split(\"random\")\n 0 [this is good text]\n 1 [but this is even better]\n dtype: object\n\n When using ``expand=True``, the split elements will expand out into\n separate columns.\n\n For Series object, output return type is DataFrame.\n\n >>> s.str.split(expand=True)\n 0 1 2 3 4\n 0 this is good text None\n 1 but this is even better\n >>> s.str.split(\" is \", expand=True)\n 0 1\n 0 this good text\n 1 but this even better\n\n For Index object, output return type is MultiIndex.\n\n >>> i = pd.Index([\"ba 100 001\", \"ba 101 002\", \"ba 102 003\"])\n >>> i.str.split(expand=True)\n MultiIndex(levels=[['ba'], ['100', '101', '102'], ['001', '002', '003']],\n labels=[[0, 0, 0], [0, 1, 2], [0, 1, 2]])\n\n Parameter `n` can be used to limit the number of splits in the output.\n\n >>> s.str.split(\"is\", n=1)\n 0 [th, is good text]\n 1 [but th, is even better]\n dtype: object\n >>> s.str.split(\"is\", n=1, expand=True)\n 0 1\n 0 th is good text\n 1 but th is even better\n\n If NaN is present, it is propagated throughout the columns\n during the split.\n\n >>> s = pd.Series([\"this is good text\", \"but this is even better\", np.nan])\n >>> s.str.split(n=3, expand=True)\n 0 1 2 3\n 0 this is good text\n 1 but this is even better\n 2 NaN NaN NaN NaN\n \"\"\"\n if pat is None:\n if n is None or n == 0:\n n = -1\n f = lambda x: x.split(pat, n)\n else:\n if len(pat) == 1:\n if n is None or n == 0:\n n = -1\n f = lambda x: x.split(pat, n)\n else:\n if n is None or n == -1:\n n = 0\n regex = re.compile(pat)\n f = lambda x: regex.split(x, maxsplit=n)\n res = _na_map(f, arr)\n return res\n\n\ndef str_rsplit(arr, pat=None, n=None):\n \"\"\"\n Split each string in the Series/Index by the given delimiter\n string, starting at the end of the string and working to the front.\n Equivalent to :meth:`str.rsplit`.\n\n Parameters\n ----------\n pat : string, default None\n Separator to split on. 
If None, splits on whitespace\n n : int, default -1 (all)\n None, 0 and -1 will be interpreted as return all splits\n expand : bool, default False\n * If True, return DataFrame/MultiIndex expanding dimensionality.\n * If False, return Series/Index.\n\n Returns\n -------\n split : Series/Index or DataFrame/MultiIndex of objects\n \"\"\"\n if n is None or n == 0:\n n = -1\n f = lambda x: x.rsplit(pat, n)\n res = _na_map(f, arr)\n return res\n\n\ndef str_slice(arr, start=None, stop=None, step=None):\n \"\"\"\n Slice substrings from each element in the Series/Index\n\n Parameters\n ----------\n start : int or None\n stop : int or None\n step : int or None\n\n Returns\n -------\n sliced : Series/Index of objects\n \"\"\"\n obj = slice(start, stop, step)\n f = lambda x: x[obj]\n return _na_map(f, arr)\n\n\ndef str_slice_replace(arr, start=None, stop=None, repl=None):\n \"\"\"\n Replace a slice of each string in the Series/Index with another\n string.\n\n Parameters\n ----------\n start : int or None\n stop : int or None\n repl : str or None\n String for replacement\n\n Returns\n -------\n replaced : Series/Index of objects\n \"\"\"\n if repl is None:\n repl = ''\n\n def f(x):\n if x[start:stop] == '':\n local_stop = start\n else:\n local_stop = stop\n y = ''\n if start is not None:\n y += x[:start]\n y += repl\n if stop is not None:\n y += x[local_stop:]\n return y\n\n return _na_map(f, arr)\n\n\ndef str_strip(arr, to_strip=None, side='both'):\n \"\"\"\n Strip whitespace (including newlines) from each string in the\n Series/Index.\n\n Parameters\n ----------\n to_strip : str or unicode\n side : {'left', 'right', 'both'}, default 'both'\n\n Returns\n -------\n stripped : Series/Index of objects\n \"\"\"\n if side == 'both':\n f = lambda x: x.strip(to_strip)\n elif side == 'left':\n f = lambda x: x.lstrip(to_strip)\n elif side == 'right':\n f = lambda x: x.rstrip(to_strip)\n else: # pragma: no cover\n raise ValueError('Invalid side')\n return _na_map(f, arr)\n\n\ndef str_wrap(arr, width, **kwargs):\n r\"\"\"\n Wrap long strings in the Series/Index to be formatted in\n paragraphs with length less than a given width.\n\n This method has the same keyword parameters and defaults as\n :class:`textwrap.TextWrapper`.\n\n Parameters\n ----------\n width : int\n Maximum line-width\n expand_tabs : bool, optional\n If true, tab characters will be expanded to spaces (default: True)\n replace_whitespace : bool, optional\n If true, each whitespace character (as defined by string.whitespace)\n remaining after tab expansion will be replaced by a single space\n (default: True)\n drop_whitespace : bool, optional\n If true, whitespace that, after wrapping, happens to end up at the\n beginning or end of a line is dropped (default: True)\n break_long_words : bool, optional\n If true, then words longer than width will be broken in order to ensure\n that no lines are longer than width. If it is false, long words will\n not be broken, and some lines may be longer than width. (default: True)\n break_on_hyphens : bool, optional\n If true, wrapping will occur preferably on whitespace and right after\n hyphens in compound words, as it is customary in English. If false,\n only whitespaces will be considered as potentially good places for line\n breaks, but you need to set break_long_words to false if you want truly\n insecable words. (default: True)\n\n Returns\n -------\n wrapped : Series/Index of objects\n\n Notes\n -----\n Internally, this method uses a :class:`textwrap.TextWrapper` instance with\n default settings. 
To achieve behavior matching R's stringr library str_wrap\n function, use the arguments:\n\n - expand_tabs = False\n - replace_whitespace = True\n - drop_whitespace = True\n - break_long_words = False\n - break_on_hyphens = False\n\n Examples\n --------\n\n >>> s = pd.Series(['line to be wrapped', 'another line to be wrapped'])\n >>> s.str.wrap(12)\n 0 line to be\\nwrapped\n 1 another line\\nto be\\nwrapped\n \"\"\"\n kwargs['width'] = width\n\n tw = textwrap.TextWrapper(**kwargs)\n\n return _na_map(lambda s: '\\n'.join(tw.wrap(s)), arr)\n\n\ndef str_translate(arr, table, deletechars=None):\n \"\"\"\n Map all characters in the string through the given mapping table.\n Equivalent to standard :meth:`str.translate`. Note that the optional\n argument deletechars is only valid if you are using python 2. For python 3,\n character deletion should be specified via the table argument.\n\n Parameters\n ----------\n table : dict (python 3), str or None (python 2)\n In python 3, table is a mapping of Unicode ordinals to Unicode\n ordinals, strings, or None. Unmapped characters are left untouched.\n Characters mapped to None are deleted. :meth:`str.maketrans` is a\n helper function for making translation tables.\n In python 2, table is either a string of length 256 or None. If the\n table argument is None, no translation is applied and the operation\n simply removes the characters in deletechars. :func:`string.maketrans`\n is a helper function for making translation tables.\n deletechars : str, optional (python 2)\n A string of characters to delete. This argument is only valid\n in python 2.\n\n Returns\n -------\n translated : Series/Index of objects\n \"\"\"\n if deletechars is None:\n f = lambda x: x.translate(table)\n else:\n if compat.PY3:\n raise ValueError(\"deletechars is not a valid argument for \"\n \"str.translate in python 3. 
You should simply \"\n \"specify character deletions in the table \"\n \"argument\")\n f = lambda x: x.translate(table, deletechars)\n return _na_map(f, arr)\n\n\ndef str_get(arr, i):\n \"\"\"\n Extract element from lists, tuples, or strings in each element in the\n Series/Index.\n\n Parameters\n ----------\n i : int\n Integer index (location)\n\n Returns\n -------\n items : Series/Index of objects\n \"\"\"\n f = lambda x: x[i] if len(x) > i >= -len(x) else np.nan\n return _na_map(f, arr)\n\n\ndef str_decode(arr, encoding, errors=\"strict\"):\n \"\"\"\n Decode character string in the Series/Index using indicated encoding.\n Equivalent to :meth:`str.decode` in python2 and :meth:`bytes.decode` in\n python3.\n\n Parameters\n ----------\n encoding : str\n errors : str, optional\n\n Returns\n -------\n decoded : Series/Index of objects\n \"\"\"\n if encoding in _cpython_optimized_decoders:\n # CPython optimized implementation\n f = lambda x: x.decode(encoding, errors)\n else:\n decoder = codecs.getdecoder(encoding)\n f = lambda x: decoder(x, errors)[0]\n return _na_map(f, arr)\n\n\ndef str_encode(arr, encoding, errors=\"strict\"):\n \"\"\"\n Encode character string in the Series/Index using indicated encoding.\n Equivalent to :meth:`str.encode`.\n\n Parameters\n ----------\n encoding : str\n errors : str, optional\n\n Returns\n -------\n encoded : Series/Index of objects\n \"\"\"\n if encoding in _cpython_optimized_encoders:\n # CPython optimized implementation\n f = lambda x: x.encode(encoding, errors)\n else:\n encoder = codecs.getencoder(encoding)\n f = lambda x: encoder(x, errors)[0]\n return _na_map(f, arr)\n\n\ndef _noarg_wrapper(f, docstring=None, **kargs):\n def wrapper(self):\n result = _na_map(f, self._data, **kargs)\n return self._wrap_result(result)\n\n wrapper.__name__ = f.__name__\n if docstring is not None:\n wrapper.__doc__ = docstring\n else:\n raise ValueError('Provide docstring')\n\n return wrapper\n\n\ndef _pat_wrapper(f, flags=False, na=False, **kwargs):\n def wrapper1(self, pat):\n result = f(self._data, pat)\n return self._wrap_result(result)\n\n def wrapper2(self, pat, flags=0, **kwargs):\n result = f(self._data, pat, flags=flags, **kwargs)\n return self._wrap_result(result)\n\n def wrapper3(self, pat, na=np.nan):\n result = f(self._data, pat, na=na)\n return self._wrap_result(result)\n\n wrapper = wrapper3 if na else wrapper2 if flags else wrapper1\n\n wrapper.__name__ = f.__name__\n if f.__doc__:\n wrapper.__doc__ = f.__doc__\n\n return wrapper\n\n\ndef copy(source):\n \"Copy a docstring from another source function (if present)\"\n\n def do_copy(target):\n if source.__doc__:\n target.__doc__ = source.__doc__\n return target\n\n return do_copy\n\n\nclass StringMethods(NoNewAttributesMixin):\n \"\"\"\n Vectorized string functions for Series and Index. NAs stay NA unless\n handled otherwise by a particular method. 
Patterned after Python's string\n methods, with some inspiration from R's stringr package.\n\n Examples\n --------\n >>> s.str.split('_')\n >>> s.str.replace('_', '')\n \"\"\"\n\n def __init__(self, data):\n self._validate(data)\n self._is_categorical = is_categorical_dtype(data)\n self._data = data.cat.categories if self._is_categorical else data\n # save orig to blow up categoricals to the right type\n self._orig = data\n self._freeze()\n\n @staticmethod\n def _validate(data):\n from pandas.core.index import Index\n\n if (isinstance(data, ABCSeries) and\n not ((is_categorical_dtype(data.dtype) and\n is_object_dtype(data.values.categories)) or\n (is_object_dtype(data.dtype)))):\n # it's neither a string series not a categorical series with\n # strings inside the categories.\n # this really should exclude all series with any non-string values\n # (instead of test for object dtype), but that isn't practical for\n # performance reasons until we have a str dtype (GH 9343)\n raise AttributeError(\"Can only use .str accessor with string \"\n \"values, which use np.object_ dtype in \"\n \"pandas\")\n elif isinstance(data, Index):\n # can't use ABCIndex to exclude non-str\n\n # see src/inference.pyx which can contain string values\n allowed_types = ('string', 'unicode', 'mixed', 'mixed-integer')\n if data.inferred_type not in allowed_types:\n message = (\"Can only use .str accessor with string values \"\n \"(i.e. inferred_type is 'string', 'unicode' or \"\n \"'mixed')\")\n raise AttributeError(message)\n if data.nlevels > 1:\n message = (\"Can only use .str accessor with Index, not \"\n \"MultiIndex\")\n raise AttributeError(message)\n\n def __getitem__(self, key):\n if isinstance(key, slice):\n return self.slice(start=key.start, stop=key.stop, step=key.step)\n else:\n return self.get(key)\n\n def __iter__(self):\n i = 0\n g = self.get(i)\n while g.notna().any():\n yield g\n i += 1\n g = self.get(i)\n\n def _wrap_result(self, result, use_codes=True,\n name=None, expand=None):\n\n from pandas.core.index import Index, MultiIndex\n\n # for category, we do the stuff on the categories, so blow it up\n # to the full series again\n # But for some operations, we have to do the stuff on the full values,\n # so make it possible to skip this step as the method already did this\n # before the transformation...\n if use_codes and self._is_categorical:\n result = take_1d(result, self._orig.cat.codes)\n\n if not hasattr(result, 'ndim') or not hasattr(result, 'dtype'):\n return result\n assert result.ndim < 3\n\n if expand is None:\n # infer from ndim if expand is not specified\n expand = False if result.ndim == 1 else True\n\n elif expand is True and not isinstance(self._orig, Index):\n # required when expand=True is explicitly specified\n # not needed when inferred\n\n def cons_row(x):\n if is_list_like(x):\n return x\n else:\n return [x]\n\n result = [cons_row(x) for x in result]\n if result:\n # propagate nan values to match longest sequence (GH 18450)\n max_len = max(len(x) for x in result)\n result = [x * max_len if x[0] is np.nan else x for x in result]\n\n if not isinstance(expand, bool):\n raise ValueError(\"expand must be True or False\")\n\n if expand is False:\n # if expand is False, result should have the same name\n # as the original otherwise specified\n if name is None:\n name = getattr(result, 'name', None)\n if name is None:\n # do not use logical or, _orig may be a DataFrame\n # which has \"name\" column\n name = self._orig.name\n\n # Wait until we are sure result is a Series or Index before\n 
# checking attributes (GH 12180)\n if isinstance(self._orig, Index):\n # if result is a boolean np.array, return the np.array\n # instead of wrapping it into a boolean Index (GH 8875)\n if is_bool_dtype(result):\n return result\n\n if expand:\n result = list(result)\n out = MultiIndex.from_tuples(result, names=name)\n if out.nlevels == 1:\n # We had all tuples of length-one, which are\n # better represented as a regular Index.\n out = out.get_level_values(0)\n return out\n else:\n return Index(result, name=name)\n else:\n index = self._orig.index\n if expand:\n cons = self._orig._constructor_expanddim\n return cons(result, columns=name, index=index)\n else:\n # Must be a Series\n cons = self._orig._constructor\n return cons(result, name=name, index=index)\n\n @copy(str_cat)\n def cat(self, others=None, sep=None, na_rep=None):\n data = self._orig if self._is_categorical else self._data\n result = str_cat(data, others=others, sep=sep, na_rep=na_rep)\n return self._wrap_result(result, use_codes=(not self._is_categorical))\n\n @copy(str_split)\n def split(self, pat=None, n=-1, expand=False):\n result = str_split(self._data, pat, n=n)\n return self._wrap_result(result, expand=expand)\n\n @copy(str_rsplit)\n def rsplit(self, pat=None, n=-1, expand=False):\n result = str_rsplit(self._data, pat, n=n)\n return self._wrap_result(result, expand=expand)\n\n _shared_docs['str_partition'] = (\"\"\"\n Split the string at the %(side)s occurrence of `sep`, and return 3 elements\n containing the part before the separator, the separator itself,\n and the part after the separator.\n If the separator is not found, return %(return)s.\n\n Parameters\n ----------\n pat : string, default whitespace\n String to split on.\n expand : bool, default True\n * If True, return DataFrame/MultiIndex expanding dimensionality.\n * If False, return Series/Index.\n\n Returns\n -------\n split : DataFrame/MultiIndex or Series/Index of objects\n\n See Also\n --------\n %(also)s\n\n Examples\n --------\n\n >>> s = Series(['A_B_C', 'D_E_F', 'X'])\n 0 A_B_C\n 1 D_E_F\n 2 X\n dtype: object\n\n >>> s.str.partition('_')\n 0 1 2\n 0 A _ B_C\n 1 D _ E_F\n 2 X\n\n >>> s.str.rpartition('_')\n 0 1 2\n 0 A_B _ C\n 1 D_E _ F\n 2 X\n \"\"\")\n\n @Appender(_shared_docs['str_partition'] % {\n 'side': 'first',\n 'return': '3 elements containing the string itself, followed by two '\n 'empty strings',\n 'also': 'rpartition : Split the string at the last occurrence of `sep`'\n })\n def partition(self, pat=' ', expand=True):\n f = lambda x: x.partition(pat)\n result = _na_map(f, self._data)\n return self._wrap_result(result, expand=expand)\n\n @Appender(_shared_docs['str_partition'] % {\n 'side': 'last',\n 'return': '3 elements containing two empty strings, followed by the '\n 'string itself',\n 'also': 'partition : Split the string at the first occurrence of `sep`'\n })\n def rpartition(self, pat=' ', expand=True):\n f = lambda x: x.rpartition(pat)\n result = _na_map(f, self._data)\n return self._wrap_result(result, expand=expand)\n\n @copy(str_get)\n def get(self, i):\n result = str_get(self._data, i)\n return self._wrap_result(result)\n\n @copy(str_join)\n def join(self, sep):\n result = str_join(self._data, sep)\n return self._wrap_result(result)\n\n @copy(str_contains)\n def contains(self, pat, case=True, flags=0, na=np.nan, regex=True):\n result = str_contains(self._data, pat, case=case, flags=flags, na=na,\n regex=regex)\n return self._wrap_result(result)\n\n @copy(str_match)\n def match(self, pat, case=True, flags=0, na=np.nan, 
as_indexer=None):\n result = str_match(self._data, pat, case=case, flags=flags, na=na,\n as_indexer=as_indexer)\n return self._wrap_result(result)\n\n @copy(str_replace)\n def replace(self, pat, repl, n=-1, case=None, flags=0, regex=True):\n result = str_replace(self._data, pat, repl, n=n, case=case,\n flags=flags, regex=regex)\n return self._wrap_result(result)\n\n @copy(str_repeat)\n def repeat(self, repeats):\n result = str_repeat(self._data, repeats)\n return self._wrap_result(result)\n\n @copy(str_pad)\n def pad(self, width, side='left', fillchar=' '):\n result = str_pad(self._data, width, side=side, fillchar=fillchar)\n return self._wrap_result(result)\n\n _shared_docs['str_pad'] = (\"\"\"\n Filling %(side)s side of strings in the Series/Index with an\n additional character. Equivalent to :meth:`str.%(method)s`.\n\n Parameters\n ----------\n width : int\n Minimum width of resulting string; additional characters will be filled\n with ``fillchar``\n fillchar : str\n Additional character for filling, default is whitespace\n\n Returns\n -------\n filled : Series/Index of objects\n \"\"\")\n\n @Appender(_shared_docs['str_pad'] % dict(side='left and right',\n method='center'))\n def center(self, width, fillchar=' '):\n return self.pad(width, side='both', fillchar=fillchar)\n\n @Appender(_shared_docs['str_pad'] % dict(side='right', method='ljust'))\n def ljust(self, width, fillchar=' '):\n return self.pad(width, side='right', fillchar=fillchar)\n\n @Appender(_shared_docs['str_pad'] % dict(side='left', method='rjust'))\n def rjust(self, width, fillchar=' '):\n return self.pad(width, side='left', fillchar=fillchar)\n\n def zfill(self, width):\n \"\"\"\n Filling left side of strings in the Series/Index with 0.\n Equivalent to :meth:`str.zfill`.\n\n Parameters\n ----------\n width : int\n Minimum width of resulting string; additional characters will be\n filled with 0\n\n Returns\n -------\n filled : Series/Index of objects\n \"\"\"\n result = str_pad(self._data, width, side='left', fillchar='0')\n return self._wrap_result(result)\n\n @copy(str_slice)\n def slice(self, start=None, stop=None, step=None):\n result = str_slice(self._data, start, stop, step)\n return self._wrap_result(result)\n\n @copy(str_slice_replace)\n def slice_replace(self, start=None, stop=None, repl=None):\n result = str_slice_replace(self._data, start, stop, repl)\n return self._wrap_result(result)\n\n @copy(str_decode)\n def decode(self, encoding, errors=\"strict\"):\n result = str_decode(self._data, encoding, errors)\n return self._wrap_result(result)\n\n @copy(str_encode)\n def encode(self, encoding, errors=\"strict\"):\n result = str_encode(self._data, encoding, errors)\n return self._wrap_result(result)\n\n _shared_docs['str_strip'] = (\"\"\"\n Strip whitespace (including newlines) from each string in the\n Series/Index from %(side)s. 
Equivalent to :meth:`str.%(method)s`.\n\n Returns\n -------\n stripped : Series/Index of objects\n \"\"\")\n\n @Appender(_shared_docs['str_strip'] % dict(side='left and right sides',\n method='strip'))\n def strip(self, to_strip=None):\n result = str_strip(self._data, to_strip, side='both')\n return self._wrap_result(result)\n\n @Appender(_shared_docs['str_strip'] % dict(side='left side',\n method='lstrip'))\n def lstrip(self, to_strip=None):\n result = str_strip(self._data, to_strip, side='left')\n return self._wrap_result(result)\n\n @Appender(_shared_docs['str_strip'] % dict(side='right side',\n method='rstrip'))\n def rstrip(self, to_strip=None):\n result = str_strip(self._data, to_strip, side='right')\n return self._wrap_result(result)\n\n @copy(str_wrap)\n def wrap(self, width, **kwargs):\n result = str_wrap(self._data, width, **kwargs)\n return self._wrap_result(result)\n\n @copy(str_get_dummies)\n def get_dummies(self, sep='|'):\n # we need to cast to Series of strings as only that has all\n # methods available for making the dummies...\n data = self._orig.astype(str) if self._is_categorical else self._data\n result, name = str_get_dummies(data, sep)\n return self._wrap_result(result, use_codes=(not self._is_categorical),\n name=name, expand=True)\n\n @copy(str_translate)\n def translate(self, table, deletechars=None):\n result = str_translate(self._data, table, deletechars)\n return self._wrap_result(result)\n\n count = _pat_wrapper(str_count, flags=True)\n startswith = _pat_wrapper(str_startswith, na=True)\n endswith = _pat_wrapper(str_endswith, na=True)\n findall = _pat_wrapper(str_findall, flags=True)\n\n @copy(str_extract)\n def extract(self, pat, flags=0, expand=True):\n return str_extract(self, pat, flags=flags, expand=expand)\n\n @copy(str_extractall)\n def extractall(self, pat, flags=0):\n return str_extractall(self._orig, pat, flags=flags)\n\n _shared_docs['find'] = (\"\"\"\n Return %(side)s indexes in each strings in the Series/Index\n where the substring is fully contained between [start:end].\n Return -1 on failure. Equivalent to standard :meth:`str.%(method)s`.\n\n Parameters\n ----------\n sub : str\n Substring being searched\n start : int\n Left edge index\n end : int\n Right edge index\n\n Returns\n -------\n found : Series/Index of integer values\n\n See Also\n --------\n %(also)s\n \"\"\")\n\n @Appender(_shared_docs['find'] %\n dict(side='lowest', method='find',\n also='rfind : Return highest indexes in each strings'))\n def find(self, sub, start=0, end=None):\n result = str_find(self._data, sub, start=start, end=end, side='left')\n return self._wrap_result(result)\n\n @Appender(_shared_docs['find'] %\n dict(side='highest', method='rfind',\n also='find : Return lowest indexes in each strings'))\n def rfind(self, sub, start=0, end=None):\n result = str_find(self._data, sub, start=start, end=end, side='right')\n return self._wrap_result(result)\n\n def normalize(self, form):\n \"\"\"Return the Unicode normal form for the strings in the Series/Index.\n For more information on the forms, see the\n :func:`unicodedata.normalize`.\n\n Parameters\n ----------\n form : {'NFC', 'NFKC', 'NFD', 'NFKD'}\n Unicode form\n\n Returns\n -------\n normalized : Series/Index of objects\n \"\"\"\n import unicodedata\n f = lambda x: unicodedata.normalize(form, compat.u_safe(x))\n result = _na_map(f, self._data)\n return self._wrap_result(result)\n\n _shared_docs['index'] = (\"\"\"\n Return %(side)s indexes in each strings where the substring is\n fully contained between [start:end]. 
This is the same as\n ``str.%(similar)s`` except instead of returning -1, it raises a ValueError\n when the substring is not found. Equivalent to standard ``str.%(method)s``.\n\n Parameters\n ----------\n sub : str\n Substring being searched\n start : int\n Left edge index\n end : int\n Right edge index\n\n Returns\n -------\n found : Series/Index of objects\n\n See Also\n --------\n %(also)s\n \"\"\")\n\n @Appender(_shared_docs['index'] %\n dict(side='lowest', similar='find', method='index',\n also='rindex : Return highest indexes in each strings'))\n def index(self, sub, start=0, end=None):\n result = str_index(self._data, sub, start=start, end=end, side='left')\n return self._wrap_result(result)\n\n @Appender(_shared_docs['index'] %\n dict(side='highest', similar='rfind', method='rindex',\n also='index : Return lowest indexes in each strings'))\n def rindex(self, sub, start=0, end=None):\n result = str_index(self._data, sub, start=start, end=end, side='right')\n return self._wrap_result(result)\n\n _shared_docs['len'] = (\"\"\"\n Compute length of each string in the Series/Index.\n\n Returns\n -------\n lengths : Series/Index of integer values\n \"\"\")\n len = _noarg_wrapper(len, docstring=_shared_docs['len'], dtype=int)\n\n _shared_docs['casemethods'] = (\"\"\"\n Convert strings in the Series/Index to %(type)s.\n Equivalent to :meth:`str.%(method)s`.\n\n Returns\n -------\n converted : Series/Index of objects\n \"\"\")\n _shared_docs['lower'] = dict(type='lowercase', method='lower')\n _shared_docs['upper'] = dict(type='uppercase', method='upper')\n _shared_docs['title'] = dict(type='titlecase', method='title')\n _shared_docs['capitalize'] = dict(type='be capitalized',\n method='capitalize')\n _shared_docs['swapcase'] = dict(type='be swapcased', method='swapcase')\n lower = _noarg_wrapper(lambda x: x.lower(),\n docstring=_shared_docs['casemethods'] %\n _shared_docs['lower'])\n upper = _noarg_wrapper(lambda x: x.upper(),\n docstring=_shared_docs['casemethods'] %\n _shared_docs['upper'])\n title = _noarg_wrapper(lambda x: x.title(),\n docstring=_shared_docs['casemethods'] %\n _shared_docs['title'])\n capitalize = _noarg_wrapper(lambda x: x.capitalize(),\n docstring=_shared_docs['casemethods'] %\n _shared_docs['capitalize'])\n swapcase = _noarg_wrapper(lambda x: x.swapcase(),\n docstring=_shared_docs['casemethods'] %\n _shared_docs['swapcase'])\n\n _shared_docs['ismethods'] = (\"\"\"\n Check whether all characters in each string in the Series/Index\n are %(type)s. 
Equivalent to :meth:`str.%(method)s`.\n\n Returns\n -------\n is : Series/array of boolean values\n \"\"\")\n _shared_docs['isalnum'] = dict(type='alphanumeric', method='isalnum')\n _shared_docs['isalpha'] = dict(type='alphabetic', method='isalpha')\n _shared_docs['isdigit'] = dict(type='digits', method='isdigit')\n _shared_docs['isspace'] = dict(type='whitespace', method='isspace')\n _shared_docs['islower'] = dict(type='lowercase', method='islower')\n _shared_docs['isupper'] = dict(type='uppercase', method='isupper')\n _shared_docs['istitle'] = dict(type='titlecase', method='istitle')\n _shared_docs['isnumeric'] = dict(type='numeric', method='isnumeric')\n _shared_docs['isdecimal'] = dict(type='decimal', method='isdecimal')\n isalnum = _noarg_wrapper(lambda x: x.isalnum(),\n docstring=_shared_docs['ismethods'] %\n _shared_docs['isalnum'])\n isalpha = _noarg_wrapper(lambda x: x.isalpha(),\n docstring=_shared_docs['ismethods'] %\n _shared_docs['isalpha'])\n isdigit = _noarg_wrapper(lambda x: x.isdigit(),\n docstring=_shared_docs['ismethods'] %\n _shared_docs['isdigit'])\n isspace = _noarg_wrapper(lambda x: x.isspace(),\n docstring=_shared_docs['ismethods'] %\n _shared_docs['isspace'])\n islower = _noarg_wrapper(lambda x: x.islower(),\n docstring=_shared_docs['ismethods'] %\n _shared_docs['islower'])\n isupper = _noarg_wrapper(lambda x: x.isupper(),\n docstring=_shared_docs['ismethods'] %\n _shared_docs['isupper'])\n istitle = _noarg_wrapper(lambda x: x.istitle(),\n docstring=_shared_docs['ismethods'] %\n _shared_docs['istitle'])\n isnumeric = _noarg_wrapper(lambda x: compat.u_safe(x).isnumeric(),\n docstring=_shared_docs['ismethods'] %\n _shared_docs['isnumeric'])\n isdecimal = _noarg_wrapper(lambda x: compat.u_safe(x).isdecimal(),\n docstring=_shared_docs['ismethods'] %\n _shared_docs['isdecimal'])\n\n @classmethod\n def _make_accessor(cls, data):\n cls._validate(data)\n return cls(data)\n" ]
[ [ "pandas.compat.binary_type.__mul__", "numpy.asarray", "numpy.ndarray", "pandas.DataFrame", "pandas.core.dtypes.missing.notna", "numpy.where", "pandas.core.dtypes.common.is_string_like", "pandas.core.index.MultiIndex.from_tuples", "numpy.logical_or.reduce", "pandas._libs.lib.map_infer", "pandas.core.dtypes.common.is_categorical_dtype", "numpy.putmask", "pandas.core.dtypes.common.is_list_like", "pandas.util._decorators.Appender", "pandas.compat.u_safe", "pandas.compat.text_type.__mul__", "pandas.core.dtypes.common.is_bool_dtype", "pandas.core.algorithms.take_1d", "pandas.core.dtypes.common.is_scalar", "pandas.core.dtypes.common.is_re", "pandas.core.dtypes.common.is_integer", "pandas.core.common._values_from_object", "pandas.core.dtypes.common.is_object_dtype", "pandas.compat.zip", "pandas.core.dtypes.missing.isna", "pandas._libs.lib.maybe_convert_objects", "numpy.empty", "pandas.core.index.Index" ] ]
felipemoran/tcc_sfm
[ "b0b35e92c7c2f86d84120366147a8a650d0a4f7c" ]
[ "pipeline/utils.py" ]
[ "import os\nfrom itertools import count, chain\nfrom dataclasses import dataclass\n\nimport numpy as np\n\n\nTYPE_CALIBRATION_MATRIX = 0\nTYPE_CAMERA = 1\nTYPE_POINT = 2\n\n\n@dataclass\nclass ErrorMetric:\n frame_number: int\n projection: float\n cam_orientation: float\n cam_position: float\n point_position: float\n\n\ndef write_to_viz_file(camera_matrix, Rs, Ts, points):\n \"\"\"\n Writes input information as a properly formatted csv file for visualization\n\n :param camera_matrix: calibration matrix\n :param Rs: list of R matrices\n :param Ts: list of T vectors\n :param points: point cloud with N points as a ndarray with shape Nx3\n \"\"\"\n # IMPORTANT: Rs and ts must be in the global coordinate system\n\n with open(\"out/viz_data.csv\", \"w\") as out_file:\n out_file.write(\"{}\\n\")\n\n def convert_and_save_line(line):\n line = [str(item) for item in line]\n out_file.write(\",\".join(line))\n out_file.write(\"\\n\")\n\n line_elements = [TYPE_CALIBRATION_MATRIX, 0] + list(\n camera_matrix.flatten()\n )\n convert_and_save_line(line_elements)\n\n for index, (R, t) in enumerate(zip(Rs, Ts)):\n line_elements = (\n [TYPE_CAMERA, index] + list(R.flatten()) + list(t.flatten())\n )\n convert_and_save_line(line_elements)\n\n for point_id, point in enumerate(points):\n if np.isnan(point).any():\n continue\n\n line_elements = [TYPE_POINT, point_id] + list(point.flatten())\n # if 'color' in point:\n # line_elements += list(point['point_color'].flatten())\n\n convert_and_save_line(line_elements)\n\n\ndef call_viz():\n \"\"\"\n Call visualizes with default csv file location\n \"\"\"\n os.system(\n os.path.join(\n os.getcwd(), \"visualizer\", \"cmake-build-debug\", \"visualizer\"\n )\n + \" \"\n + os.path.join(os.getcwd(), \"out\", \"viz_data.csv\")\n )\n\n\ndef visualize(camera_matrix, Rs, Ts, points):\n \"\"\"\n Bundles together visualization file creation and visualizer call\n\n :param camera_matrix: calibration matrix\n :param Rs: list of R matrices\n :param Ts: list of T vectors\n :param points: point cloud with N points as a ndarray with shape Nx3\n \"\"\"\n write_to_viz_file(camera_matrix, Rs, Ts, points)\n call_viz()\n\n\ndef compose_rts(rel_R, rel_T, comp_R, comp_T):\n \"\"\"\n Porperly composes two sets of rotation matrices and translation vectors\n\n :param rel_R: rotation matrix of new camera\n :param rel_T: translation vector of new camera\n :param comp_R: rotation matrix of previous camera\n :param comp_T: translation vector of previous camera\n :return: composed rotation matrix and translation vector\n \"\"\"\n res_T = comp_T + np.matmul(comp_R, rel_T)\n res_R = np.matmul(comp_R, rel_R)\n return res_R, res_T\n\n\ndef translate_points_to_base_frame(comp_R, comp_T, points):\n \"\"\"\n Convert point from local reference frame to global's\n :param comp_R: camera's rotation matrix in the global reference frame\n :param comp_T: camera's translation vector in the global reference frame\n :param points: points to be converted\n :return: point coordinates in the global reference frame\n \"\"\"\n return (comp_T + np.matmul(comp_R, points.transpose())).transpose()\n\n\ndef get_nan_bool_mask(input_array):\n \"\"\"\n Creates mask of booleans indicating which values are Nan/None\n\n :param input_array: feature track or point cloud\n :return: bool mask\n \"\"\"\n return (np.isnan(input_array)).any(axis=1)\n\n\ndef get_nan_index_mask(input_array):\n \"\"\"\n Creates mask with the indexes of elements that are Nan/None.\n\n :param input_array: feature track or point cloud\n :return: index mask\n 
\"\"\"\n nan_bool_mask = get_nan_bool_mask(input_array)\n return np.arange(len(input_array))[nan_bool_mask]\n\n\ndef get_not_nan_index_mask(input_array):\n \"\"\"\n Creates mask with the indexes of elements that are not Nan/None.\n\n :param input_array: feature track or point cloud\n :return: index mask\n \"\"\"\n\n not_nan_bool_mask = ~get_nan_bool_mask(input_array)\n return np.arange(len(input_array))[not_nan_bool_mask]\n\n\ndef invert_reference_frame(R, T):\n \"\"\"\n Inverts rotation matrix and translation vector\n\n :param R: rotation matrix\n :param T: translation vector\n :return: inverted R and T\n \"\"\"\n if R is None:\n return T, R\n return R.transpose(), np.matmul(R.transpose(), -T)\n\n\ndef init_rt():\n \"\"\"\n Creates initial rotation matrix and translation vector\n\n :return: 3x3 identity matix and vector of zeros of shape 3x1\n \"\"\"\n return np.eye(3), np.zeros((3, 1), dtype=np.float_)\n\n\ndef get_intersection_mask(a, b):\n \"\"\"\n Calculates the intersection of two index returning only those that are present in both\n vectors a nd b\n\n :param a: index vector a\n :param b: index vector b\n :return: intesection between a and b\n \"\"\"\n return np.intersect1d(a, b)\n\n\ndef get_last_track_pair(tracks, masks):\n \"\"\"\n Forms a track pair with the last two track slices.\n\n This formed pair consists of all the features that are present in both traacks.\n\n :param tracks: list of 2D feature vectors. Each vector has the shape Dx2\n :param masks: list of index masks for each feature vector. Indexes refer to the position of\n the item in the cloud\n :return: track pair and pair index mask in the same structure as tracks and masks\n \"\"\"\n pair_mask = get_intersection_mask(masks[-2], masks[-1])\n track_pair = [\n # track[[item in pair_mask for item in mask]]\n track[np.isin(mask, pair_mask)]\n for (track, mask) in zip(tracks[-2:], masks[-2:])\n ]\n return track_pair, pair_mask\n\n\ndef points_to_cloud(points, indexes):\n \"\"\"\n Creates a cloud of points from a set of sparse 3D points and indexes\n\n :param points: point cloud with N points as a ndarray with shape Nx3\n :param indexes: list of index masks for each feature vector. Indexes refer to the position\n of the item in the cloud\n :return: point cloud\n \"\"\"\n cloud = np.full((max(indexes) + 1, 3), None, dtype=np.float_)\n cloud[indexes] = points\n return cloud\n\n\ndef add_points_to_cloud(cloud, points, index_mask):\n \"\"\"\n Adds new points from 'points' to the cloud\n\n :param cloud: point cloud with N points as a ndarray with shape Nx3\n :param points: calculated 3D points from a given set of frames\n :param index_mask: vector with corresponding indexes for each point in points\n :return:\n \"\"\"\n\n assert cloud is not None\n\n cloud_mask = get_not_nan_index_mask(cloud)\n new_points_mask = np.setdiff1d(index_mask, cloud_mask)\n\n if max(index_mask) >= cloud.shape[0]:\n new_cloud = np.full((max(index_mask) * 2, 3), None, dtype=np.float_)\n new_cloud[cloud_mask] = cloud[cloud_mask]\n cloud = new_cloud\n\n cloud[new_points_mask] = points[np.isin(index_mask, new_points_mask)]\n\n return cloud\n\n\ndef generator_copier(generator, num_copies, num_elements=None):\n \"\"\"\n Copies a generator multiple times up to a certain point or until it's end,\n whichever comes first. 
The generator evaluation is eager, meaning that if an\n infinite generator is passed without a number of elements to be copied the\n code will hang.\n\n :param generator: generator to be copied\n :param num_copies: number of copies to be generated\n :param num_elements: number of elements to be copied\n :return: list with original generator and copies\n \"\"\"\n return_list = []\n\n index_generator = (\n range(num_elements) if num_elements is not None else count(0, 1)\n )\n\n for _ in index_generator:\n try:\n return_list += [next(generator)]\n except StopIteration:\n break\n\n generators = [chain(return_list, generator)] + [\n (item for item in return_list) for _ in range(num_copies)\n ]\n\n generators[0] = chain(generators[0], generator)\n\n return generators\n" ]
[ [ "numpy.isnan", "numpy.eye", "numpy.matmul", "numpy.setdiff1d", "numpy.intersect1d", "numpy.zeros", "numpy.isin" ] ]
snowde/tsfresh
[ "6cf12fe769762a7c9369a9c78a3a519a28d5c707" ]
[ "tests/units/feature_selection/test_significance_tests.py" ]
[ "import pytest\nimport numpy as np\nimport pandas as pd\n\nfrom tsfresh.defaults import TEST_FOR_BINARY_TARGET_REAL_FEATURE\nfrom tsfresh.feature_selection.significance_tests import target_binary_feature_binary_test, \\\n target_binary_feature_real_test, target_real_feature_real_test, target_real_feature_binary_test\n\n\n@pytest.fixture()\ndef set_random_seed():\n np.random.seed(seed=42)\n\n\n@pytest.fixture()\ndef binary_feature(set_random_seed):\n return pd.Series(np.random.binomial(1, 0.5, 250), name=\"TEST\")\n\n\n@pytest.fixture()\ndef binary_target_not_related(set_random_seed):\n return pd.Series(np.random.binomial(1, 0.5, 250))\n\n\n@pytest.fixture()\ndef real_feature(set_random_seed):\n return pd.Series(np.random.normal(0, 1, 250), name=\"TEST\")\n\n\n@pytest.fixture()\ndef real_target_not_related(set_random_seed):\n return pd.Series(np.random.normal(0, 1, 250))\n\n\nclass TestUnsignificant:\n @pytest.fixture()\n def minimal_p_value_for_unsignificant_features(self):\n return 0.05\n\n\n def test_feature_selection_target_binary_features_binary(self, minimal_p_value_for_unsignificant_features,\n binary_feature,\n binary_target_not_related):\n \"\"\"\n Test if the p_value returned by target_binary_feature_binary_test is\n large enough for highly unsignificant features.\n \"\"\"\n p_value = target_binary_feature_binary_test(binary_feature, binary_target_not_related)\n assert minimal_p_value_for_unsignificant_features < p_value\n\n\n def test_feature_selection_target_binary_features_realvalued(self, minimal_p_value_for_unsignificant_features,\n real_feature,\n binary_target_not_related):\n \"\"\"\n Test if the p_value returned by target_binary_feature_binary_test is\n large enough for highly unsignificant features.\n \"\"\"\n p_value = target_binary_feature_real_test(real_feature, binary_target_not_related,\n TEST_FOR_BINARY_TARGET_REAL_FEATURE)\n assert minimal_p_value_for_unsignificant_features < p_value\n\n\n def test_feature_selection_target_realvalued_features_binary(self, minimal_p_value_for_unsignificant_features,\n binary_feature,\n real_target_not_related):\n \"\"\"\n Test if the p_value returned by target_real_feature_binary_test is\n large enough for highly unsignificant features.\"\"\"\n p_value = target_real_feature_binary_test(binary_feature, real_target_not_related)\n assert minimal_p_value_for_unsignificant_features < p_value\n\n\n def test_feature_selection_target_realvalued_features_realvalued(self, minimal_p_value_for_unsignificant_features,\n real_feature,\n real_target_not_related):\n \"\"\"\n Test if the p_value returned by target_real_feature_real_test is\n large enough for highly unsignificant features.\n \"\"\"\n p_value = target_real_feature_real_test(real_feature, real_target_not_related)\n assert minimal_p_value_for_unsignificant_features < p_value\n\n\nclass TestSignificant:\n @pytest.fixture()\n def maximal_p_value_for_significant_features(self):\n return 0.15\n\n def test_feature_selection_target_binary_features_binary(self, maximal_p_value_for_significant_features,\n binary_feature):\n \"\"\"\n Test if the p_value returned by target_binary_feature_binary_test is\n low enough for highly significant features.\n \"\"\"\n y = binary_feature - pd.Series(np.random.binomial(1, 0.1, 250) + np.random.binomial(1, 0.1, 250))\n y[y == -1] = 0\n y[y == -2] = 0\n y[y == 2] = 1\n\n p_value = target_binary_feature_binary_test(binary_feature, y)\n assert maximal_p_value_for_significant_features > p_value\n\n\n def 
test_feature_selection_target_binary_features_realvalued_mann(self, maximal_p_value_for_significant_features,\n real_feature):\n \"\"\"\n Test if the p_value returned by target_binary_feature_real_test is\n low enough for highly significant features.\n \"\"\"\n y = pd.Series(np.ndarray(250))\n y[real_feature >= 0.3] = 1\n y[real_feature < 0.3] = 0\n y -= pd.Series(np.random.binomial(1, 0.1, 250))\n y[y == -1] = 0\n y[y == 2] = 1\n\n p_value = target_binary_feature_real_test(real_feature, y, TEST_FOR_BINARY_TARGET_REAL_FEATURE)\n assert maximal_p_value_for_significant_features > p_value\n\n\n def test_feature_selection_target_binary_features_realvalued_smir(self, maximal_p_value_for_significant_features,\n real_feature):\n \"\"\"\n Test if the p_value returned by target_binary_feature_real_test is\n low enough for highly significant features.\n \"\"\"\n y = pd.Series(np.ndarray(250))\n y[real_feature >= 0.3] = 1\n y[real_feature < 0.3] = 0\n y -= pd.Series(np.random.binomial(1, 0.2, 250))\n y[y == -1] = 0\n y[y == 2] = 1\n\n p_value = target_binary_feature_real_test(real_feature, y, test=\"smir\")\n assert maximal_p_value_for_significant_features > p_value\n\n\n def test_feature_selection_target_realvalued_features_binary(self, maximal_p_value_for_significant_features,\n binary_feature):\n \"\"\"\n Test if the p_value returned by target_real_feature_binary_test is\n low enough for highly significant features.\n \"\"\"\n y = binary_feature * pd.Series(np.random.normal(0, 1, 250)) + pd.Series(np.random.normal(0, 0.25, 250))\n\n p_value = target_real_feature_binary_test(binary_feature, y)\n assert maximal_p_value_for_significant_features > p_value\n\n\n def test_feature_selection_target_realvalued_features_realvalued(self, maximal_p_value_for_significant_features,\n real_feature):\n \"\"\"\n Test if the p_value returned by target_real_feature_real_test is\n low enough for highly significant features.\n \"\"\"\n y = real_feature + pd.Series(np.random.normal(0, 1, 250))\n\n p_value = target_real_feature_real_test(real_feature, y)\n\n assert maximal_p_value_for_significant_features > p_value\n" ]
[ [ "numpy.random.binomial", "numpy.random.normal", "numpy.ndarray", "numpy.random.seed" ] ]
kyraikeda/ginga
[ "e0ce979de4a87e12ba7a90eec0517a0be05d14bc", "e0ce979de4a87e12ba7a90eec0517a0be05d14bc" ]
[ "ginga/examples/gw/example1_video.py", "ginga/gtk3w/Plot.py" ]
[ "#! /usr/bin/env python\n#\n# video_play.py -- video playback example with Ginga\n#\n# This is open-source software licensed under a BSD license.\n# Please see the file LICENSE.txt for details.\n#\n\"\"\"\nThis example shows how you can set up a recurring refresh rate in Ginga.\nIt reads a video file and displays frames in a Ginga viewer.\n\nCaveats:\n\n1. There is no sound. This is due to the lack of a decent python module\n that can read video files and provide _both_ audio and video streams.\n\n2. Currently, it expects an OpenCV readable file as a command line parameter.\n Only formats supported by OpenCV can be used (typically JPEG encoded).\n\nRequirements:\n\nTo run this example you will need the OpenCV bindings for Python installed.\nThis module lets us access the video stream of a video file frame-by-frame.\n\nUsage::\n\n $ example1_video.py [log options] --optimize <video file>\n\nWorkings:\n\nTwo threads are created: a GUI handling thread and a worker thread to\nread frames from the file. This allows the viewer to remain fairly\nresponsive to user actions.\n\n\"\"\"\n\nimport sys\nimport time\nimport threading\n\nimport numpy as np\n\nimport ginga.toolkit as ginga_toolkit\nfrom ginga import RGBImage\nfrom ginga import AutoCuts, RGBMap\nfrom ginga.misc import log, Task\n\ntry:\n import cv2\nexcept ImportError:\n print(\"You need to install the OpenCV python module to run this example\")\n sys.exit(1)\n\n\nSTD_FORMAT = '%(asctime)s | %(levelname)1.1s | %(filename)s:%(lineno)d (%(funcName)s) | %(message)s'\n\n\nclass GingaVision(object):\n\n def __init__(self, logger, ev_quit, options):\n super(GingaVision, self).__init__()\n self.logger = logger\n self.ev_quit = ev_quit\n\n from ginga.gw import Widgets, Viewers, GwMain\n\n self.card = 'default'\n # playback rate; changed when we know the actual rate\n self.fps = options.fps\n self.playback_rate = 1.0 / 30.0\n\n self.pimage = RGBImage.RGBImage()\n self.pdata = None\n\n self.app = Widgets.Application(logger=logger)\n self.app.add_callback('shutdown', self.quit)\n self.top = self.app.make_window(\"Ginga example2\")\n self.top.add_callback('close', lambda *args: self.quit())\n\n thread_pool = Task.ThreadPool(2, logger, ev_quit=ev_quit)\n thread_pool.startall()\n self.main = GwMain.GwMain(logger=logger, ev_quit=ev_quit,\n app=self.app, thread_pool=thread_pool)\n\n vbox = Widgets.VBox()\n vbox.set_border_width(2)\n vbox.set_spacing(1)\n\n fi = Viewers.CanvasView(logger=logger, render=options.render)\n fi.set_autocut_params('histogram')\n fi.enable_autozoom('off')\n fi.enable_autocenter('once')\n fi.enable_autocuts('off')\n fi.cut_levels(0, 255)\n fi.scale_to(1, 1)\n fi.set_bg(0.2, 0.2, 0.2)\n fi.ui_set_active(True)\n self.viewer = fi\n\n if options.optimize:\n # Some optimizations to smooth playback at decent FPS\n # PassThruRGBMapper is the most efficient mapper\n rgbmap = RGBMap.PassThruRGBMapper(self.logger)\n fi.set_rgbmap(rgbmap)\n\n # Clip cuts assumes data does not need to be scaled in cut levels--\n # only clipped\n fi.set_autocuts(AutoCuts.Clip(logger=self.logger))\n\n bd = fi.get_bindings()\n bd.enable_all(True)\n\n fi.set_desired_size(512, 512)\n iw = Viewers.GingaViewerWidget(viewer=fi)\n vbox.add_widget(iw, stretch=1)\n\n hbox = Widgets.HBox()\n hbox.set_margins(4, 2, 4, 2)\n\n wopen = Widgets.Button(\"Open File\")\n #wopen.clicked.connect(self.open_file)\n wquit = Widgets.Button(\"Quit\")\n wquit.add_callback('activated', lambda *args: self.quit())\n\n for w in (wopen, wquit):\n hbox.add_widget(w, stretch=0)\n 
hbox.add_widget(Widgets.Label(''), stretch=1)\n\n vbox.add_widget(hbox, stretch=0)\n\n self.top.set_widget(vbox)\n self.top.set_title(\"Video Example Viewer\")\n\n def quit(self):\n self.logger.info(\"quit called\")\n self.ev_quit.set()\n self.top.delete()\n\n def show_frame(self, img):\n self.logger.debug(\"updating image\")\n try:\n if (self.pdata is None) or (img.shape != self.pdata.shape):\n # No previous image, set up our data for ginga\n self.pdata = img\n self.pimage.set_data(self.pdata)\n # Hack: video frames seem to be returned with blue channel\n # in LSByte\n self.pimage.set_order('BGR')\n\n # After establishing this as the image, we can just\n # update the image data hereafter.\n self.main.gui_do(self.viewer.set_image, self.pimage)\n\n else:\n # Update the image data in-place. Viewer frame will be\n # updated at the next refresh interval.\n self.pdata[::] = img[::]\n\n except Exception as e:\n self.logger.error(\"Error updating image: %s\" % (str(e)))\n\n def capture_video(self, device):\n\n self.logger.info(\"capture video loop starting...\")\n cap = cv2.VideoCapture(device)\n\n # Get width and height of frames and resize window\n width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)\n height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)\n self.logger.info(\"Video is %dx%d resolution\" % (width, height))\n bd = 50\n self.main.gui_do(self.top.resize, width + bd, height + bd)\n\n # Get the frame count\n num_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)\n self.logger.info(\"There are %d frames\" % (num_frames))\n\n # Get the frame rate\n fps = cap.get(cv2.CAP_PROP_FPS)\n if self.fps is None:\n if fps is not None and not np.isnan(fps) and float(fps) >= 1.0:\n self.fps = float(fps)\n else:\n self.fps = 30.0\n self.logger.warn(\"No video rate found in metadata; \"\n \"assuming %.2f fps\" % (self.fps))\n\n else:\n self.logger.info(\"Forcing a video rate of %d fps\" % (self.fps))\n\n self.playback_rate = 1.0 / self.fps\n\n self.main.gui_do(self.viewer.set_refresh_rate, self.fps)\n self.main.gui_do(self.viewer.start_refresh)\n\n frame = 0\n last_report = 0\n done = False\n while not self.ev_quit.isSet() and not done:\n start_time = time.time()\n self.logger.debug(\"capture frame\")\n frame += 1\n f, img = cap.read()\n self.logger.debug(\"frame %d: capture time: %.4f\" % (\n frame, time.time() - start_time))\n\n split_time = time.time()\n if img is not None:\n self.show_frame(img)\n else:\n self.main.gui_do(self.viewer.stop_refresh)\n done = True\n\n end_time = time.time()\n self.logger.debug(\n \"update frame time %.4f sec\" % (end_time - split_time))\n\n if end_time - last_report > 5.0:\n # report measured FPS every 5 sec or so\n last_report = end_time\n stats = self.viewer.get_refresh_stats()\n self.logger.info(\n \"screen fps: %(fps).2f jitter: %(jitter).5f \"\n \"late pct,avg: %(late_pct).1f%%, %(late_avg).4f \"\n \"early pct,avg: %(early_pct).1f%%, %(early_avg).4f\" % stats)\n\n elapsed_time = end_time - start_time\n sleep_time = self.playback_rate - elapsed_time\n if sleep_time < 0:\n self.logger.warning(\"capture underrun %.4f sec\" % (-sleep_time))\n\n else:\n sleep_time = max(sleep_time, 0.0)\n self.logger.debug(\"sleeping for %.4f sec\" % (sleep_time))\n time.sleep(sleep_time)\n\n self.logger.info(\"capture video loop terminating...\")\n\n\ndef main(options, args):\n\n logger = log.get_logger(\"example2\", options=options)\n\n if options.toolkit is None:\n logger.error(\"Please choose a GUI toolkit with -t option\")\n\n # decide our toolkit, then import\n ginga_toolkit.use(options.toolkit)\n\n # 
event for synchronizing exit of all threads\n ev_quit = threading.Event()\n\n gv = GingaVision(logger, ev_quit, options)\n gv.top.resize(670, 540)\n gv.top.show()\n gv.top.raise_()\n\n # start video capture thread\n if len(args) > 0:\n filename = args[0]\n else:\n # default video input device\n filename = 0\n\n gv.main.nongui_do(gv.capture_video, filename)\n\n gv.main.mainloop()\n logger.info(\"program terminating...\")\n sys.exit(0)\n\n\nif __name__ == '__main__':\n\n # Parse command line options\n from argparse import ArgumentParser\n\n argprs = ArgumentParser()\n\n argprs.add_argument(\"--debug\", dest=\"debug\", default=False,\n action=\"store_true\",\n help=\"Enter the pdb debugger on main()\")\n argprs.add_argument(\"--fps\", dest=\"fps\", metavar=\"FPS\",\n type=float, default=None,\n help=\"Force a FPS (frames/sec)\")\n argprs.add_argument(\"--optimize\", dest=\"optimize\", default=False,\n action=\"store_true\",\n help=\"Perform some optimizations to improve FPS\")\n argprs.add_argument(\"-t\", \"--toolkit\", dest=\"toolkit\", metavar=\"NAME\",\n default='qt',\n help=\"Choose GUI toolkit (gtk|qt)\")\n argprs.add_argument(\"--profile\", dest=\"profile\", action=\"store_true\",\n default=False,\n help=\"Run the profiler on main()\")\n argprs.add_argument(\"-r\", \"--render\", dest=\"render\", default='widget',\n help=\"Set render type {widget|opengl}\")\n log.addlogopts(argprs)\n\n (options, args) = argprs.parse_known_args(sys.argv[1:])\n\n # Are we debugging this?\n if options.debug:\n import pdb\n\n pdb.run('main(options, args)')\n\n # Are we profiling this?\n elif options.profile:\n import profile\n\n print((\"%s profile:\" % sys.argv[0]))\n profile.run('main(options, args)')\n\n else:\n main(options, args)\n\n# END\n", "#\n# Plot.py -- Plotting function for Ginga viewer.\n#\n# This is open-source software licensed under a BSD license.\n# Please see the file LICENSE.txt for details.\n#\nimport matplotlib\nmatplotlib.use('GTK3Cairo')\nfrom matplotlib.backends.backend_gtk3cairo import (FigureCanvasGTK3Cairo\n as FigureCanvas) # noqa\nfrom ginga.gtk3w import Widgets # noqa\n\n\nclass PlotWidget(Widgets.WidgetBase):\n\n def __init__(self, plot, width=500, height=500):\n super(PlotWidget, self).__init__()\n\n self.widget = FigureCanvas(plot.get_figure())\n self.plot = plot\n self.logger = plot.logger\n\n self.widget.set_size_request(width, height)\n self.widget.show_all()\n\n def set_plot(self, plot):\n self.plot = plot\n self.logger = plot.logger\n self.logger.debug(\"set_plot called\")\n\n def configure_window(self, wd, ht):\n self.logger.debug(\"canvas resized to %dx%d\" % (wd, ht))\n fig = self.plot.get_figure()\n fig.set_size_inches(float(wd) / fig.dpi, float(ht) / fig.dpi)\n" ]
[ [ "numpy.isnan" ], [ "matplotlib.use" ] ]
markelg/ESMValCore
[ "5656fb8b546eeb4d750a424de7ed56a237edfabb" ]
[ "tests/unit/preprocessor/_other/test_other.py" ]
[ "\"\"\"Unit tests for the :func:`esmvalcore.preprocessor._other` module.\"\"\"\n\nimport unittest\n\nimport iris.coord_categorisation\nimport iris.coords\nimport numpy as np\nfrom cf_units import Unit\nfrom iris.cube import Cube\nfrom numpy.testing import assert_array_equal\n\nfrom esmvalcore.preprocessor._other import clip\n\n\nclass TestOther(unittest.TestCase):\n \"\"\"Test class for _other.\"\"\"\n def test_clip(self):\n \"\"\"Test clip function.\"\"\"\n cube = Cube(np.array([-10, 0, 10]))\n cube.add_dim_coord(\n iris.coords.DimCoord(\n np.arange(3),\n standard_name='time',\n units=Unit('days since 1950-01-01 00:00:00',\n calendar='gregorian'),\n ),\n 0,\n )\n # Cube needs to be copied, since it is modified in-place and test cube\n # should not change.\n assert_array_equal(clip(cube.copy(), 0, None).data,\n np.array([0, 0, 10]))\n assert_array_equal(clip(cube.copy(), None, 0).data,\n np.array([-10, 0, 0]))\n assert_array_equal(clip(cube.copy(), -1, 2).data,\n np.array([-1, 0, 2]))\n # Masked cube TODO\n # No parameters specified\n with self.assertRaises(ValueError):\n clip(cube, None, None)\n # Maximum lower than minimum\n with self.assertRaises(ValueError):\n clip(cube, 10, 8)\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "numpy.arange", "numpy.array" ] ]
allam-labs/compression
[ "0c0e40f3899bcb1f14c4a9d4fbcf6906e40ef033" ]
[ "tensorflow_compression/python/ops/math_ops_test.py" ]
[ "# Copyright 2018 Google LLC. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for the math operations.\"\"\"\n\nfrom absl.testing import parameterized\nimport scipy.stats\nimport tensorflow as tf\nfrom tensorflow_compression.python.ops import math_ops\nfrom tensorflow_compression.python.ops import soft_round_ops\n\n\nclass MathTest(tf.test.TestCase, parameterized.TestCase):\n\n @parameterized.parameters(\"disconnected\", \"identity\", \"identity_if_towards\")\n def test_upper_bound_has_correct_outputs_and_gradients(self, gradient):\n inputs = tf.constant([-1, 1], dtype=tf.float32)\n with tf.GradientTape(persistent=True) as tape:\n tape.watch(inputs)\n outputs = math_ops.upper_bound(inputs, 0, gradient=gradient)\n pgrads = tape.gradient(outputs, inputs, tf.ones_like(inputs))\n ngrads = tape.gradient(outputs, inputs, -tf.ones_like(inputs))\n self.assertAllEqual(outputs, [-1, 0])\n if gradient == \"disconnected\":\n self.assertAllEqual(pgrads, [1, 0])\n self.assertAllEqual(ngrads, [-1, 0])\n elif gradient == \"identity\":\n self.assertAllEqual(pgrads, [1, 1])\n self.assertAllEqual(ngrads, [-1, -1])\n else:\n self.assertAllEqual(pgrads, [1, 1])\n self.assertAllEqual(ngrads, [-1, 0])\n\n def test_upper_bound_invalid(self):\n with self.assertRaises(ValueError):\n math_ops.upper_bound(tf.zeros((1, 2)), 0, gradient=\"invalid\")\n\n @parameterized.parameters(\"disconnected\", \"identity\", \"identity_if_towards\")\n def test_lower_bound_has_correct_outputs_and_gradients(self, gradient):\n inputs = tf.constant([-1, 1], dtype=tf.float32)\n with tf.GradientTape(persistent=True) as tape:\n tape.watch(inputs)\n outputs = math_ops.lower_bound(inputs, 0, gradient=gradient)\n pgrads = tape.gradient(outputs, inputs, tf.ones_like(inputs))\n ngrads = tape.gradient(outputs, inputs, -tf.ones_like(inputs))\n print(ngrads)\n self.assertAllEqual(outputs, [0, 1])\n if gradient == \"disconnected\":\n self.assertAllEqual(pgrads, [0, 1])\n self.assertAllEqual(ngrads, [0, -1])\n elif gradient == \"identity\":\n self.assertAllEqual(pgrads, [1, 1])\n self.assertAllEqual(ngrads, [-1, -1])\n else:\n self.assertAllEqual(pgrads, [0, 1])\n self.assertAllEqual(ngrads, [-1, -1])\n\n def test_lower_bound_invalid(self):\n with self.assertRaises(ValueError):\n math_ops.lower_bound(tf.zeros((1, 2)), 0, gradient=\"invalid\")\n\n\nclass PerturbAndApplyTest(tf.test.TestCase):\n\n def test_perturb_and_apply_noise(self):\n x = tf.random.normal([10000], seed=0)\n y, x_plus_u0 = math_ops.perturb_and_apply(\n tf.identity, x, expected_grads=True)\n u0 = x_plus_u0-x\n u1 = y - x\n # Check if residuals are as expected\n self.assertAllClose(u0, u1)\n # Check if noise has expected uniform distribution\n _, p = scipy.stats.kstest(u0, \"uniform\", (-0.5, 1.0))\n self.assertAllLessEqual(tf.abs(u0), 0.5)\n self.assertGreater(p, 1e-6)\n\n def test_perturb_and_apply_gradient_soft_round(self):\n f = soft_round_ops.soft_round\n x = 
tf.linspace(-2.0, 2.0, 200)\n temperature = 7.0\n with tf.GradientTape(persistent=True) as g:\n g.watch(x)\n y = math_ops.perturb_and_apply(f, x, temperature, expected_grads=True)[0]\n dx = g.gradient(y, x)\n self.assertAllClose(dx, tf.ones_like(dx))\n\n def test_perturb_and_apply_gradient_parabola(self):\n f = lambda x, a: a*x*x\n x = tf.linspace(-2.0, 2.0, 200)\n a = 7.0\n with tf.GradientTape(persistent=True) as g:\n g.watch(x)\n y = math_ops.perturb_and_apply(f, x, a, expected_grads=True)[0]\n dx = g.gradient(y, x)\n self.assertAllClose(dx, f(x+.5, a)-f(x-.5, a))\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n" ]
[ [ "tensorflow.constant", "tensorflow.zeros", "tensorflow.ones_like", "tensorflow.test.main", "tensorflow.linspace", "tensorflow.random.normal", "tensorflow.abs", "tensorflow.GradientTape" ] ]
webdizz/expansion-ai
[ "80db4937365cfbecfbbd3c83f02ba0d740f422af" ]
[ "expansionai_env.py" ]
[ "import logging\nimport sys\n\nimport gym\nimport numpy as np\nfrom gym import spaces\nfrom gym.utils import seeding\nfrom six import StringIO\n\nlogger = logging.getLogger('ExpansionAiEnv')\n\nMY_LAYER = 1\n\n\nclass ExpansionAiEnv(gym.Env):\n metadata = {'render.modes': ['ansi']}\n\n def __init__(self, board_size=4, armies=4, offset_x=0, offset_y=0):\n \"\"\" Initializes an env \"\"\"\n assert isinstance(board_size, int) and board_size >= 1, 'Invalid board size: {}'.format(board_size)\n self.board_size = board_size\n self.armies = armies\n self.initial_armies = armies\n self.offset_x = offset_x\n self.offset_y = offset_y\n\n self.cell_movements = 8 # 8 movements per cell\n logger.info('Env init was called with board size \"%s\" and armies \"%s\"' % (board_size, armies))\n\n self.board = np.random.randn(self.board_size, self.board_size)\n self.board_flatten = self.board.flatten()\n\n self.board_action_space = np.random.randn(self.board_size * self.board_size, self.cell_movements)\n self.board_action_space_flatten = self.board_action_space.flatten()\n\n self.move_down = [5, 4, 3]\n self.move_up = [7, 0, 1]\n self.move_left = [7, 6, 5]\n self.move_right = [1, 2, 3]\n\n # TODO: add armies into account\n self.action_space = spaces.Discrete(self.cell_movements)\n # self.action_space = spaces.Box(0, 8, shape=400)\n # .Discrete(board_size * board_size * self.cell_movements)\n self.observation_space = spaces.Box(-4, 20, shape=400)\n logger.info(\"Env action_space: %s and observation_space: %s\" % (self.action_space, self.observation_space))\n\n self._seed()\n\n def _seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n def _step(self, action):\n \"\"\" add step logic here \"\"\"\n self.step_num += 1\n if self.done:\n return self.state, 0., True, {'state': self.state}\n # resize actions to have it squared to simplify coordination\n actions_squared = np.resize(action, (20, 20))\n\n self.move(actions_squared)\n reward = self.game_finished()\n logger.debug(\n 'Env movement after step {} for action \"{}\" for armies {} lead to new state \\n{}\\n'.format(\n self.step_num, action,\n self.armies,\n self.state[MY_LAYER]))\n\n self.done = reward >= 1 or reward == -1\n return self.state, reward, self.done, {'state': self.state, 'step': self.step_num, 'armies': self.armies}\n\n def move(self, actions_squared, new_armies=10):\n occupied_cells = self.resolve_occupied_cells()\n movable_cells = self.resolve_movable_cells()\n\n logger.debug((\"Env occupied cells \\n{}\\n movable cells \\n{}\".format(occupied_cells, movable_cells)))\n\n for cell_to_move in movable_cells:\n movement_action = actions_squared[cell_to_move[0]][cell_to_move[1]]\n to_pos = self.action_to_coordinate(movement_action, cell_to_move)\n previously_armies_in_cell = self.state[MY_LAYER, cell_to_move[0], cell_to_move[1]]\n armies_to_move = previously_armies_in_cell - 1\n # keep at least 1 army in cell\n self.state[MY_LAYER, cell_to_move[0], cell_to_move[1]] = previously_armies_in_cell - armies_to_move\n\n current_armies_in_cell = self.state[MY_LAYER, to_pos[0], to_pos[1]]\n logger.debug(\n \"Env move cell \\n{}\\n to new position \\n{} according to {}\".format(cell_to_move, to_pos,\n movement_action))\n self.state[MY_LAYER, to_pos[0], to_pos[1]] = new_armies + current_armies_in_cell + armies_to_move\n\n logger.debug(\"New board state {}\".format(self.state[MY_LAYER]))\n\n def _reset(self):\n self.step_num = 0\n self.state = np.zeros((2, self.board_size, self.board_size))\n self.done = False\n # place 
armies to initial state\n self.state[MY_LAYER, :, :] = 0\n self.armies = self.initial_armies\n self.state[MY_LAYER, self.board_size - 2 - self.offset_y, self.offset_x] = self.armies\n self.resolve_occupied_cells()\n self.resolve_movable_cells()\n logger.debug(\"Env model initial state: \\n{}\".format(self.state[MY_LAYER]))\n return self.state\n\n def resolve_movable_cells(self):\n movable_cells = np.argwhere(self.state[MY_LAYER] > 1)\n self.movable_cells_num = movable_cells.size\n return movable_cells\n\n def resolve_occupied_cells(self):\n occupied_cells = np.argwhere(self.state[MY_LAYER] > 0)\n self.occupied_cells_num = occupied_cells.size\n return occupied_cells\n\n def action_to_coordinate(self, movement_action, current_position):\n movement_action = np.argwhere(self.board_action_space == self.board_action_space_flatten[movement_action])[0]\n\n next_position_row = current_position[0]\n if np.argwhere(self.move_up == movement_action[1]) > -1:\n next_position_row -= 1\n elif np.argwhere(self.move_down == movement_action[1]) > -1:\n next_position_row += 1\n if next_position_row == -1:\n next_position_row = current_position[0]\n if next_position_row > self.board_size - 1:\n next_position_row = current_position[0]\n\n next_position_col = current_position[1]\n if np.argwhere(self.move_left == movement_action[1]) > -1:\n next_position_col -= 1\n elif np.argwhere(self.move_right == movement_action[1]) > -1:\n next_position_col += 1\n if next_position_col == -1:\n next_position_col = current_position[1]\n if next_position_col > self.board_size - 1:\n next_position_col = current_position[1]\n\n next_position = [next_position_row, next_position_col]\n return next_position\n\n def game_finished(self):\n # Returns 1 if player 1 wins, -1 if player 2 wins and 0 otherwise\n self.armies = current_num_of_armies = np.sum(self.state[MY_LAYER], dtype=np.int32)\n logger.debug(\"Env current armies num %s\" % current_num_of_armies)\n occupied_cells_num = self.occupied_cells_num\n self.resolve_occupied_cells()\n current_occupied_cells_num = self.occupied_cells_num\n\n movable_cells_num = self.movable_cells_num\n self.resolve_movable_cells()\n current_movable_cells = self.movable_cells_num\n\n if 0 not in self.state[MY_LAYER]:\n logger.info(\"Env wow, is about to get a reward \\n{}\\n\".format((self.state[MY_LAYER])))\n return 1\n elif (current_num_of_armies <= 0 or current_num_of_armies > 6000) and self.step_num > 0:\n return -1 # our army was destroyed\n elif np.argwhere(self.state[MY_LAYER] < 0).size > 0:\n return -1\n elif self.step_num >= 600:\n return -1\n elif current_occupied_cells_num - occupied_cells_num > 2:\n return 0.01\n elif current_movable_cells - movable_cells_num > 2:\n return 0.05\n elif current_occupied_cells_num > 200:\n return 0.01\n else:\n return 0\n\n\ndef _render(self, mode='ansi', close=False):\n \"\"\" Renders environment \"\"\"\n logger.debug('Env render was executed with mode \"{}\" and close \"{}'.format(mode, close))\n if close:\n return\n\n # process board\n board = self.state\n out_file = StringIO() if mode == 'ansi' else sys.stdout\n out_file.write(' ' * 13)\n out_file.write('\\t')\n\n for column in range(board.shape[1]):\n out_file.write('\\t' + str(column + 1) + '|')\n out_file.write('\\n')\n\n # underline\n out_file.write('\\t')\n out_file.write('-' * (self.board_size * 11 - 2))\n out_file.write('\\n')\n # end of header #\n\n for row in range(board.shape[1]):\n out_file.write('\\t')\n out_file.write(str(row + 1) + '\\t|')\n for column in range(board.shape[1]):\n 
out_file.write(str(board[MY_LAYER, row, column]))\n out_file.write('\\t|')\n out_file.write('\\n')\n\n # horizontal line\n out_file.write('\\t')\n out_file.write('-' * (self.board_size * 11 - 3))\n out_file.write('\\n')\n\n if mode != 'live':\n return out_file\n" ]
[ [ "numpy.resize", "numpy.argwhere", "numpy.random.randn", "numpy.zeros", "numpy.sum" ] ]
champon1020/scene_graph_benchmark
[ "970a7499f8fa2854810bd650f6c991bcad5748db" ]
[ "maskrcnn_benchmark/modeling/detector/generalized_rcnn.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n\"\"\"\nImplements the Generalized R-CNN framework\n\"\"\"\n\nimport torch\nfrom torch import nn\n\nfrom maskrcnn_benchmark.structures.bounding_box import BoxList\nfrom maskrcnn_benchmark.structures.image_list import to_image_list\n\nfrom ..backbone import build_backbone\nfrom ..rpn.rpn import build_rpn\nfrom ..roi_heads.roi_heads import build_roi_heads\n\n\nclass GeneralizedRCNN(nn.Module):\n \"\"\"\n Main class for Generalized R-CNN. Currently supports boxes and masks.\n It consists of three main parts:\n - backbone\n - rpn\n - heads: takes the features + the proposals from the RPN and computes\n detections / masks from it.\n \"\"\"\n\n def __init__(self, cfg):\n super(GeneralizedRCNN, self).__init__()\n self.device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n self.backbone = build_backbone(cfg)\n self.rpn = build_rpn(cfg, self.backbone.out_channels)\n self.roi_heads = build_roi_heads(cfg, self.backbone.out_channels)\n self.force_boxes = cfg.MODEL.RPN.FORCE_BOXES\n\n def forward(self, images, targets=None):\n \"\"\"\n Arguments:\n images (list[Tensor] or ImageList): images to be processed\n targets (list[BoxList]): ground-truth boxes present in the image (optional)\n\n Returns:\n result (list[BoxList] or dict[Tensor]): the output from the model.\n During training, it returns a dict[Tensor] which contains the losses.\n During testing, it returns list[BoxList] contains additional fields\n like `scores`, `labels` and `mask` (for Mask R-CNN models).\n\n \"\"\"\n if self.training and targets is None:\n raise ValueError(\"In training mode, targets should be passed\")\n if self.force_boxes and targets is None:\n # note targets cannot be None but could have 0 box.\n raise ValueError(\"In force_boxes setting, targets should be passed\")\n images = to_image_list(images)\n features = self.backbone(images.tensors)\n\n if targets:\n targets = [target.to(self.device)\n for target in targets if target is not None]\n\n if self.force_boxes:\n proposals = [BoxList(target.bbox, target.size, target.mode)\n for target in targets]\n if self.training:\n # note we still need to compute a loss using all rpn\n # named parameters, otherwise it will\n # give unused_parameters error in distributed training.\n null_loss = 0\n for key, param in self.rpn.named_parameters():\n null_loss += 0.0 * param.sum()\n proposal_losses = {'rpn_null_loss', null_loss}\n else:\n proposals, proposal_losses = self.rpn(images, features, targets)\n\n if self.roi_heads:\n x, result, detector_losses = self.roi_heads(features, proposals, targets)\n else:\n # RPN-only models don't have roi_heads\n x = features\n result = proposals\n detector_losses = {}\n\n if self.training:\n losses = {}\n losses.update(detector_losses)\n losses.update(proposal_losses)\n return losses\n\n return result\n" ]
[ [ "torch.cuda.is_available" ] ]
ishine/MAI
[ "64753cd2f59af2949896937c2e5dbfc4d8bab1e0" ]
[ "test/optest/tfunits/MaxPoolTest.py" ]
[ "import tensorflow as tf\n\nsess = tf.InteractiveSession()\n\nprint(\"-------------------With1Channels VALID------------\")\n\n#NHWC 2,2,4,1\ninputData=tf.constant([[[[-1.],[-2],[-3],[-4]],[[-5],[-6],[-7],[-8]]],[[[-9],[-10],[-11],[-12]],[[-13],[-14],[-15],[-16]]]])\n\n#target 2,1,3,1\ntarget=tf.nn.max_pool(inputData, [1,2,2,1], [1,1,1,1], 'VALID').eval()\n\nprint(target)\n\nprint(\"-------------------With1Channels SAME------------\")\ntarget=tf.nn.max_pool(inputData, [1,2,2,1], [1,1,1,1], 'SAME').eval()\nprint(target)\n\n\nprint(\"-------------------WithMultiChannels VALID------------\")\n#NHWC 1,2,2,2\ninputData=tf.constant([[[[-1.,-2],[-3,-4]],[[-5,-6],[-7,-8]]]])\n#target 1,2,2,2\ntarget=tf.nn.max_pool(inputData, [1,2,2,1], [1,1,1,1], 'VALID').eval()\nprint(target)\n\nprint(\"-------------------WithMultiChannels SAME------------\")\n#target 1,2,2,2\ntarget=tf.nn.max_pool(inputData, [1,2,2,1], [1,1,1,1], 'SAME').eval()\nprint(target)\n" ]
[ [ "tensorflow.nn.max_pool", "tensorflow.constant", "tensorflow.InteractiveSession" ] ]
fangchenli/zipline
[ "92abca6e0adb01af23cefd4de80c2c2721d72b89" ]
[ "tests/test_examples.py" ]
[ "#\n# Copyright 2013 Quantopian, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom functools import partial\nfrom operator import itemgetter\nimport tarfile\n\nimport matplotlib\nimport pandas as pd\n\nfrom zipline import examples\nfrom zipline.data.bundles import register, unregister\nfrom zipline.testing import test_resource_path, parameter_space\nfrom zipline.testing.fixtures import (\n WithTmpDir,\n ZiplineTestCase,\n read_checked_in_benchmark_data,\n)\nfrom zipline.testing.predicates import assert_equal\nfrom zipline.utils.cache import dataframe_cache\n\n\n# Otherwise the next line sometimes complains about being run too late.\n_multiprocess_can_split_ = False\n\nmatplotlib.use('Agg')\n\nEXAMPLE_MODULES = examples.load_example_modules()\n\n\nclass ExamplesTests(WithTmpDir, ZiplineTestCase):\n # some columns contain values with unique ids that will not be the same\n\n @classmethod\n def init_class_fixtures(cls):\n super().init_class_fixtures()\n\n register('test', lambda *args: None)\n cls.add_class_callback(partial(unregister, 'test'))\n\n with tarfile.open(test_resource_path('example_data.tar.gz')) as tar:\n tar.extractall(cls.tmpdir.path)\n\n cls.expected_perf = dataframe_cache(\n cls.tmpdir.getpath(\n 'example_data/expected_perf/%s' %\n pd.__version__.replace('.', '-'),\n ),\n serialization='pickle',\n )\n\n cls.no_benchmark_expected_perf = {\n example_name: cls._no_benchmark_expectations_applied(\n expected_perf.copy()\n )\n for example_name, expected_perf in cls.expected_perf.items()\n }\n\n @staticmethod\n def _no_benchmark_expectations_applied(expected_perf):\n # With no benchmark, expect zero results for these metrics:\n expected_perf[['alpha', 'beta']] = None\n for col in ['benchmark_period_return', 'benchmark_volatility']:\n expected_perf.loc[\n ~pd.isnull(expected_perf[col]),\n col,\n ] = 0.0\n return expected_perf\n\n @parameter_space(\n example_name=sorted(EXAMPLE_MODULES),\n benchmark_returns=[read_checked_in_benchmark_data(), None]\n )\n def test_example(self, example_name, benchmark_returns):\n actual_perf = examples.run_example(\n EXAMPLE_MODULES,\n example_name,\n # This should match the invocation in\n # zipline/tests/resources/rebuild_example_data\n environ={\n 'ZIPLINE_ROOT': self.tmpdir.getpath('example_data/root'),\n },\n benchmark_returns=benchmark_returns,\n )\n if benchmark_returns is not None:\n expected_perf = self.expected_perf[example_name]\n else:\n expected_perf = self.no_benchmark_expected_perf[example_name]\n\n # Exclude positions column as the positions do not always have the\n # same order\n columns = [column for column in examples._cols_to_check\n if column != 'positions']\n assert_equal(\n actual_perf[columns],\n expected_perf[columns],\n # There is a difference in the datetime columns in pandas\n # 0.16 and 0.17 because in 16 they are object and in 17 they are\n # datetime[ns, UTC]. 
We will just ignore the dtypes for now.\n check_dtype=False,\n )\n # Sort positions by SID before comparing\n assert_equal(\n expected_perf['positions'].apply(sorted, key=itemgetter('sid')),\n actual_perf['positions'].apply(sorted, key=itemgetter('sid')),\n )\n" ]
[ [ "matplotlib.use", "pandas.__version__.replace", "pandas.isnull" ] ]
PwLo3K46/vivit
[ "937642975be2ade122632d4eaef273461992d7ab" ]
[ "exp/exp02_noise_analysis/run_noise.py" ]
[ "\"\"\"Load saved models and evaluate noise.\"\"\"\n\nimport os\nfrom collections import defaultdict\n\nimport torch\nfrom backpack import backpack, extend\nfrom backpack.core.derivatives.convnd import weight_jac_t_save_memory\nfrom deepobs.pytorch.testproblems import cifar100_allcnnc\nfrom shared import (\n CONFIGURATIONS,\n criterion,\n full_batch_exact,\n get_noise_savepath,\n load_checkpoint_data,\n one_group,\n)\n\nfrom exp.utils.deepobs import get_deepobs_architecture\nfrom exp.utils.path import write_to_json\n\n\ndef get_mini_batch(problem_cls, N):\n \"\"\"Draw mini batch on which noise is computed.\"\"\"\n _, _, X, y = get_deepobs_architecture(problem_cls, N)\n\n return X, y\n\n\ndef compute_noise(model, loss_func, X, y, param_groups, computations):\n \"\"\"Compute GGN eigenvalues, first- and second-order directional derivatives.\"\"\"\n model.zero_grad()\n\n loss = loss_func(model(X), y)\n\n with backpack(\n *computations.get_extensions(param_groups),\n extension_hook=computations.get_extension_hook(\n param_groups,\n keep_gram_mat=False,\n keep_gram_evals=True,\n keep_gram_evecs=False,\n keep_gammas=True,\n keep_lambdas=True,\n keep_batch_size=False,\n keep_backpack_buffers=False,\n ),\n ), weight_jac_t_save_memory(save_memory=True):\n loss.backward()\n\n grad_norm = {}\n for group in param_groups:\n grad_flat = torch.cat([p.grad.flatten() for p in group[\"params\"]])\n grad_norm[id(group)] = grad_flat.norm()\n\n return {\n \"gram_evals\": computations._gram_evals,\n \"gammas\": computations._gammas,\n \"lambdas\": computations._lambdas,\n \"grad_norm\": grad_norm,\n }\n\n\ndef run_noise(\n model,\n loss_func,\n X,\n y,\n device,\n param_groups_fn=one_group,\n computations_fn=full_batch_exact,\n):\n \"\"\"Run noise computation and return results.\"\"\"\n model = extend(model.to(device))\n loss_func = extend(loss_func.to(device))\n X = X.to(device)\n y = y.to(device)\n\n param_groups = param_groups_fn(model, criterion)\n computations = computations_fn(N)\n\n noise = compute_noise(model, loss_func, X, y, param_groups, computations)\n noise = convert_to_numpy(noise)\n print(noise)\n\n return noise\n\n\ndef convert_to_numpy(dictionary):\n \"\"\"Convert all torch tensors to numpy for JSON serialization.\"\"\"\n converted = {}\n\n for key in dictionary.keys():\n item = dictionary[key]\n\n if isinstance(item, dict):\n converted_item = convert_to_numpy(item)\n elif isinstance(item, torch.Tensor):\n converted_item = item.detach().cpu().numpy()\n else:\n raise ValueError(f\"Unknown conversion for key {key} with item {item}\")\n\n converted[key] = converted_item\n\n return converted\n\n\nif __name__ == \"__main__\":\n configurations = [config() for config in CONFIGURATIONS]\n\n for config in configurations:\n # use the same mini-batch for eval, gamma, and lambda at checkpoints\n\n optimizer_cls = config[\"optimizer_cls\"]\n problem_cls = config[\"problem_cls\"]\n savepath = get_noise_savepath(problem_cls, optimizer_cls)\n\n if os.path.exists(savepath):\n print(f\"[exp] File {savepath} already exists. 
Skipping computation.\")\n continue\n\n torch.manual_seed(0)\n N = config[\"batch_size\"]\n\n if problem_cls == cifar100_allcnnc:\n N = 64\n\n X, y = get_mini_batch(problem_cls, N)\n\n data = defaultdict(dict)\n\n for checkpoint in config[\"checkpoints\"]:\n checkpoint_data = load_checkpoint_data(\n checkpoint, optimizer_cls, problem_cls\n )\n\n model = checkpoint_data.pop(\"model\")\n loss_func = checkpoint_data.pop(\"loss_func\")\n device = torch.device(\"cpu\")\n\n epoch_count, batch_count = [int(count) for count in checkpoint]\n data[epoch_count][batch_count] = run_noise(model, loss_func, X, y, device)\n\n write_to_json(savepath, data)\n" ]
[ [ "torch.device", "torch.manual_seed" ] ]
RevanMacQueen/Riverswim-Variants
[ "a3593c6b2960185e1815b79aba5a2ccdb6ff9ea7" ]
[ "riverswim_variants/envs/stochastic_riverswim.py" ]
[ "import gym\nimport numpy as np\nfrom numpy.random import normal\n\nfrom gym import error, spaces, utils\nfrom gym.utils import seeding\nfrom gym.envs.toy_text import discrete\n\n\nLEFT = 0\nRIGHT = 1\n\ndef categorical_sample(prob_n, np_random):\n \"\"\"\n Sample from categorical distribution\n Each row specifies class probabilities\n \"\"\"\n prob_n = np.asarray(prob_n)\n csprob_n = np.cumsum(prob_n)\n return (csprob_n > np_random.rand()).argmax()\n\n\nclass StochasticRiverSwimEnv(discrete.DiscreteEnv):\n \"\"\"\n Riverswim but rewards are now sampled according to a gaussian\n \"\"\"\n metadata = {'render.modes': ['human']}\n\n def __init__(self, nS=6):\n # Defining the number of actions\n nA = 2\n \n # Defining the reward system and dynamics of RiverSwim environment\n P, isd = self.__init_dynamics(nS, nA)\n \n super(StochasticRiverSwimEnv, self).__init__(nS, nA, P, isd)\n\n def __init_dynamics(self, nS, nA):\n \n # P[s][a] == [(probability, nextstate, reward, done), ...]\n P = {}\n for s in range(nS):\n P[s] = {a: [] for a in range(nA)}\n\n # Rewarded Transitions\n # NOTE: The rewards are functions, i.e. the generating distribution\n P[0][LEFT] = [(1., 0, lambda:normal(5/1000, 1), 0)]\n P[nS-1][RIGHT] = [(0.9, nS-1, lambda:normal(1, 1), 0), (0.1, nS-2, lambda:normal(1, 1), 0)]\n\n # Left Transitions\n for s in range(1, nS):\n P[s][LEFT] = [(1., max(0, s-1), lambda:normal(0, 0), 0)]\n\n # RIGHT Transitions\n for s in range(1, nS - 1):\n P[s][RIGHT] = [(0.3, min(nS - 1, s + 1), lambda:normal(0, 0), 0), (0.6, s, lambda:normal(0, 0), 0), (0.1, max(0, s-1), lambda:normal(0, 0), 0)]\n\n P[0][RIGHT] = [(0.3, 0, lambda:normal(0, 0), 0), (0.7, 1, lambda:normal(0, 0), 0)]\n\n # Starting State Distribution\n isd = np.zeros(nS)\n isd[0] = 1.\n\n return P, isd\n\n\n def step(self, a):\n transitions = self.P[self.s][a]\n i = categorical_sample([t[0] for t in transitions], self.np_random)\n p, s, r, d = transitions[i]\n r = r()\n self.s = s\n self.lastaction = a\n return (int(s), r, d, {\"prob\": p})\n\n\n def render(self, mode='human'):\n pass\n\n def close(self):\n pass\n" ]
[ [ "numpy.asarray", "numpy.random.normal", "numpy.zeros", "numpy.cumsum" ] ]
ramseylab/RTX
[ "e5782b874021f22cafb21658f8c688a865186eeb" ]
[ "code/reasoningtool/QuestionAnswering/Q2Solution.py" ]
[ "import numpy as np\nnp.warnings.filterwarnings('ignore')\nimport os\nimport ReasoningUtilities as RU\nimport requests_cache\nrequests_cache.install_cache('orangeboard')\nimport argparse\nfrom itertools import compress\nimport sys\nimport CustomExceptions\ntry:\n\timport QueryNCBIeUtils\nexcept ImportError:\n\tsys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../kg-construction'))) # Go up one level and look for it\n\timport QueryNCBIeUtils\n\nQueryNCBIeUtils =QueryNCBIeUtils.QueryNCBIeUtils()\n\nimport FormatOutput\nimport networkx as nx\n\n\ndrug_to_disease_doid = dict()\ndisease_doid_to_description = dict()\nwith open(os.path.abspath('../../../data/q2/q2-drugandcondition-list-mapped.txt'), 'r') as fid:\n\ti = 0\n\tfor line in fid.readlines():\n\t\tif i == 0:\n\t\t\ti += 1\n\t\t\tcontinue\n\t\telse:\n\t\t\ti += 1\n\t\t\tline = line.strip()\n\t\t\tline_split = line.split('\\t')\n\t\t\tdrug = line_split[1].lower()\n\t\t\tdisease_doid = line_split[-1]\n\t\t\tdisease_descr = line_split[2]\n\t\t\tdrug_to_disease_doid[drug] = disease_doid\n\t\t\tdisease_doid_to_description[disease_doid] = disease_descr\n\n\ndef answerQ2(drug_name, disease_name, k, use_json=False, max_gd=1):\n\t\"\"\"\n\tFind the clinical outcome pathway connecting the drug to the disease\n\t:param drug_name: a name of a drug (node.id in the KG)\n\t:param disease_name: a name of a disease (node.id in the KG, eg DOID:)\n\t:param k: Number of paths to return (int)\n\t:param use_json: if you want the answers as JSON.\n\t:param max_gd: maximum value for google distance\n\t:return: Text answer\n\t\"\"\"\n\tresponse = FormatOutput.FormatResponse(2)\n\tif not RU.node_exists_with_property(drug_name, 'id'):\n\t\terror_message = \"Sorry, the drug %s is not yet in our knowledge graph.\" % drug_name\n\t\terror_code = \"DrugNotFound\"\n\t\tif not use_json:\n\t\t\tprint(error_message)\n\t\t\treturn 1\n\t\telse:\n\t\t\tresponse.add_error_message(error_code, error_message)\n\t\t\tresponse.print()\n\t\t\treturn 1\n\tif not RU.node_exists_with_property(disease_name, 'id'):\n\t\terror_message = \"Sorry, the disease %s is not yet in our knowledge graph.\" % disease_name\n\t\terror_code = \"DiseaseNotFound\"\n\t\tif not use_json:\n\t\t\tprint(error_message)\n\t\t\treturn 1\n\t\telse:\n\t\t\tresponse.add_error_message(error_code, error_message)\n\t\t\tresponse.print()\n\t\t\treturn 1\n\n\t# TODO: could dynamically get the terminal node label as some are (drug, phenotype) pairs\n\t# get the relevant subgraph between the source and target nodes\n\ttry: # First look for COP's where the gene is associated to the disease\n\t\t#g = RU.return_subgraph_through_node_labels(drug_name, 'chemical_substance', disease_name, 'disease',\n\t\t#\t\t\t\t\t\t\t\t\t\t\t['protein', 'anatomical_entity', 'phenotypic_feature'],\n\t\t#\t\t\t\t\t\t\t\t\t\t\twith_rel=['protein', 'gene_associated_with_condition', 'disease'],\n\t\t#\t\t\t\t\t\t\t\t\t\t\tdirected=False)\n\t\tg = RU.return_subgraph_through_node_labels(drug_name, 'chemical_substance', disease_name, 'disease',\n\t\t\t\t\t\t\t\t\t\t\t\t ['protein', 'anatomical_entity', 'phenotypic_feature'],\n\t\t\t\t\t\t\t\t\t\t\t\t directed=False)\n\texcept CustomExceptions.EmptyCypherError:\n\t\ttry: # Then look for any sort of COP\n\t\t\tg = RU.return_subgraph_through_node_labels(drug_name, 'chemical_substance', disease_name, 'disease',\n\t\t\t\t\t\t\t\t\t\t\t\t\t['protein', 'anatomical_entity', 'phenotypic_feature'],\n\t\t\t\t\t\t\t\t\t\t\t\t\tdirected=False)\n\t\texcept 
CustomExceptions.EmptyCypherError:\n\t\t\ttry: # Then look for any sort of connection between source and target\n\t\t\t\tg = RU.get_shortest_subgraph_between_nodes(drug_name, 'chemical_substance', disease_name, 'disease',\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tmax_path_len=4, limit=50, debug=False, directed=False)\n\t\t\texcept CustomExceptions.EmptyCypherError:\n\t\t\t\terror_code = \"NoPathsFound\"\n\t\t\t\ttry:\n\t\t\t\t\terror_message = \"Sorry, I could not find any paths connecting %s to %s via protein, pathway, \"\\\n\t\t\t\t\t\t\"tissue, and phenotype. The drug and/or disease may not be one of the entities I know about, or they \"\\\n\t\t\t\t\t\t\"do not connect via a known pathway, tissue, and phenotype (understudied)\" % (\n\t\t\t\t\t\tRU.get_node_property(drug_name, 'name'), RU.get_node_property(disease_name, 'name'))\n\t\t\t\texcept:\n\t\t\t\t\terror_message = \"Sorry, I could not find any paths connecting %s to %s via protein, pathway, \"\\\n\t\t\t\t\t\t\"tissue, and phenotype. The drug and/or disease may not be one of the entities I know about, or they \"\\\n\t\t\t\t\t\t\"do not connect via a known pathway, tissue, and phenotype (understudied)\" % (RU.get_node_property(drug_name, 'name'), RU.get_node_property(disease_name, 'name'))\n\t\t\t\tif not use_json:\n\t\t\t\t\tprint(error_message)\n\t\t\t\t\treturn 1\n\t\t\t\telse:\n\t\t\t\t\tresponse.add_error_message(error_code, error_message)\n\t\t\t\t\tresponse.print()\n\t\t\t\t\treturn 1\n\t# Decorate with normalized google distance\n\tdisease_descr = RU.get_node_property(disease_name, 'name')\n\t# include context in the google distance TODO: this may not actually help things... need to test\n\tRU.weight_graph_with_google_distance(g, context_node_id=disease_name, context_node_descr=disease_descr, default_value=max_gd)\n\n\t# Decorate with drug binding probability (1-x since these will be multiplicatively merged)\n\t#RU.weight_graph_with_property(g, 'probability', transformation=lambda x: 1-x, default_value=2)\n\tmax_prob_weight = 100\n\tRU.weight_graph_with_property(g, 'probability', transformation=lambda x: min(1/float(x), max_prob_weight), default_value=max_prob_weight)\n\n\t# Combine the properties\n\tRU.merge_graph_properties(g, ['gd_weight', 'probability'], 'merged', operation=lambda x,y: x*y)\n\n\t# Get the top k paths\n\tnode_paths, edge_paths, weights = RU.get_top_shortest_paths(g, drug_name, disease_name, k, property='merged')\n\tactual_k = len(weights) # since there may be less than k paths\n\n\t# For each of these paths, connect the protein to a pathway\n\t# First, grab the proteins and locations\n\tproteins_per_path = []\n\tproteins_per_path_locations = []\n\tfor path in node_paths:\n\t\tfor i, node in enumerate(path):\n\t\t\tif \"protein\" in node[\"labels\"]:\n\t\t\t\tproteins_per_path.append(node)\n\t\t\t\tproteins_per_path_locations.append(i)\n\t\t\t\tbreak\n\n\t# Connect a reactome pathway to the proteins (only for the first seen protein in each path)\n\tpathways_per_path = []\n\tfor protein in proteins_per_path:\n\t\tpathways = RU.get_one_hop_target('protein', protein['names'], 'pathway', 'participates_in')\n\t\tpathways_per_path.append(pathways)\n\n\t# Delete those elements that don't have a reactome pathway\n\tbad_paths = []\n\tfor i, pathways in enumerate(pathways_per_path):\n\t\tif not pathways:\n\t\t\tbad_paths.append(i)\n\tfor i in reversed(bad_paths):\n\t\tdel node_paths[i]\n\t\tdel edge_paths[i]\n\t\tdel weights[i]\n\t\tdel proteins_per_path[i]\n\t\tdel proteins_per_path_locations[i]\n\t\tdel 
pathways_per_path[i]\n\n\t# Look for the pathway that has both a small GD between protein and disease\n\tbest_pathways_per_path = []\n\tbest_pathways_per_path_gd = []\n\tdisease_common_name = RU.get_node_property(disease_name, 'name', node_label='disease')\n\tfor j, pathways in enumerate(pathways_per_path):\n\t\tsmallest_gd = np.inf\n\t\tbest_pathway = \"\"\n\t\tfor pathway in pathways:\n\t\t\tprotein_pathway_gd = QueryNCBIeUtils.normalized_google_distance(\n\t\t\t\tQueryNCBIeUtils.get_uniprot_names(proteins_per_path[j]['names']),\n\t\t\t\tQueryNCBIeUtils.get_reactome_names(pathway),\n\t\t\t\tmesh1=False, mesh2=False)\n\t\t\tif np.isnan(protein_pathway_gd):\n\t\t\t\tprotein_pathway_gd = max_gd\n\n\t\t\tpathway_disease_gd = QueryNCBIeUtils.normalized_google_distance(disease_common_name,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tQueryNCBIeUtils.get_reactome_names(pathway),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tmesh1=True, mesh2=False)\n\t\t\tif np.isnan(pathway_disease_gd):\n\t\t\t\tpathway_disease_gd = max_gd\n\n\t\t\tif protein_pathway_gd + pathway_disease_gd < smallest_gd:\n\t\t\t\tsmallest_gd = protein_pathway_gd + pathway_disease_gd\n\t\t\t\tbest_pathway = pathway\n\t\tbest_pathways_per_path.append(best_pathway)\n\t\tbest_pathways_per_path_gd.append(smallest_gd)\n\n\t# Delete those elements that don't have a best reactome pathway\n\tbad_paths = []\n\tfor i, pathways in enumerate(best_pathways_per_path):\n\t\tif not pathways:\n\t\t\tbad_paths.append(i)\n\tfor i in reversed(bad_paths):\n\t\tdel node_paths[i]\n\t\tdel edge_paths[i]\n\t\tdel weights[i]\n\t\tdel proteins_per_path[i]\n\t\tdel proteins_per_path_locations[i]\n\t\tdel pathways_per_path[i]\n\t\tdel best_pathways_per_path[i]\n\t\tdel best_pathways_per_path_gd[i]\n\n\t# Insert the best pathway into the node_path\n\tfor i in range(len(node_paths)):\n\t\tbest_pathway = best_pathways_per_path[i]\n\t\t# Convert the pathway name to a graph and grab the resulting data\n\t\tgraph = RU.get_node_as_graph(best_pathway)\n\t\tbest_pathway_with_node_data = list(graph.nodes(data=True)).pop()[1]\n\t\t# same for the edge\n\t\tgraph = RU.get_shortest_subgraph_between_nodes(proteins_per_path[i][\"names\"], \"protein\", best_pathway,\n\t\t\t\t\t\t\t\t\t\t\t\t\t \"pathway\", max_path_len=1, limit=1, directed=False)\n\t\tedge_data = list(graph.edges(data=True)).pop()[2]\n\t\tbest_pathway_gd = best_pathways_per_path_gd[i]\n\t\tprotein_location = proteins_per_path_locations[i]\n\t\tnode_paths[i].insert(protein_location + 1, best_pathway_with_node_data)\n\t\tedge_paths[i].insert(protein_location, edge_data)\n\t\tweights[i] += best_pathway_gd\n\n\t# resort the paths\n\tnode_paths = [x for _, x in sorted(zip(weights, node_paths), key=lambda pair: pair[0])]\n\tedge_paths = [x for _, x in sorted(zip(weights, edge_paths), key=lambda pair: pair[0])]\n\tweights.sort()\n\n\t# Then display the results....\n\tif not use_json:\n\t\tprint(\"The possible clinical outcome pathways include: \")\n\t\tfor path_ind in range(len(node_paths)):\n\t\t\tnode_path = node_paths[path_ind]\n\t\t\tedge_path = edge_paths[path_ind]\n\t\t\tto_print = \"\"\n\t\t\tfor node_index in range(len(node_path)):\n\t\t\t\tto_print += \" (\" + str(node_path[node_index]['names']) + \",\" + str(node_path[node_index]['properties']['name']) + \")\"\n\t\t\t\tif node_index < len(edge_path):\n\t\t\t\t\tto_print += \" -[\" + str(edge_path[node_index]['type']) + \"]-\"\n\t\t\t#to_print += \". Distance (smaller is better): %f.\" % weights[path_ind]\n\t\t\tto_print += \". 
Confidence (larger is better): %f.\" % (1-weights[path_ind]/float(len(edge_path)*max_gd*max_prob_weight))\n\t\t\tprint(to_print)\n\telse: # you want the result object model\n\t\tfor path_ind in range(len(node_paths)):\n\t\t\t# Format the free text portion\n\t\t\tnode_path = node_paths[path_ind]\n\t\t\tedge_path = edge_paths[path_ind]\n\t\t\tto_print = \"\"\n\t\t\tfor node_index in range(len(node_path)):\n\t\t\t\tto_print += \" \" + str(node_path[node_index]['properties']['name'])\n\t\t\t\tif node_index < len(edge_path):\n\t\t\t\t\tto_print += \" -\" + str(edge_path[node_index]['type']) + \"->\"\n\t\t\t#to_print += \". Distance (smaller is better): %f.\" % weights[path_ind]\n\t\t\tconf = 1-weights[path_ind]/float(len(edge_path)*max_gd*max_prob_weight)\n\t\t\tto_print += \". Confidence (larger is better): %f.\" % conf\n\t\t\t# put the nodes/edges into a networkx graph\n\t\t\tg = nx.Graph()\n\t\t\tnodes_to_add = []\n\t\t\tedges_to_add = []\n\t\t\tfor node in node_path:\n\t\t\t\tnodes_to_add.append((node['properties']['UUID'], node))\n\t\t\tfor edge in edge_path:\n\t\t\t\tedges_to_add.append((edge['properties']['source_node_uuid'], edge['properties']['target_node_uuid'], edge))\n\t\t\tg.add_nodes_from(nodes_to_add)\n\t\t\tg.add_edges_from(edges_to_add)\n\t\t\t# populate the response. Quick hack to convert\n\t\t\t#response.add_subgraph(g.nodes(data=True), g.edges(data=True), to_print, 1-weights[path_ind]/float(max([len(x) for x in edge_paths])*max_gd))\n\t\t\tresponse.add_subgraph(g.nodes(data=True), g.edges(data=True), to_print, conf)\n\t\tresponse.add_neighborhood_graph(g.nodes(data=True), g.edges(data=True),\tconfidence=None) # Adding the neighborhood graph\n\t\tresponse.print()\n\n\ndef main():\n\tparser = argparse.ArgumentParser(description=\"Runs the reasoning tool on Question 2\",\n\t\t\t\t\t\t\t\t\tformatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\tparser.add_argument('-r', '--drug', type=str, help=\"Input drug (name in the graph, eg. 'CHEMBL154' (naproxen))\", default='ChEMBL:154')\n\tparser.add_argument('-d', '--disease', type=str, help=\"Input disease (Identifier in the graph, eg 'DOID:8398')\", default='DOID:8398')\n\tparser.add_argument('-a', '--all', action=\"store_true\", help=\"Flag indicating you want to run it on all Q2 drugs + diseases\",\n\t\t\t\t\t\tdefault=False)\n\tparser.add_argument('-k', '--kpaths', type=int, help=\"Number of paths to return.\", default=10)\n\tparser.add_argument('-j', '--json', action=\"store_true\", help=\"Flag indicating you want the results in JSON format.\", default=False)\n\n\tif '-h' in sys.argv or '--help' in sys.argv:\n\t\tRU.session.close()\n\t\tRU.driver.close()\n\n\t# Parse and check args\n\targs = parser.parse_args()\n\tdrug = args.drug\n\tdisease = args.disease\n\tall_d = args.all\n\tk = args.kpaths\n\tuse_json = args.json\n\n\tif all_d:\n\t\tfor i, drug in enumerate(list(drug_to_disease_doid.keys())):\n\t\t\tdisease = drug_to_disease_doid[drug] # doid\n\t\t\tdisease_description = disease_doid_to_description[disease] # disease description\n\t\t\tprint(\"\\n\")\n\t\t\tprint((drug, disease_description, disease))\n\t\t\tprint(i)\n\t\t\tres = answerQ2(drug, disease, k, use_json)\n\telse:\n\t\tres = answerQ2(drug, disease, k, use_json)\n\nif __name__ == \"__main__\":\n\tmain()\n\n\n\n" ]
[ [ "numpy.isnan", "numpy.warnings.filterwarnings" ] ]
YueNing/tn_source_code
[ "515713c9349a2444021fdc9b02fd483f5ffd3e56", "515713c9349a2444021fdc9b02fd483f5ffd3e56" ]
[ "drl_negotiation/a2c/distributions.py", "drl_negotiation/core.py" ]
[ "import numpy as np\nimport tensorflow.compat.v1 as tf\nfrom tensorflow.python.ops import math_ops\nfrom gym import spaces\nimport drl_negotiation.utils as U\nfrom stable_baselines.common.tf_layers import linear\n\n\nclass ProbabilityDistribution(object):\n \"\"\"\n Base class for describing a probability distribution.\n \"\"\"\n def __init__(self):\n super(ProbabilityDistribution, self).__init__()\n\n def flatparam(self):\n \"\"\"\n Return the direct probabilities\n\n :return: ([float]) the probabilities\n \"\"\"\n raise NotImplementedError\n\n def mode(self):\n \"\"\"\n Returns the probability\n\n :return: (Tensorflow Tensor) the deterministic action\n \"\"\"\n raise NotImplementedError\n\n def neglogp(self, x):\n \"\"\"\n returns the of the negative log likelihood\n\n :param x: (str) the labels of each index\n :return: ([float]) The negative log likelihood of the distribution\n \"\"\"\n # Usually it's easier to define the negative logprob\n raise NotImplementedError\n\n def kl(self, other):\n \"\"\"\n Calculates the Kullback-Leibler divergence from the given probability distribution\n\n :param other: ([float]) the distribution to compare with\n :return: (float) the KL divergence of the two distributions\n \"\"\"\n raise NotImplementedError\n\n def entropy(self):\n \"\"\"\n Returns Shannon's entropy of the probability\n\n :return: (float) the entropy\n \"\"\"\n raise NotImplementedError\n\n def sample(self):\n \"\"\"\n returns a sample from the probability distribution\n\n :return: (Tensorflow Tensor) the stochastic action\n \"\"\"\n raise NotImplementedError\n\n def logp(self, x):\n \"\"\"\n returns the of the log likelihood\n\n :param x: (str) the labels of each index\n :return: ([float]) The log likelihood of the distribution\n \"\"\"\n return - self.neglogp(x)\n\n\nclass ProbabilityDistributionType(object):\n \"\"\"\n Parametrized family of probability distributions\n \"\"\"\n\n def probability_distribution_class(self):\n \"\"\"\n returns the ProbabilityDistribution class of this type\n\n :return: (Type ProbabilityDistribution) the probability distribution class associated\n \"\"\"\n raise NotImplementedError\n\n def proba_distribution_from_flat(self, flat):\n \"\"\"\n Returns the probability distribution from flat probabilities\n flat: flattened vector of parameters of probability distribution\n\n :param flat: ([float]) the flat probabilities\n :return: (ProbabilityDistribution) the instance of the ProbabilityDistribution associated\n \"\"\"\n return self.probability_distribution_class()(flat)\n\n def proba_distribution_from_latent(self, pi_latent_vector, vf_latent_vector, init_scale=1.0, init_bias=0.0):\n \"\"\"\n returns the probability distribution from latent values\n\n :param pi_latent_vector: ([float]) the latent pi values\n :param vf_latent_vector: ([float]) the latent vf values\n :param init_scale: (float) the initial scale of the distribution\n :param init_bias: (float) the initial bias of the distribution\n :return: (ProbabilityDistribution) the instance of the ProbabilityDistribution associated\n \"\"\"\n raise NotImplementedError\n\n def param_shape(self):\n \"\"\"\n returns the shape of the input parameters\n\n :return: ([int]) the shape\n \"\"\"\n raise NotImplementedError\n\n def sample_shape(self):\n \"\"\"\n returns the shape of the sampling\n\n :return: ([int]) the shape\n \"\"\"\n raise NotImplementedError\n\n def sample_dtype(self):\n \"\"\"\n returns the type of the sampling\n\n :return: (type) the type\n \"\"\"\n raise NotImplementedError\n\n def 
param_placeholder(self, prepend_shape, name=None):\n \"\"\"\n returns the TensorFlow placeholder for the input parameters\n\n :param prepend_shape: ([int]) the prepend shape\n :param name: (str) the placeholder name\n :return: (TensorFlow Tensor) the placeholder\n \"\"\"\n return tf.placeholder(dtype=tf.float32, shape=prepend_shape + self.param_shape(), name=name)\n\n def sample_placeholder(self, prepend_shape, name=None):\n \"\"\"\n returns the TensorFlow placeholder for the sampling\n\n :param prepend_shape: ([int]) the prepend shape\n :param name: (str) the placeholder name\n :return: (TensorFlow Tensor) the placeholder\n \"\"\"\n return tf.placeholder(dtype=self.sample_dtype(), shape=prepend_shape + self.sample_shape(), name=name)\n\n\nclass CategoricalProbabilityDistributionType(ProbabilityDistributionType):\n def __init__(self, n_cat):\n \"\"\"\n The probability distribution type for categorical input\n\n :param n_cat: (int) the number of categories\n \"\"\"\n self.n_cat = n_cat\n\n def probability_distribution_class(self):\n return CategoricalProbabilityDistribution\n\n def proba_distribution_from_latent(self, pi_latent_vector, vf_latent_vector, init_scale=1.0, init_bias=0.0):\n pdparam = linear(pi_latent_vector, 'pi', self.n_cat, init_scale=init_scale, init_bias=init_bias)\n q_values = linear(vf_latent_vector, 'q', self.n_cat, init_scale=init_scale, init_bias=init_bias)\n return self.proba_distribution_from_flat(pdparam), pdparam, q_values\n\n def param_shape(self):\n return [self.n_cat]\n\n def sample_shape(self):\n return []\n\n def sample_dtype(self):\n return tf.int64\n\n\nclass MultiCategoricalProbabilityDistributionType(ProbabilityDistributionType):\n def __init__(self, n_vec):\n \"\"\"\n The probability distribution type for multiple categorical input\n\n :param n_vec: ([int]) the vectors\n \"\"\"\n # Cast the variable because tf does not allow uint32\n self.n_vec = n_vec.astype(np.int32)\n # Check that the cast was valid\n assert (self.n_vec > 0).all(), \"Casting uint32 to int32 was invalid\"\n\n def probability_distribution_class(self):\n return MultiCategoricalProbabilityDistribution\n\n def proba_distribution_from_flat(self, flat):\n return MultiCategoricalProbabilityDistribution(self.n_vec, flat)\n\n def proba_distribution_from_latent(self, pi_latent_vector, vf_latent_vector, init_scale=1.0, init_bias=0.0):\n pdparam = linear(pi_latent_vector, 'pi', sum(self.n_vec), init_scale=init_scale, init_bias=init_bias)\n q_values = linear(vf_latent_vector, 'q', sum(self.n_vec), init_scale=init_scale, init_bias=init_bias)\n return self.proba_distribution_from_flat(pdparam), pdparam, q_values\n\n def param_shape(self):\n return [sum(self.n_vec)]\n\n def sample_shape(self):\n return [len(self.n_vec)]\n\n def sample_dtype(self):\n return tf.int64\n\n\nclass DiagGaussianProbabilityDistributionType(ProbabilityDistributionType):\n def __init__(self, size):\n \"\"\"\n The probability distribution type for multivariate Gaussian input\n\n :param size: (int) the number of dimensions of the multivariate gaussian\n \"\"\"\n self.size = size\n\n def probability_distribution_class(self):\n return DiagGaussianProbabilityDistribution\n\n def proba_distribution_from_flat(self, flat):\n \"\"\"\n returns the probability distribution from flat probabilities\n\n :param flat: ([float]) the flat probabilities\n :return: (ProbabilityDistribution) the instance of the ProbabilityDistribution associated\n \"\"\"\n return self.probability_distribution_class()(flat)\n\n def 
proba_distribution_from_latent(self, pi_latent_vector, vf_latent_vector, init_scale=1.0, init_bias=0.0):\n mean = linear(pi_latent_vector, 'pi', self.size, init_scale=init_scale, init_bias=init_bias)\n logstd = tf.get_variable(name='pi/logstd', shape=[1, self.size], initializer=tf.zeros_initializer())\n pdparam = tf.concat([mean, mean * 0.0 + logstd], axis=1)\n q_values = linear(vf_latent_vector, 'q', self.size, init_scale=init_scale, init_bias=init_bias)\n return self.proba_distribution_from_flat(pdparam), mean, q_values\n\n def param_shape(self):\n return [2 * self.size]\n\n def sample_shape(self):\n return [self.size]\n\n def sample_dtype(self):\n return tf.float32\n\n\nclass BernoulliProbabilityDistributionType(ProbabilityDistributionType):\n def __init__(self, size):\n \"\"\"\n The probability distribution type for Bernoulli input\n\n :param size: (int) the number of dimensions of the Bernoulli distribution\n \"\"\"\n self.size = size\n\n def probability_distribution_class(self):\n return BernoulliProbabilityDistribution\n\n def proba_distribution_from_latent(self, pi_latent_vector, vf_latent_vector, init_scale=1.0, init_bias=0.0):\n pdparam = linear(pi_latent_vector, 'pi', self.size, init_scale=init_scale, init_bias=init_bias)\n q_values = linear(vf_latent_vector, 'q', self.size, init_scale=init_scale, init_bias=init_bias)\n return self.proba_distribution_from_flat(pdparam), pdparam, q_values\n\n def param_shape(self):\n return [self.size]\n\n def sample_shape(self):\n return [self.size]\n\n def sample_dtype(self):\n return tf.int32\n\n\nclass CategoricalProbabilityDistribution(ProbabilityDistribution):\n def __init__(self, logits):\n \"\"\"\n Probability distributions from categorical input\n\n :param logits: ([float]) the categorical logits input\n \"\"\"\n self.logits = logits\n super(CategoricalProbabilityDistribution, self).__init__()\n\n def flatparam(self):\n return self.logits\n\n def mode(self):\n return tf.argmax(self.logits, axis=-1)\n\n def neglogp(self, x):\n # Note: we can't use sparse_softmax_cross_entropy_with_logits because\n # the implementation does not allow second-order derivatives...\n one_hot_actions = tf.one_hot(x, self.logits.get_shape().as_list()[-1])\n return tf.nn.softmax_cross_entropy_with_logits_v2(\n logits=self.logits,\n labels=tf.stop_gradient(one_hot_actions))\n\n def kl(self, other):\n a_0 = self.logits - tf.reduce_max(self.logits, axis=-1, keepdims=True)\n a_1 = other.logits - tf.reduce_max(other.logits, axis=-1, keepdims=True)\n exp_a_0 = tf.exp(a_0)\n exp_a_1 = tf.exp(a_1)\n z_0 = tf.reduce_sum(exp_a_0, axis=-1, keepdims=True)\n z_1 = tf.reduce_sum(exp_a_1, axis=-1, keepdims=True)\n p_0 = exp_a_0 / z_0\n return tf.reduce_sum(p_0 * (a_0 - tf.log(z_0) - a_1 + tf.log(z_1)), axis=-1)\n\n def entropy(self):\n a_0 = self.logits - tf.reduce_max(self.logits, axis=-1, keepdims=True)\n exp_a_0 = tf.exp(a_0)\n z_0 = tf.reduce_sum(exp_a_0, axis=-1, keepdims=True)\n p_0 = exp_a_0 / z_0\n return tf.reduce_sum(p_0 * (tf.log(z_0) - a_0), axis=-1)\n\n def sample(self):\n # Gumbel-max trick to sample\n # a categorical distribution (see http://amid.fish/humble-gumbel)\n uniform = tf.random_uniform(tf.shape(self.logits), dtype=self.logits.dtype)\n return tf.argmax(self.logits - tf.log(-tf.log(uniform)), axis=-1)\n\n @classmethod\n def fromflat(cls, flat):\n \"\"\"\n Create an instance of this from new logits values\n\n :param flat: ([float]) the categorical logits input\n :return: (ProbabilityDistribution) the instance from the given categorical input\n \"\"\"\n 
return cls(flat)\n\n\nclass MultiCategoricalProbabilityDistribution(ProbabilityDistribution):\n def __init__(self, nvec, flat):\n \"\"\"\n Probability distributions from multicategorical input\n\n :param nvec: ([int]) the sizes of the different categorical inputs\n :param flat: ([float]) the categorical logits input\n \"\"\"\n self.flat = flat\n self.categoricals = list(map(CategoricalProbabilityDistribution, tf.split(flat, nvec, axis=-1)))\n super(MultiCategoricalProbabilityDistribution, self).__init__()\n\n def flatparam(self):\n return self.flat\n\n def mode(self):\n return tf.stack([p.mode() for p in self.categoricals], axis=-1)\n\n def neglogp(self, x):\n return tf.add_n([p.neglogp(px) for p, px in zip(self.categoricals, tf.unstack(x, axis=-1))])\n\n def kl(self, other):\n return tf.add_n([p.kl(q) for p, q in zip(self.categoricals, other.categoricals)])\n\n def entropy(self):\n return tf.add_n([p.entropy() for p in self.categoricals])\n\n def sample(self):\n return tf.stack([p.sample() for p in self.categoricals], axis=-1)\n\n @classmethod\n def fromflat(cls, flat):\n \"\"\"\n Create an instance of this from new logits values\n\n :param flat: ([float]) the multi categorical logits input\n :return: (ProbabilityDistribution) the instance from the given multi categorical input\n \"\"\"\n raise NotImplementedError\n\n\nclass DiagGaussianProbabilityDistribution(ProbabilityDistribution):\n def __init__(self, flat):\n \"\"\"\n Probability distributions from multivariate Gaussian input\n\n :param flat: ([float]) the multivariate Gaussian input data\n \"\"\"\n self.flat = flat\n mean, logstd = tf.split(axis=len(flat.shape) - 1, num_or_size_splits=2, value=flat)\n self.mean = mean\n self.logstd = logstd\n self.std = tf.exp(logstd)\n super(DiagGaussianProbabilityDistribution, self).__init__()\n\n def flatparam(self):\n return self.flat\n\n def mode(self):\n # Bounds are taken into account outside this class (during training only)\n return self.mean\n\n def neglogp(self, x):\n return 0.5 * tf.reduce_sum(tf.square((x - self.mean) / self.std), axis=-1) \\\n + 0.5 * np.log(2.0 * np.pi) * tf.cast(tf.shape(x)[-1], tf.float32) \\\n + tf.reduce_sum(self.logstd, axis=-1)\n\n def kl(self, other):\n assert isinstance(other, DiagGaussianProbabilityDistribution)\n return tf.reduce_sum(other.logstd - self.logstd + (tf.square(self.std) + tf.square(self.mean - other.mean)) /\n (2.0 * tf.square(other.std)) - 0.5, axis=-1)\n\n def entropy(self):\n return tf.reduce_sum(self.logstd + .5 * np.log(2.0 * np.pi * np.e), axis=-1)\n\n def sample(self):\n # Bounds are taken into acount outside this class (during training only)\n # Otherwise, it changes the distribution and breaks PPO2 for instance\n return self.mean + self.std * tf.random_normal(tf.shape(self.mean),\n dtype=self.mean.dtype)\n\n @classmethod\n def fromflat(cls, flat):\n \"\"\"\n Create an instance of this from new multivariate Gaussian input\n\n :param flat: ([float]) the multivariate Gaussian input data\n :return: (ProbabilityDistribution) the instance from the given multivariate Gaussian input data\n \"\"\"\n return cls(flat)\n\n\nclass BernoulliProbabilityDistribution(ProbabilityDistribution):\n def __init__(self, logits):\n \"\"\"\n Probability distributions from Bernoulli input\n\n :param logits: ([float]) the Bernoulli input data\n \"\"\"\n self.logits = logits\n self.probabilities = tf.sigmoid(logits)\n super(BernoulliProbabilityDistribution, self).__init__()\n\n def flatparam(self):\n return self.logits\n\n def mode(self):\n return 
tf.round(self.probabilities)\n\n def neglogp(self, x):\n return tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.logits,\n labels=tf.cast(x, tf.float32)),\n axis=-1)\n\n def kl(self, other):\n return tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(logits=other.logits,\n labels=self.probabilities), axis=-1) - \\\n tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.logits,\n labels=self.probabilities), axis=-1)\n\n def entropy(self):\n return tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.logits,\n labels=self.probabilities), axis=-1)\n\n def sample(self):\n samples_from_uniform = tf.random_uniform(tf.shape(self.probabilities))\n return tf.cast(math_ops.less(samples_from_uniform, self.probabilities), tf.float32)\n\n @classmethod\n def fromflat(cls, flat):\n \"\"\"\n Create an instance of this from new Bernoulli input\n\n :param flat: ([float]) the Bernoulli input data\n :return: (ProbabilityDistribution) the instance from the given Bernoulli input data\n \"\"\"\n return cls(flat)\n\n\nclass SoftCategoricalProbabilityDistributionType(ProbabilityDistributionType):\n def __init__(self, ncat):\n self.ncat = ncat\n\n def proba_distribution_from_flat(self, flat):\n return self.probability_distribution_class()(flat)\n\n def probability_distribution_class(self):\n return SoftCategoricalProbabilityDistribution\n\n def param_shape(self):\n return [self.ncat]\n\n def sample_shape(self):\n return [self.ncat]\n\n def sample_dtype(self):\n return tf.float32\n\n\nclass SoftCategoricalProbabilityDistribution(ProbabilityDistribution):\n def __init__(self, logits):\n self.logits = logits\n def flatparam(self):\n return self.logits\n def mode(self):\n return tf.nn.softmax(self.logits, axis=-1)\n def logp(self, x):\n return -tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=x)\n def kl(self, other):\n a0 = self.logits - U.max(self.logits, axis=1, keepdims=True)\n a1 = other.logits - U.max(other.logits, axis=1, keepdims=True)\n ea0 = tf.exp(a0)\n ea1 = tf.exp(a1)\n z0 = U.sum(ea0, axis=1, keepdims=True)\n z1 = U.sum(ea1, axis=1, keepdims=True)\n p0 = ea0 / z0\n return U.sum(p0 * (a0 - tf.log(z0) - a1 + tf.log(z1)), axis=1)\n def entropy(self):\n a0 = self.logits - U.max(self.logits, axis=1, keepdims=True)\n ea0 = tf.exp(a0)\n z0 = U.sum(ea0, axis=1, keepdims=True)\n p0 = ea0 / z0\n return U.sum(p0 * (tf.log(z0) - a0), axis=1)\n def sample(self):\n u = tf.random_uniform(tf.shape(self.logits))\n return tf.nn.softmax(self.logits - tf.log(-tf.log(u)), axis=-1)\n @classmethod\n def fromflat(cls, flat):\n return cls(flat)\n\ndef make_pd_type(ac_space):\n \"\"\"\n return an instance of ProbabilityDistributionType for the correct type of action space\n\n :param ac_space: (Gym Space) the input action space\n :return: (ProbabilityDistributionType) the appropriate instance of a ProbabilityDistributionType\n \"\"\"\n if isinstance(ac_space, spaces.Box):\n assert len(ac_space.shape) == 1, \"Error: the action space must be a vector\"\n pd_type = DiagGaussianProbabilityDistributionType(ac_space.shape[0])\n elif isinstance(ac_space, spaces.Discrete):\n # pd_type = CategoricalProbabilityDistributionType(ac_space.n)\n pd_type = SoftCategoricalProbabilityDistributionType(ac_space.n)\n elif isinstance(ac_space, spaces.MultiDiscrete):\n pd_type = MultiCategoricalProbabilityDistributionType(ac_space.nvec)\n elif isinstance(ac_space, spaces.MultiBinary):\n pd_type = BernoulliProbabilityDistributionType(ac_space.n)\n else:\n raise NotImplementedError(\"Error: 
probability distribution, not implemented for action space of type {}.\"\n .format(type(ac_space)) +\n \" Must be of type Gym Spaces: Box, Discrete, MultiDiscrete or MultiBinary.\")\n return pd_type\n\ndef shape_el(tensor, index):\n \"\"\"\n get the shape of a TensorFlow Tensor element\n\n :param tensor: (TensorFlow Tensor) the input tensor\n :param index: (int) the element\n :return: ([int]) the shape\n \"\"\"\n maybe = tensor.get_shape()[index]\n if maybe is not None:\n return maybe\n else:\n return tf.shape(tensor)[index]\n", "'''\n Core class, functions\n Author: naodongbanana\n E-Mail: n1085633848@outlook.com\n'''\nimport os, sys\nimport numpy as np\nfrom scml.scml2020 import SCML2020World, SCML2020Agent, is_system_agent\nfrom typing import Optional\nfrom drl_negotiation.hyperparameters import *\nimport yaml\nimport copy\nimport pickle\n\nclass AgentState:\n '''\n Agent state\n '''\n def __init__(self):\n # physical position for rendering\n self.p_pos = (0, 0)\n # others state\n self.o_negotiation_step = 0\n # financial report\n self.f: np.array = np.zeros(3)\n # self.f_init = 0\n # self.f_begin = 0\n # self.f_end = 0\n # current step\n # self.o_current_step = 0\n # management state, e.g. issues range\n # self.m = None\n # communication utterance\n self.c = None\n\nclass NegotiationRequestAction:\n DEFAULT_REQUEST = 0.0\n ACCEPT_REQUEST = 1.0\n REJECT_REQUEST = -1.0\n\nclass Action:\n '''\n agent's action\n m: management action\n e.g. discrete action --- accept or reject negotiation request\n continuous action --- range of issues for negotiating,\n (min, max, min, max, min, max)\n c: communication action\n e.g. send the info into public channel, secured, needs, negotiations, requests, \n or info of competitors predicted by agent\n '''\n def __init__(self):\n # agent management action, used after training, in test periode\n self.s = None\n self.s_vel = None\n\n # seller, used in training\n self.m = None\n self.m_vel = 10\n # buyer, used in training\n self.b = None\n self.b_vel = 10\n\n # agent communication action, communication channel\n self.c = None\n\nclass MySCML2020Agent(SCML2020Agent):\n '''\n My scml 2020 agent, subclass of scml2020agent,\n action_callback: action decided by the callback\n\n hook:\n init\n '''\n Owner = 'My'\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n # agents are manageable by default\n self.manageable = MANAGEABLE\n # cannot send communication signals\n self.silent = SLIENT\n # cannot observe the world\n self.blind = BLIND\n # management noise amount\n self.m_nois = None\n # communication noise amount\n self.c_nois = None\n # manageable range\n self.m_range = 1.0\n self.b_range = 1.0\n\n # state\n self.state = AgentState()\n # action\n self.action = Action()\n # heuristic behavior to execute\n self.action_callback = None\n # agents are interactive\n self.interative = False\n # agents are adversary\n self.adversary = False\n\n def init(self):\n super(MySCML2020Agent, self).init()\n\n @property\n def running_negotiations(self) -> [int, int]:\n \"\"\"\n\n Returns:\n number of runniing negotiations\n \"\"\"\n\n return self._count(super(MySCML2020Agent, self).running_negotiations)\n\n\n @property\n def negotiation_requests(self) -> [int, int]:\n \"\"\"\n\n Returns:\n number of standing negotiation requests, sell, buy\n \"\"\"\n return self._count(super(MySCML2020Agent, self).negotiation_requests)\n\n def _count(self, negotiations):\n sell = 0\n buy = 0\n for n in negotiations:\n if n.annotation[\"seller\"] == self.id:\n sell 
+=1\n elif n.annotation[\"buyer\"] == self.id:\n buy +=1\n return sell, buy\n \n def _get_obs(self, seller=True, scenario=\"scml\"):\n # local observation\n # TODO: different observation of buyer and seller, will be implemented here\n if scenario == \"scml\":\n \n o_m = self.awi.profile.costs\n o_m = o_m[:, self.awi.profile.processes]\n\n # agent information, agent's\n o_a = np.array([self._horizon])\n\n # catalog prices of products\n o_u_c = self.awi.catalog_prices\n # TODO: excepted value after predict\n o_u_e = np.array([self.expected_inputs, self.expected_outputs, self.input_cost, self.output_price])\n # TODO: trading strategy, needed and secured\n o_u_t = np.array([self.outputs_needed, self.outputs_secured, self.inputs_needed, self.inputs_secured])\n\n # running negotiation and negotiation request of agent\n o_q_n = np.array([\n self.running_negotiations,\n self.negotiation_requests,\n ])\n\n o_t_c = np.array([self.awi.current_step / self.awi.n_steps])\n\n # 2. Economic gap\n economic_gaps = []\n economic_gaps.append(self.state.f[2] - self.state.f[1])\n economic_gaps = np.array(economic_gaps)\n\n # return np.concatenate(economic_gaps + o_m.flatten() + o_a + o_u_c + o_u_e + o_u_t + o_q_n.flatten() + o_t_c)\n\n return np.concatenate((economic_gaps.flatten(), o_m.flatten(), o_a, o_u_c, o_q_n.flatten(), o_t_c))\n\n def init(self):\n super(MySCML2020Agent, self).init()\n if RUNNING_IN_SCML2020World:\n if not self.train:\n self._setup_model()\n\n\n\nclass TrainWorld(SCML2020World):\n \"\"\"\n Multi-Agent, SCML world, used for training\n \"\"\"\n def __init__(self, configuration=None, *args, **kwargs):\n # maddpg drived agents, heuristic agents, script drived agents, interative agents\n # self.agents = []\n # SELLER, BUYER\n self.system_entities = []\n\n # communication channel dimensionality\n self.dim_c = 2\n # negotiation management dimensionality\n self.dim_m = DIM_M # seller\n self.dim_b = DIM_B # buyer\n\n # simulation timestep\n self.dt = 0.1\n \n # world done\n self.__done = False\n\n # set up the scml2020world\n if configuration is None:\n configuration = SCML2020World.generate(\n *args,\n **kwargs\n )\n\n self.configuration = copy.deepcopy(configuration)\n\n super().__init__(**self.configuration)\n # set action_callback for agent which hasnot it\n for agent in self.agents.values():\n if not hasattr(agent, 'action_callback'):\n if is_system_agent(agent.id):\n agent.action_callback = 'system'\n self.system_entities.append(agent)\n else:\n agent.action_callback = 'heuristic'\n \n if not hasattr(agent, 'interactive'):\n agent.interactive = False\n\n if not hasattr(agent, 'state'):\n agent.state = AgentState()\n\n @property\n def entities(self):\n '''\n agents + system_entities\n '''\n return [agent for agent in self.agents.values()]\n\n @property\n def policy_agents(self):\n '''\n e.g. maddpg drived agents,\n '''\n return [agent for agent in self.entities if agent.action_callback is None]\n \n @property\n def heuristic_agents(self):\n '''\n e.g. heuristic agents, BuyCheapSellExpensiveAgent\n '''\n return [agent for agent in self.entities if agent.action_callback=='heuristic']\n\n @property\n def interactive_agents(self):\n '''\n e.g. 
controlled by user\n '''\n return [agent for agent in self.entities if agent.interactive]\n \n @property\n def script_agents(self):\n '''\n My script-drived agents, with action_callback\n '''\n return [agent for agent in self.entities if callable(agent.action_callback)] \n\n def step(self):\n # actions of policy agents are preset in environement.\n\n # set actions for heuristic_agents\n # controlled by scripts\n # agents have action_callback\n for agent in self.script_agents:\n agent.action = agent.action_callback(agent, self)\n\n # simulation is already ends\n if self.time >= self.time_limit:\n self.__done = True\n return\n \n if not super().step():\n self.__done = True\n return \n \n # update agents' state\n # policy agents\n for agent in self.policy_agents:\n self.update_agent_state(agent)\n \n @property \n def world_done(self):\n '''\n running info of world\n '''\n return self.__done\n\n def update_agent_state(self, agent: Optional[MySCML2020Agent]):\n # initial update the state of\n if agent.awi.current_step == 0:\n f_init = [_.initial_balance for _ in self.factories if _.agent_id == agent.id][0]\n f_begin = f_init\n f_end = f_begin\n agent.state.f = np.array([f_init, f_begin, f_end])\n else:\n # set financial status\n if agent.blind:\n # agent.state.m = np.zeros(self.dim_m)\n agent.state.f = np.zeros(3)\n else:\n\n # update agent state, get the management state\n # qvalues = (1, agent.target_quantity(agent.state.o_step, agent.state.o_is_sell))\n # tvalues = agent._trange(agent.state.o_negotiation_step, agent.state.o_step)\n # uvalues = agent._urange(agent.state.o_step, agent.state.o_is_sell, tvalues)\n # agent.state.m = [qvalues, tvalues, uvalues]\n\n f_end = [_.current_balance for _ in self.factories if _.agent_id == agent.id][0]\n agent.state.f[2] = f_end\n\n #TODO: interactive test\n agent.state.o_negotiation_step = agent.awi.current_step\n\n if agent.state.o_negotiation_step == agent.awi.current_step:\n # after calculate the reward, then update the f_begin\n pass\n else:\n f_begin = f_end\n agent.state.f[1] = f_begin\n\n # set communication status\n if agent.silent:\n agent.state.c = np.zeros(self.dim_c)\n else:\n noise = np.random.randn(*agent.action.c.shape) * agent.c_nois if agent.c_nois else 0.0\n agent.state.c = agent.action.c + noise\n\n def save_config(self, file_name: str):\n dump_data = {\n \"agent_types\": [_._type_name() for _ in self.configuration['agent_types']],\n 'agent_params': self.configuration['agent_params'],\n \"n_steps\": self.n_steps\n }\n try:\n with open(file_name+'.yaml', \"w\") as file:\n yaml.safe_dump(dump_data, file)\n except FileNotFoundError as e:\n logging.info(f\"not find file {file_name}\")\n logging.error(str(e))\n os.makedirs('/'.join(file_name.split('/')[0:-1]))\n try:\n with open(file_name + '.yaml', \"w\") as file:\n yaml.safe_dump(dump_data, file)\n except FileNotFoundError as e:\n logging.info(f\"not find file {file_name}!\")\n logging.error(str(e))\n except Exception as e:\n logging.info(f\"other errors when open file {file_name}!\")\n logging.error(str(e))\n sys.exit(1)\n\n with open(file_name+'.pkl', 'wb') as file:\n pickle.dump(dump_data, file)\n # super().save_config(file_name=file_name)\n" ]
[ [ "tensorflow.compat.v1.concat", "tensorflow.compat.v1.zeros_initializer", "tensorflow.compat.v1.shape", "tensorflow.python.ops.math_ops.less", "tensorflow.compat.v1.reduce_sum", "tensorflow.compat.v1.sigmoid", "tensorflow.compat.v1.round", "tensorflow.compat.v1.stop_gradient", "tensorflow.compat.v1.unstack", "numpy.log", "tensorflow.compat.v1.exp", "tensorflow.compat.v1.nn.softmax_cross_entropy_with_logits", "tensorflow.compat.v1.nn.softmax", "tensorflow.compat.v1.square", "tensorflow.compat.v1.split", "tensorflow.compat.v1.cast", "tensorflow.compat.v1.reduce_max", "tensorflow.compat.v1.log", "tensorflow.compat.v1.argmax", "tensorflow.compat.v1.nn.sigmoid_cross_entropy_with_logits" ], [ "numpy.random.randn", "numpy.array", "numpy.zeros" ] ]
ArashHosseini/deep_dream
[ "4f5fd39a6d0f23ff1dd2e33953e72c0b909f8a7e" ]
[ "inception.py" ]
[ "import numpy as np\nimport tensorflow as tf\n\nclass Inception:\n\n input_image = \"input:0\"\n layer_names = [\"conv2d0\", \"conv2d1\", \"conv2d2\",\n \"mixed3a\", \"mixed3b\",\n \"mixed4a\", \"mixed4b\", \"mixed4c\", \"mixed4d\", \"mixed4e\",\n \"mixed5a\", \"mixed5b\"]\n\n def __init__(self):\n self.graph = tf.Graph()\n with self.graph.as_default():\n with tf.gfile.FastGFile(\"inception/5h/tensorflow_inception_graph.pb\", \"rb\") as file:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(file.read())\n tf.import_graph_def(graph_def, name=\"\")\n self.input = self.graph.get_tensor_by_name(self.input_image)\n self.layers = [self.graph.get_tensor_by_name(name + \":0\") for name in self.layer_names]\n\n def get_feed_dict(self, image=None):\n image = np.expand_dims(image, axis=0)\n feed_dict = {self.input_image: image}\n return feed_dict\n\n def get_gradient(self, tensor):\n with self.graph.as_default():\n tensor = tf.square(tensor)\n tensor_mean = tf.reduce_mean(tensor)\n gradient = tf.gradients(tensor_mean, self.input)[0]\n return gradient\n" ]
[ [ "tensorflow.Graph", "numpy.expand_dims", "tensorflow.import_graph_def", "tensorflow.reduce_mean", "tensorflow.gradients", "tensorflow.square", "tensorflow.GraphDef", "tensorflow.gfile.FastGFile" ] ]
annakasprzik/qualle
[ "871f7fbce3d6d3da07fe7197cf21a5a68720645d" ]
[ "tests/features/test_confidence.py" ]
[ "# Copyright 2021 ZBW – Leibniz Information Centre for Economics\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport pytest\n\nimport numpy as np\nfrom qualle.features.confidence import ConfidenceFeatures\n\n\n@pytest.fixture\ndef data():\n return [\n [0] * 2,\n [1] * 5,\n list(range(3)),\n list(range(3))[::-1],\n [1, 2, 4]\n ]\n\n\ndef test_transform_computes_all_features(data):\n cf = ConfidenceFeatures()\n features = cf.transform(data)\n assert type(features) == np.ndarray\n assert (features == np.vstack([\n [0] * 4,\n [1] * 4,\n [0, 1, 1, 0],\n [0, 1, 1, 0],\n [1, 7 / 3, 2, 8]\n ])).all()\n\n\ndef test_transform_empty_row_gets_zero_value_as_default():\n cf = ConfidenceFeatures()\n\n features = cf.transform([[], [1] * 5])\n assert type(features) == np.ndarray\n assert (features == np.vstack([\n [0] * 4,\n [1] * 4,\n ])).all()\n" ]
[ [ "numpy.vstack" ] ]
hajapy/Hexy
[ "06d920ac7882e41bbbd87a3b3e8131c2aa98dea5" ]
[ "hexy/hex_map.py" ]
[ "\"\"\"\nThis file contains routines for storing the properties of hexes\n\nThe keys are just the axial coordinates.\n\nKey Rules:\n- must be written 'a,b' without quotes\n- no spaces\n- a and b can be negative, so it is okay if the key is written as\n '-a,b' or 'a,-b' or '-a,-b'\n\"\"\"\nimport numpy as np\n\nfrom errors import IncorrectCoordinatesError, HexExistsError, MismatchError\nfrom hexy import *\n\n# The bases of the axial coordinate system\nbases_mat = cube_to_axial(np.array([SE, E], dtype=int))\n\n\ndef make_key_from_coordinates(indexes):\n \"\"\"\n Converts indexes to string for hashing\n :param indexes: the indexes of a hex. nx2, n=number of index pairs\n :return: key for hashing based on index.\n \"\"\"\n return [str(int(index[0])) + ',' + str(int(index[1])) for index in indexes]\n\n\ndef solve_for_indexes(hexes):\n \"\"\"\n We want to solve for the coefficients in the linear combos.\n :param hexes: The hexes whose indexes we want to solve for.\n nx2, n=number of hexes\n :return: indexes of `hexes`\n \"\"\"\n if hexes.shape[1] != 2:\n raise IncorrectCoordinatesError(\"Must be axial coordinates!\")\n return np.linalg.solve(bases_mat, hexes.T).T\n\n\nclass HexMap(dict):\n def __init__(self):\n super(HexMap, self).__init__()\n\n def __setitem__(self, coordinates, hex_objects):\n \"\"\"\n Assigns hex objects as values to coordinates as keys. The number of coordinates and hex objects\n should be equal.\n :param coordinates: Locations of hex objects.\n :param hex_objects: the hex objects themselves.\n :return: None\n \"\"\"\n if len(coordinates) != len(hex_objects):\n raise MismatchError(\"Number of coordinates does not match number of hex objects.\")\n\n keys = make_key_from_coordinates(coordinates)\n for key, hex in zip(keys, hex_objects):\n if key in self.keys():\n raise HexExistsError(\"key \" + key + \" already exists.\")\n\n super(HexMap, self).__setitem__(key, hex)\n\n def setitem_direct(self, key, value):\n if key in self.keys():\n raise HexExistsError(\"key \" + key + \" already exists.\")\n\n super(HexMap, self).__setitem__(key, value)\n\n def overwrite_entries(self, coordinates, hex):\n keys = make_key_from_coordinates(coordinates)\n for key in keys:\n super(HexMap, self).__setitem__(key, hex)\n\n def __delitem__(self, coordinates):\n if len(coordinates.shape) == 1:\n coordinates = np.array([coordinates])\n keys = make_key_from_coordinates(coordinates)\n for key in keys:\n if key in self.keys():\n super(HexMap, self).__delitem__(key)\n\n def __getitem__(self, coordinates):\n \"\"\"\n Retrieves hexes stores at `coordinates`\n :param coordinate: the locations used as keys for hexes. You can pass more than one coordinate\n :return: list of hexes mapped to using `coordinates`\n \"\"\"\n if len(coordinates.shape) == 1:\n coordinates = np.array([coordinates])\n keys = make_key_from_coordinates(coordinates)\n hexes = []\n for key in keys:\n if key in self.keys():\n hexes.append(super(HexMap, self).__getitem__(key))\n return hexes\n" ]
[ [ "numpy.array", "numpy.linalg.solve" ] ]
marek-cottingham/magSonify
[ "74edf69e6a89365d9a08935d58332bfcd8cfedc1" ]
[ "Example Code/Sonification Algorithm Diagrams/audiotsmWithDebugOutput/wsola.py" ]
[ "# -*- coding: utf-8 -*-\n\n\"\"\"\nThe :mod:`audiotsm.wsola` module implements the WSOLA (Waveform\nSimilarity-based Overlap-Add) time-scale modification procedure.\n\nWSOLA works in the same way as OLA, with the exception that it allows slight\nshift of the position of the analysis frames.\n\"\"\"\n\nimport numpy as np\n\nfrom .base import AnalysisSynthesisTSM, Converter\nfrom .utils.windows import hanning\n\n\nclass WSOLAConverter(Converter):\n \"\"\"A Converter implementing the WSOLA (Waveform Similarity-based\n Overlap-Add) time-scale modification procedure.\"\"\"\n def __init__(self, channels, frame_length, synthesis_hop, tolerance):\n self._channels = channels\n self._frame_length = frame_length\n self._synthesis_hop = synthesis_hop\n self._tolerance = tolerance\n\n self._synthesis_frame = np.empty((channels, frame_length))\n self._natural_progression = np.empty((channels, frame_length))\n self._first = True\n self.STFT_DEBUG = 0\n\n def clear(self):\n self._first = True\n\n def convert_frame(self, analysis_frame):\n for k in range(0, self._channels):\n if self._first:\n delta = 0\n else:\n cross_correlation = np.correlate(\n analysis_frame[k, :-self._synthesis_hop],\n self._natural_progression[k])\n delta = np.argmax(cross_correlation)\n del cross_correlation\n\n # Copy the shifted analysis frame to the synthesis frame buffer\n np.copyto(self._synthesis_frame[k],\n analysis_frame[k, delta:delta + self._frame_length])\n\n # Save the natural progression (what the next synthesis frame would\n # be at normal speed)\n delta += self._synthesis_hop\n np.copyto(self._natural_progression[k],\n analysis_frame[k, delta:delta + self._frame_length])\n\n self._first = False\n\n return self._synthesis_frame\n\n\ndef wsola(channels, speed=1., frame_length=1024, analysis_hop=None,\n synthesis_hop=None, tolerance=None):\n \"\"\"Returns a :class:`~audiotsm.base.tsm.TSM` object implementing the WSOLA\n (Waveform Similarity-based Overlap-Add) time-scale modification procedure.\n\n In most cases, you should not need to set the ``frame_length``, the\n ``analysis_hop``, the ``synthesis_hop``, or the ``tolerance``. If you want\n to fine tune these parameters, you can check the documentation of the\n :class:`~audiotsm.base.analysis_synthesis.AnalysisSynthesisTSM` class to\n see what the first three represent.\n\n WSOLA works in the same way as OLA, with the exception that it allows\n slight shift (at most ``tolerance``) of the position of the analysis\n frames.\n\n :param channels: the number of channels of the input signal.\n :type channels: int\n :param speed: the speed ratio by which the speed of the signal will be\n multiplied (for example, if ``speed`` is set to 0.5, the output signal\n will be half as fast as the input signal).\n :type speed: float, optional\n :param frame_length: the length of the frames.\n :type frame_length: int, optional\n :param analysis_hop: the number of samples between two consecutive analysis\n frames (``speed * synthesis_hop`` by default). 
If ``analysis_hop`` is\n set, the ``speed`` parameter will be ignored.\n :type analysis_hop: int, optional\n :param synthesis_hop: the number of samples between two consecutive\n synthesis frames (``frame_length // 2`` by default).\n :type synthesis_hop: int, optional\n :param tolerance: the maximum number of samples that the analysis frame can\n be shifted.\n :type tolerance: int\n :returns: a :class:`audiotsm.base.tsm.TSM` object\n \"\"\"\n # pylint: disable=too-many-arguments\n if synthesis_hop is None:\n synthesis_hop = frame_length // 2\n\n if analysis_hop is None:\n analysis_hop = int(synthesis_hop * speed)\n\n if tolerance is None:\n tolerance = frame_length // 2\n\n analysis_window = None\n synthesis_window = hanning(frame_length)\n\n converter = WSOLAConverter(channels, frame_length, synthesis_hop,\n tolerance)\n\n return AnalysisSynthesisTSM(\n converter, channels, frame_length, analysis_hop, synthesis_hop,\n analysis_window, synthesis_window, tolerance,\n tolerance + synthesis_hop)\n" ]
[ [ "numpy.copyto", "numpy.correlate", "numpy.argmax", "numpy.empty" ] ]
Jhsmit/PyHDX
[ "34bf653743008508bb14f24ccca21ee39b5b25e3", "34bf653743008508bb14f24ccca21ee39b5b25e3" ]
[ "pyhdx/web/views.py", "pyhdx/web/controllers.py" ]
[ "import logging\nfrom itertools import groupby, count\n\nimport holoviews as hv\nimport pandas as pd\nimport panel as pn\nimport param\nfrom bokeh.models import HoverTool, Span, Rect, Whisker\nfrom bokeh.models.formatters import NumeralTickFormatter\nfrom bokeh.plotting import figure\nfrom lumen.filters import ParamFilter\nfrom lumen.views import hvPlotView, View\n\nfrom pyhdx.web.base import BokehFigurePanel, FigurePanel, MIN_BORDER_LEFT\nfrom pyhdx.web.widgets import LoggingMarkdown, NGL\n\n\nclass hvPlotAppView(hvPlotView):\n\n def get_data(self):\n # get data filter using pandas query syntax?\n try:\n data = super().get_data()\n except (KeyError, ValueError) as e:\n #print(f'Empty data in {self.__class__}: {e}')\n return self.empty_df\n\n #data = super().get_data()\n\n if data.size > 2: #todo fix this weird hack\n return data\n else:\n #print(f'got data but too small in {self.__class__}, ')\n return self.empty_df\n\n @property\n def empty_df(self):\n dic = {self.x: [], self.y: []}\n if 'c' in self.kwargs:\n dic[self.kwargs['c']] = []\n return pd.DataFrame(dic)\n\n\nclass hvRectangleAppView(View):\n\n opts = param.Dict(default={}, doc=\"HoloViews option to apply on the plot.\")\n\n view_type = 'rectangles'\n\n streaming = param.Boolean(default=False, doc=\"\"\"\n Whether to stream new data to the plot or rerender the plot.\"\"\")\n\n def __init__(self, **params):\n # import hvplot.pandas # noqa\n # if 'dask' in sys.modules:\n # try:\n # import hvplot.dask # noqa\n # except Exception:\n # pass\n self._stream = None\n self._linked_objs = []\n super().__init__(**params)\n\n def get_panel(self):\n kwargs = self._get_params()\n #interactive? https://github.com/holoviz/panel/issues/1824\n return pn.pane.HoloViews(**kwargs)\n\n def get_plot(self, df):\n \"\"\"\n Dataframe df must have columns x0, y0, x1, y1 (in this order) for coordinates\n bottom-left (x0, y0) and top right (x1, y1). Optionally a fifth value-column can be provided for colors\n\n Parameters\n ----------\n df\n\n Returns\n -------\n\n \"\"\"\n # processed = {}\n # for k, v in self.kwargs.items():\n # if k.endswith('formatter') and isinstance(v, str) and '%' not in v:\n # v = NumeralTickFormatter(format=v)\n # processed[k] = v\n # if self.streaming:\n # processed['stream'] = self._stream\n\n #hvplots stream? 
https://holoviews.org/user_guide/Streaming_Data.html\n\n# plot = hv.Rectangles([(0, 0, 1, 1), (2, 3, 4, 6), (0.5, 2, 1.5, 4), (2, 1, 3.5, 2.5)])\n\n processed = {}\n for k, v in self.kwargs.items():\n if k.endswith('formatter') and isinstance(v, str) and '%' not in v:\n v = NumeralTickFormatter(format=v)\n processed[k] = v\n if self.streaming:\n #processed['stream'] = self._stream\n\n plot = hv.DynamicMap(hv.Rectangles, streams=[self._stream])\n plot = plot.apply.opts(**self.opts) if self.opts else plot\n else:\n plot = hv.Rectangles(df)\n plot.opts(**self.opts) if self.opts else plot\n\n if self.selection_group or 'selection_expr' in self._param_watchers:\n plot = self._link_plot(plot)\n\n return plot\n\n def _get_params(self):\n df = self.get_data()\n if df is None:\n df = self.empty_df\n\n if self.streaming:\n from holoviews.streams import Pipe\n self._stream = Pipe(data=df)\n return dict(object=self.get_plot(df), sizing_mode='stretch_both') # todo update sizing mode\n\n def get_data(self):\n #todo uniformify this method for all views\n try:\n return super().get_data()\n except (KeyError, ValueError) as e:\n #print(f'Empty data in {self.__class__}: {e}')\n return None\n #return self.empty_df\n\n def update(self, *events, invalidate_cache=True):\n \"\"\"\n Triggers an update in the View.\n\n Parameters\n ----------\n events: tuple\n param events that may trigger an update.\n invalidate_cache : bool\n Whether to clear the View's cache.\n\n Returns\n -------\n stale : bool\n Whether the panel on the View is stale and needs to be\n rerendered.\n \"\"\"\n # Skip events triggered by a parameter change on this View\n own_parameters = [self.param[p] for p in self.param]\n own_events = events and all(\n isinstance(e.obj, ParamFilter) and\n (e.obj.parameter in own_parameters or\n e.new is self._ls.selection_expr)\n for e in events\n )\n if own_events:\n return False\n if invalidate_cache:\n self._cache = None\n if not self.streaming or self._stream is None:\n upd = self._update_panel()\n return upd\n if self.get_data() is not None:\n self._stream.send(self.get_data())\n return False\n\n\n @property\n def empty_df(self):\n return pd.DataFrame([[0] * 5], columns=['x0', 'x1', 'y0', 'y1', 'value'])\n\n\nclass NGLView(View):\n view_type = 'protein'\n\n #spin = param.Boolean(default=False)\n\n #js_files = {'ngl': \"https://cdn.jsdelivr.net/gh/arose/ngl@v2.0.0-dev.37/dist/ngl.js\"}\n\n def __init__(self, **params):\n super(NGLView, self).__init__(**params)\n self.ngl_view = NGL(sizing_mode='stretch_both')\n\n def get_panel(self):\n return self.ngl_view\n\n def update(self, *events, invalidate_cache=True):\n if invalidate_cache:\n self._cache = None\n data = self.get_data()\n if len(data.columns) > 1 or data.size < 1:\n # invalid number of columns\n self.ngl_view.color_list = [['white', \"*\"]]\n else:\n pd_series = data.iloc[:, 0]\n grp = pd_series.groupby(pd_series)\n\n color_list = []\n for c, pd_series in grp:\n result = [list(g) for _, g in groupby(pd_series.index, key=lambda n, c=count(): n - next(c))]\n\n resi = ' or '.join([f'{g[0]}-{g[-1]}' for g in result])\n color_list.append([c, resi])\n\n self.ngl_view.color_list = color_list\n\n # update panel?\n return self._update_panel()\n\n\nclass LoggingView(View):\n view_type = 'logging'\n\n logger = param.ClassSelector(logging.Logger, doc='Logger object to show in Log view')\n\n level = param.Integer(default=10, doc='Logging level of the streamhandler redirecting logs to the view')\n\n def __init__(self, *args, **params):\n super(LoggingView, 
self).__init__(**params)\n self.markdown = LoggingMarkdown('### Log Window \\n', sizing_mode='stretch_both')\n\n self.sh = logging.StreamHandler(self.markdown)\n self.sh.terminator = ' \\n'\n self.sh.setLevel(self.level)\n formatter = logging.Formatter('%(asctime)s [%(levelname)s]: %(message)s', \"%Y-%m-%d %H:%M:%S\")\n self.sh.setFormatter(formatter)\n self.logger.addHandler(self.sh)\n\n @param.depends('level', watch=True)\n def _level_updated(self):\n self.sh.setLevel(self.level)\n\n @property\n def panel(self):\n return self.markdown\n\n def update(self, *events, invalidate_cache=True):\n pass\n\n\nclass CoverageFigure(BokehFigurePanel):\n title = 'Coverage'\n accepted_tags = ['coverage']\n\n def __init__(self, *args, **params):\n super(CoverageFigure, self).__init__(*args, **params)\n\n def draw_figure(self):\n fig = figure(title=None, min_border=0, tools='pan,wheel_zoom,box_zoom,save,reset')\n fig.min_border_left = MIN_BORDER_LEFT\n fig.xaxis.axis_label = 'Residue number'\n\n return fig\n\n def render_sources(self, src_dict): #todo , **render_kwargs\n tooltips = [('Pos', '$x{int}'),\n ('Index', '@index'),\n ('Start', '@start (@_start)'),\n ('End', '@end (@_end)'),\n ('Sequence', '@sequence'),\n ('Score', '@scores'),\n ('Uptake', '@uptake (@uptake_corrected / @ex_residues, @maxuptake)')]\n\n for name, data_source in src_dict.items():\n glyph = Rect(x='x', y='y', width='width', height=1, fill_color='color')\n renderer = self.figure.add_glyph(data_source.source, glyph)\n self.renderers[name] = renderer\n\n hovertool = HoverTool(renderers=[renderer], tooltips=tooltips)\n self.figure.add_tools(hovertool)\n\n\nclass LinearLogFigure(BokehFigurePanel):\n \"\"\"base class for bokeh figure which can switch between log and linear axis\n This is a very slim base class (perhaps it should be a mixin?) 
(yes probably)\n and it should have a different name\n \"\"\"\n\n def _redraw_event(self, event):\n \"\"\"Redraw the figure with new kwargs passed to figure\"\"\"\n kwargs = {event.name: event.new.lower()}\n self.redraw(**kwargs)\n\n\nclass ThdFigure(LinearLogFigure):\n \"\"\"base class for figures extending LinearLogFigure with threshold lines\n parent must have ClassificationControl\n \"\"\"\n accepted_tags = ['mapping']\n\n # def __init__(self, parent, *args, **params):\n # super(ThdFigure, self).__init__(parent, *args, **params)\n\n def draw_figure(self, **kwargs):\n y_axis_type = kwargs.pop('y_axis_type', 'log')\n x_axis_type = kwargs.pop('x_axis_type', 'linear')\n fig = figure(y_axis_type=y_axis_type, x_axis_type=x_axis_type,\n tools='pan,wheel_zoom,box_zoom,save,reset', **kwargs)\n fig.min_border_left = MIN_BORDER_LEFT\n fig.xaxis.axis_label = 'Residue number'\n fig.yaxis.axis_label = self.y_label\n\n # todo refactor controller access to imporove upon current try/except and allow more controller agnostic behaviour\n try:\n for _ in range(self.control_panels['ClassificationControl'].param['num_colors'].bounds[1] - 1):\n sp = Span(location=0, dimension='width')\n sp.tags = ['thd']\n sp.visible = False\n fig.add_layout(sp)\n except KeyError:\n pass\n\n return fig\n\n def render_sources(self, src_dict, **render_kwargs):\n for name, data_source in src_dict.items():\n kwargs = {**data_source.render_kwargs, **render_kwargs}\n\n glyph_func = getattr(self.figure, data_source.renderer)\n renderer = glyph_func(source=data_source.source, name=name,\n legend_label=name, **kwargs) #todo size is being specified at two different places now\n\n self.renderers[name] = renderer\n hovertool = HoverTool(renderers=[renderer],\n tooltips=[('Residue', '@r_number{int}'), (self.y_label, f'@{kwargs[\"y\"]}')],\n mode='vline')\n self.figure.add_tools(hovertool)\n\n if self.renderers:\n self.figure.legend.click_policy = 'hide'\n\n def _draw_thds(self, *events):\n #todo duplicate code, subclass\n spans = self.figure.select(tags='thd')\n\n if not self.figure.renderers:\n return\n\n y_names, source_names = zip(*[(renderer.glyph.y, renderer.data_source.name) for renderer in self.figure.renderers])\n if not self.control_panels['ClassificationControl'].target in source_names:\n self._hide_thds()\n return\n if not self.control_panels['ClassificationControl'].quantity in y_names:\n self._hide_thds()\n return\n\n spans.sort(key=lambda x: x.id)\n for i, span in enumerate(spans):\n if i < len(self.control_panels['ClassificationControl'].values):\n span.location = self.control_panels['ClassificationControl'].values[i]\n span.visible = self.control_panels['ClassificationControl'].show_thds\n else:\n span.visible = False\n\n def _hide_thds(self):\n spans = self.figure.select(tags='thd')\n for i, span in enumerate(spans):\n span.visible = False\n\n\nclass RateFigure(ThdFigure):\n title = 'Rates'\n accepted_tags = [('mapping', 'rate')]\n y_label = 'Rate (min⁻¹)'\n\n def setup_hooks(self):\n super().setup_hooks()\n self.control_panels['ClassificationControl'].param.watch(self._draw_thds, ['values', 'show_thds'])\n\n def render_sources(self, src_dict, **render_kwargs):\n super().render_sources(src_dict, **render_kwargs)\n\n\nclass PFactFigure(ThdFigure):\n title = 'Protection Factors'\n accepted_tags = [('mapping', 'pfact')]\n y_label = 'Protection factor'\n\n def setup_hooks(self):\n #todo move to superclass?\n super().setup_hooks()\n self.control_panels['ClassificationControl'].param.watch(self._draw_thds, ['values', 
'show_thds'])\n\n def render_sources(self, src_dict, **render_kwargs):\n super().render_sources(src_dict, y='pfact', **render_kwargs)\n\n\nclass DeltaGFigure(ThdFigure):\n title = 'DeltaG'\n accepted_tags = [('mapping', 'deltaG')]\n y_label = 'DeltaG (J/mol)'\n\n def setup_hooks(self):\n #todo move to superclass?\n super().setup_hooks()\n self.control_panels['ClassificationControl'].param.watch(self._draw_thds, ['values', 'show_thds'])\n\n def draw_figure(self, **kwargs):\n return super().draw_figure(y_axis_type='linear')\n\n def render_sources(self, src_dict, **render_kwargs):\n super().render_sources(src_dict, y='deltaG', **render_kwargs)\n\n #todo make sure that if a new deltaG get plotted the graph is redrawn\n for name, data_source in src_dict.items():\n if 'covariance' in data_source.source.data.keys():\n # y = data_source.source.data['deltaG']\n # cov = data_source.source.data['covariance']\n # data_source.source.data['__upper'] = y + cov\n # data_source.source.data['__lower'] = y - cov\n\n whiskers = Whisker(source=data_source.source, base='r_number', upper='__upper', lower='__lower')\n self.figure.add_layout(whiskers)\n\n # def _data_updated_callback(self, attr, old, new):\n # #get layouts and update the cds\n # # print(attr, old, new) # data, dict, propertyvalue columndata\n #\n #\n # if not np.allclose(old['deltaG'], new['deltaG']):\n # y = new['deltaG']\n # # x = data_source.source.data['r_number']\n # cov = new['covariance']\n #\n # new['__upper'] = y + cov\n # new['__lower'] = y - cov\n #\n # super()._data_updated_callback(attr, old, new)\n\n\nclass BinaryComparisonFigure(ThdFigure):\n title = 'Binary Comparison'\n accepted_tags = [('comparison', 'mapping')] # [ ('x' AND 'y') OR 'z' OR 'asdf']\n x_label = 'Residue number' # move these to _redraw_kwargs?\n y_label = 'Difference'\n\n def __init__(self, parent, *args, **params):\n super(BinaryComparisonFigure, self).__init__(parent, *args, **params)\n self._redraw_kwargs['y_axis_type'] = 'linear'\n\n @property\n def y_kwarg(self):\n try:\n plot_quantity = self.control_panels['ClassificationControl'].quantity\n if plot_quantity is not None:\n return plot_quantity\n except KeyError:\n pass\n return None\n\n def render_sources(self, src_dict, **render_kwargs):\n kwargs = {**self._render_kwargs, **render_kwargs}\n super().render_sources(src_dict, **kwargs)\n\n def setup_hooks(self):\n super().setup_hooks()\n #todo this should be resolved in some other way than the name\n try:\n self.control_panels['ClassificationControl'].param.watch(self._draw_thds, ['values', 'show_thds'])\n self.control_panels['ClassificationControl'].param.watch(self._log_space_updated, ['log_space'])\n self.control_panels['ClassificationControl'].param.watch(self._quantity_updated, ['quantity'])\n except KeyError:\n pass\n\n try:\n self.control_panels['ColoringControl'].param.watch(self._draw_thds, ['values', 'show_thds'])\n self.control_panels['ColoringControl'].param.watch(self._log_space_updated, ['log_space'])\n except KeyError:\n pass\n\n #todo group into one function?\n def _quantity_updated(self, event):\n self._render_kwargs['y'] = event.new\n self.y_label = event.new\n self.redraw(**self._redraw_kwargs) # todo auto pass kwargs?\n\n # def _target_updated(self, event): #event. 
cls, name, new, obj, old, type, what\n # #todo make redraw accept events\n # self._render_kwargs['target'] = event.new\n # self.redraw(**self._redraw_kwargs) # todo auto pass kwargs?\n\n def _log_space_updated(self, event):\n if event.new: # True, log space\n self._redraw_kwargs['y_axis_type'] = 'log'\n else:\n self._redraw_kwargs['y_axis_type'] = 'linear'\n self.redraw(**self._redraw_kwargs) # todo auto pass kwargs?\n\n def draw_figure(self, **kwargs):\n y_axis_type = kwargs.pop('y_axis_type', 'linear')\n return super().draw_figure(y_axis_type=y_axis_type, **kwargs)\n\n\nclass ScoresFigure(LinearLogFigure):\n title = 'Scores'\n accepted_tags = [('scores', 'mapping')]\n x_label = 'Residue number'\n y_label = 'Value'\n\n def render_sources(self, src_dict):\n for name, data_source in src_dict.items():\n for y_field in data_source.render_kwargs['y']:\n glyph_func = getattr(self.figure, data_source.renderer)\n kwargs = data_source.render_kwargs.copy()\n kwargs.pop('y')\n\n renderer = glyph_func(**kwargs, y=y_field, source=data_source.source, name=name,\n legend_label=f'{y_field}')\n\n self.renderers[name] = renderer\n # hovertool = HoverTool(renderers=[renderer],\n # tooltips=[('Residue', '@r_number{int}'), (self.y_label, f'@{data_source.render_kwargs[\"y\"]}')],\n # mode='vline')\n # self.figure.add_tools(hovertool)\n\n if self.renderers:\n self.figure.legend.click_policy = 'hide'\n\n\nclass SingleValueFigure(LinearLogFigure):\n title = 'Values'\n accepted_tags = [('comparison', 'mapping')]\n x_label = 'Residue number'\n y_label = 'Value'\n\n def render_sources(self, src_dict):\n for name, data_source in src_dict.items():\n for field, render_func in zip(['value1', 'value2'], ['triangle', 'square']):\n glyph_func = getattr(self.figure, render_func)\n kwargs = data_source.render_kwargs.copy()\n kwargs.pop('y')\n\n renderer = glyph_func(**kwargs, y=field, source=data_source.source, name=name,\n legend_label=name + f'_{field}')\n\n self.renderers[name] = renderer\n hovertool = HoverTool(renderers=[renderer],\n tooltips=[('Residue', '@r_number{int}'), (self.y_label, f'@{data_source.render_kwargs[\"y\"]}')],\n mode='vline')\n self.figure.add_tools(hovertool)\n\n if self.renderers:\n self.figure.legend.click_policy = 'hide'\n\n def draw_figure(self, **kwargs):\n y_axis_type = kwargs.pop('y_axis_type', 'linear')\n return super().draw_figure(y_axis_type=y_axis_type, **kwargs)\n\n\nclass SingleFigure(BinaryComparisonFigure):\n title = 'Values'\n y_label = 'Value'\n\n\nclass FitResultFigure(BokehFigurePanel):\n title = 'Fit Result'\n accepted_tags = ['uptake_curve']\n y_label = 'Uptake corrected'\n x_label = 'Time'\n\n def __init__(self, parent, *args, **params):\n super(FitResultFigure, self).__init__(parent, *args, **params)\n\n def setup_hooks(self):\n self.control_panels['FitResultControl'].param.watch(self._redraw_event, ['x_axis_type'])\n\n def _redraw_event(self, event):\n \"\"\"Redraw the figure with new kwargs passed to figure\"\"\"\n kwargs = {event.name: event.new.lower()}\n self.redraw(**kwargs)\n\n def draw_figure(self, **kwargs):\n fig = super().draw_figure(x_axis_type=self.control_panels['FitResultControl'].x_axis_type.lower())\n return fig\n\n def render_sources(self, src_dict, **render_kwargs):\n super().render_sources(src_dict)\n\n if self.renderers:\n self.figure.legend.location = \"bottom_right\"\n\n\nclass LoggingFigure(FigurePanel):\n title = 'Logging'\n\n def __init__(self, *args, **params):\n super(LoggingFigure, self).__init__(*args, **params)\n self.markdown = 
LoggingMarkdown('### Log Window \\n', sizing_mode='stretch_both')\n\n sh = logging.StreamHandler(self.markdown)\n sh.terminator = ' \\n'\n formatter = logging.Formatter('%(asctime)s [%(levelname)s]: %(message)s', \"%Y-%m-%d %H:%M:%S\")\n sh.setFormatter(formatter)\n #sh.setLevel(logging.DEBUG)\n self.parent.logger.addHandler(sh)\n\n def setup_hooks(self):\n # todo add function on base class for doing these things (with try/except) ?\n # Or the users has to be responsible and set this up correctly\n # if the hook is unwanted, the class should be subclassed with override on setup_hooks\n\n try:\n self.parent.control_panels['OptionsControl'].param.watch(self._update_log_level, ['log_level'])\n self.parent.control_panels['OptionsControl'].param.trigger('log_level')\n except KeyError:\n self.parent.logger.debug('Control panel OptionsControl not founc')\n\n def _update_log_level(self, event):\n self.parent.logger.setLevel(event.new)\n\n @property\n def panel(self):\n return self.markdown\n\n\nclass ImageFigure(LinearLogFigure):\n title = 'Image'\n\n accepted_tags = ['image']\n x_label = 'Residue number'\n y_label = 'Time (probably)'\n\n def draw_figure(self, **kwargs):\n figure = super().draw_figure()\n figure.x_range.range_padding = figure.y_range.range_padding = 0\n\n return figure\n\n def render_sources(self, src_dict, **render_kwargs):\n render_kwargs.pop('color', None)\n for name, data_source in src_dict.items():\n ds_kwargs = data_source.render_kwargs\n ds_kwargs.pop('color', None) # todo fix color winding up here int he first place\n renderer = self.figure.image_rgba(source=data_source.source, image='img', **ds_kwargs)\n hovertool = HoverTool(renderers=[renderer],\n tooltips=[(\"x\", \"$x\"), (\"y\", \"$y\"), (\"value\", \"@scores\")]\n )\n self.figure.add_tools(hovertool)\n #\n", "import operator\nimport urllib.request\nimport zipfile\nfrom collections import namedtuple\nfrom io import StringIO, BytesIO\nfrom pathlib import Path\n\nimport colorcet\nimport dask\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport panel as pn\nimport param\nfrom numpy.lib.recfunctions import append_fields\nfrom skimage.filters import threshold_multiotsu\n\nfrom pyhdx import VERSION_STRING\nfrom pyhdx.fileIO import read_dynamx, csv_to_protein, csv_to_dataframe, dataframe_to_stringio\nfrom pyhdx.fitting import fit_rates_weighted_average, fit_rates_half_time_interpolate, get_bounds, fit_gibbs_global, \\\n fit_gibbs_global_batch, PATIENCE, STOP_LOSS, EPOCHS, R1, R2, optimizer_defaults\nfrom pyhdx.models import PeptideMasterTable, HDXMeasurement, Protein, array_intersection\nfrom pyhdx.web.base import ControlPanel, DEFAULT_COLORS, DEFAULT_CLASS_COLORS\nfrom pyhdx.web.sources import DataSource, DataFrameSource\nfrom pyhdx.web.transforms import ApplyCmapTransform\nfrom pyhdx.web.widgets import ASyncProgressBar\nfrom pyhdx.support import rgb_to_hex, hex_to_rgba, series_to_pymol\n\nHalfLifeFitResult = namedtuple('HalfLifeFitResult', ['output'])\n\n\nclass MappingFileInputControl(ControlPanel):\n \"\"\"\n This controller allows users to upload *.txt files where quantities (protection factors, Gibbs free energy, etc) are\n mapped to a linear sequence. The data is then used further downstream to generate binary comparisons between datasets.\n\n The column should be tab separated with on the last header line (starts with '#') the names of the columns. 
Columns\n should be tab-delimited.\n \"\"\"\n header = 'File Input'\n\n input_file = param.Parameter(default=None, doc='Input file to add to available datasets')\n dataset_name = param.String(doc='Name for the dataset to add. Defaults to filename')\n offset = param.Integer(default=0, doc=\"Offset to add to the file's r_number column\")\n add_dataset = param.Action(lambda self: self._action_add_dataset(),\n doc='Add the dataset to available datasets')\n datasets_list = param.ListSelector(doc='Current datasets', label='Datasets')\n remove_dataset = param.Action(lambda self: self._action_remove_dataset(),\n doc='Remove selected datasets')\n\n def __init__(self, parent, **params):\n super(MappingFileInputControl, self).__init__(parent, **params)\n self.parent.param.watch(self._datasets_updated, ['datasets'])\n\n def make_dict(self):\n return self.generate_widgets(input_file=pn.widgets.FileInput)\n\n @param.depends('input_file', watch=True)\n def _input_file_updated(self):\n self.dataset_name = self.dataset_name or Path(self.widget_dict['input_file'].filename).stem\n\n @property\n def protein(self):\n \"\"\"The protein object from the currently selected file in the file widget\"\"\"\n\n try:\n sio = StringIO(self.input_file.decode())\n except UnicodeDecodeError:\n self.parent.logger.info('Invalid file type, supplied file is not a text file')\n return None\n try:\n sio.seek(0)\n protein = txt_to_protein(sio)\n except KeyError:\n sio.seek(0)\n protein = csv_to_protein(sio)\n return protein\n\n def _add_dataset(self):\n self.parent.datasets[self.dataset_name] = self.protein\n\n #todo refactor dataset to protein_something\n def _action_add_dataset(self):\n if self.dataset_name in self.parent.datasets.keys():\n self.parent.logger.info(f'Dataset {self.dataset_name} already added')\n elif not self.dataset_name:\n self.parent.logger.info('The added comparison needs to have a name')\n elif not self.input_file:\n self.parent.logger.info('Empty or no file selected')\n elif self.protein is not None:\n self._add_dataset()\n self.parent.param.trigger('datasets')\n\n self.widget_dict['input_file'].filename = ''\n self.widget_dict['input_file'].value = b''\n\n self.dataset_name = ''\n\n def _action_remove_dataset(self):\n if self.datasets_list is not None:\n for dataset_name in self.datasets_list:\n self.parent.datasets.pop(dataset_name)\n self.parent.param.trigger('datasets')\n\n def _datasets_updated(self, events):\n self.param['datasets_list'].objects = list(self.parent.datasets.keys())\n\n\nimport itertools\ncmap_cycle = itertools.cycle(['gray','PiYG', 'jet'])\n\n\nclass CSVFileInputControl(ControlPanel):\n input_file = param.Parameter()\n load_file = param.Action(lambda self: self._action_load())\n temp_new_data = param.Action(lambda self: self._action_new_data())\n temp_new_cmap = param.Action(lambda self: self._action_new_cmap())\n\n temp_update_filter = param.Action(lambda self: self._action_exposure())\n temp_cmap_rect = param.Action(lambda self: self._action_cmap_rect())\n\n #cmap_obj = param.ObjectSelector(default='viridis', objects=['viridis', 'plasma', 'magma'])\n\n\n def make_dict(self):\n return self.generate_widgets(input_file=pn.widgets.FileInput(accept='.csv,.txt'))\n\n def _action_load(self):\n sio = StringIO(self.input_file.decode('UTF-8'))\n df = csv_to_dataframe(sio)\n source = DataFrameSource(df=df)\n\n def _action_new_data(self):\n\n source = self.parent.sources['torch_fit']\n table = source.get('torch_fit')\n\n size = len(table)\n\n new_data = 40e3*np.random.rand(size)\n\n 
table['deltaG'] = new_data\n\n self.parent.update()\n\n def _action_new_cmap(self):\n cmap_name = np.random.choice(['viridis', 'inferno', 'plasma'])\n cmap = mpl.cm.get_cmap(cmap_name)\n\n transform = self.parent.transforms['cmap']\n transform.cmap = cmap\n\n self.parent.update()\n\n def _action_exposure(self):\n filter = self.parent.filters['exposure']\n filter.widget.value = 0.\n\n self.parent.update()\n\n def _action_cmap_rect(self):\n new_cmap = next(cmap_cycle)\n\n rect_view = self.parent.figure_panels['rect_plot']\n rect_view.opts['cmap'] = new_cmap\n\n self.parent.update()\n\n item = self.parent.rows['rect_plot'][0]\n #item.param.trigger('object')\n\n\nclass TestFileInputControl(ControlPanel):\n input_file = param.Parameter()\n load_file = param.Action(lambda self: self._action_load())\n\n\n _layout = {\n 'self': None,\n 'filters.exposure_slider': None\n }\n\n def __init__(self, parent, **params):\n super().__init__(parent, **params)\n # todo property and list of tuples\n self._layout = {\n 'self': None,\n 'filters.exposure_slider': None\n }\n\n self.update_box()\n\n def make_dict(self):\n return self.generate_widgets(input_file=pn.widgets.FileInput(accept='.csv,.txt'))\n\n def _action_load(self):\n sio = StringIO(self.input_file.decode('UTF-8'))\n df = csv_to_dataframe(sio)\n source = DataFrameSource(df=df)\n\n\nclass PeptideFileInputControl(ControlPanel):\n \"\"\"\n This controller allows users to input .csv file (Currently only DynamX format) of 'state' peptide uptake data.\n Users can then choose how to correct for back-exchange and which 'state' and exposure times should be used for\n analysis.\n\n \"\"\"\n header = 'Peptide Input'\n\n input_files = param.List()\n\n be_mode = param.Selector(doc='Select method of back exchange correction', label='Back exchange correction method', objects=['FD Sample', 'Flat percentage'])\n fd_state = param.Selector(doc='State used to normalize uptake', label='FD State')\n fd_exposure = param.Selector(doc='Exposure used to normalize uptake', label='FD Exposure')\n exp_state = param.Selector(doc='State for selected experiment', label='Experiment State')\n exp_exposures = param.ListSelector(default=[], objects=[''], label='Experiment Exposures'\n , doc='Selected exposure time to use')\n\n be_percent = param.Number(28., bounds=(0, 100), doc='Global percentage of back-exchange',\n label='Back exchange percentage')\n\n drop_first = param.Integer(1, bounds=(0, None), doc='Select the number of N-terminal residues to ignore.')\n ignore_prolines = param.Boolean(True, constant=True, doc='Prolines are ignored as they do not exchange D.')\n d_percentage = param.Number(95., bounds=(0, 100), doc='Percentage of deuterium in the labelling buffer',\n label='Deuterium percentage')\n #fd_percentage = param.Number(95., bounds=(0, 100), doc='Percentage of deuterium in the FD control sample buffer',\n # label='FD Deuterium percentage')\n temperature = param.Number(293.15, bounds=(0, 373.15), doc='Temperature of the D-labelling reaction',\n label='Temperature (K)')\n pH = param.Number(7.5, doc='pH of the D-labelling reaction, as read from pH meter',\n label='pH read')\n #load_button = param.Action(lambda self: self._action_load(), doc='Load the selected files', label='Load Files')\n\n n_term = param.Integer(1, doc='Index of the n terminal residue in the protein. Can be set to negative values to '\n 'accommodate for purification tags. 
Used in the determination of intrinsic rate of exchange')\n c_term = param.Integer(0, bounds=(0, None),\n doc='Index of the c terminal residue in the protein. Used for generating pymol export script'\n 'and determination of intrinsic rate of exchange for the C-terminal residue')\n sequence = param.String('', doc='Optional FASTA protein sequence')\n dataset_name = param.String()\n add_dataset_button = param.Action(lambda self: self._action_add_dataset(), label='Add dataset',\n doc='Parse selected peptides for further analysis and apply back-exchange correction')\n dataset_list = param.ObjectSelector(default=[], label='Datasets', doc='Lists available datasets')\n\n def __init__(self, parent, **params):\n super(PeptideFileInputControl, self).__init__(parent, **params)\n self.parent.param.watch(self._datasets_updated, ['data_objects'])\n\n excluded = ['be_percent']\n self.own_widget_names = [name for name in self.widgets.keys() if name not in excluded]\n self.update_box()\n\n self._df = None # Numpy array with raw input data\n\n @property\n def _layout(self):\n return [('self', self.own_widget_names)]\n\n def make_dict(self):\n text_area = pn.widgets.TextAreaInput(name='Sequence (optional)', placeholder='Enter sequence in FASTA format', max_length=10000,\n width=300, height=100, height_policy='fixed', width_policy='fixed')\n return self.generate_widgets(\n input_files=pn.widgets.FileInput(multiple=True, name='Input files'),\n temperature=pn.widgets.FloatInput,\n #be_mode=pn.widgets.RadioButtonGroup,\n be_percent=pn.widgets.FloatInput,\n d_percentage=pn.widgets.FloatInput,\n #fd_percentage=pn.widgets.FloatInput,\n sequence=text_area)\n\n def make_list(self):\n excluded = ['be_percent']\n widget_list = [widget for name, widget, in self.widget_dict.items() if name not in excluded]\n\n return widget_list\n\n @param.depends('be_mode', watch=True)\n def _update_be_mode(self):\n # todo @tejas: Add test\n if self.be_mode == 'FD Sample':\n excluded = ['be_percent']\n elif self.be_mode == 'Flat percentage':\n excluded = ['fd_state', 'fd_exposure']\n\n self.own_widget_names = [name for name in self.widgets.keys() if name not in excluded]\n #self._layout = {'self': widgets}\n self.update_box()\n\n @param.depends('input_files', watch=True)\n def _read_files(self):\n \"\"\"\"\"\"\n if self.input_files:\n combined_df = read_dynamx(*[StringIO(byte_content.decode('UTF-8')) for byte_content in self.input_files])\n self._df = combined_df\n\n self.parent.logger.info(\n f'Loaded {len(self.input_files)} file{\"s\" if len(self.input_files) > 1 else \"\"} with a total '\n f'of {len(self._df)} peptides')\n\n else:\n self._df = None\n\n self._update_fd_state()\n self._update_fd_exposure()\n self._update_exp_state()\n self._update_exp_exposure()\n\n def _update_fd_state(self):\n if self._df is not None:\n states = list(self._df['state'].unique())\n self.param['fd_state'].objects = states\n self.fd_state = states[0]\n else:\n self.param['fd_state'].objects = []\n\n @param.depends('fd_state', watch=True)\n def _update_fd_exposure(self):\n if self._df is not None:\n fd_entries = self._df[self._df['state'] == self.fd_state]\n exposures = list(np.unique(fd_entries['exposure']))\n else:\n exposures = []\n self.param['fd_exposure'].objects = exposures\n if exposures:\n self.fd_exposure = exposures[0]\n\n @param.depends('fd_state', 'fd_exposure', watch=True)\n def _update_exp_state(self):\n if self._df is not None:\n # Booleans of data entries which are in the selected control\n control_bools = np.logical_and(self._df['state'] 
== self.fd_state, self._df['exposure'] == self.fd_exposure)\n\n control_data = self._df[control_bools].to_records()\n other_data = self._df[~control_bools].to_records()\n\n intersection = array_intersection([control_data, other_data], fields=['start', 'end']) # sequence?\n states = list(np.unique(intersection[1]['state']))\n else:\n states = []\n\n self.param['exp_state'].objects = states\n if states:\n self.exp_state = states[0] if not self.exp_state else self.exp_state\n\n @param.depends('exp_state', watch=True)\n def _update_exp_exposure(self):\n if self._df is not None:\n exp_entries = self._df[self._df['state'] == self.exp_state]\n exposures = list(np.unique(exp_entries['exposure']))\n exposures.sort()\n else:\n exposures = []\n\n self.param['exp_exposures'].objects = exposures\n self.exp_exposures = exposures\n\n if not self.dataset_name or self.dataset_name in self.param['exp_state'].objects:\n self.dataset_name = self.exp_state\n\n if not self.c_term and exposures:\n self.c_term = int(np.max(exp_entries['end']))\n\n def _datasets_updated(self, events):\n # Update datasets widget as datasets on parents change\n objects = list(self.parent.data_objects.keys())\n self.param['dataset_list'].objects = objects\n\n def _action_add_dataset(self):\n \"\"\"Apply controls to :class:`~pyhdx.models.PeptideMasterTable` and set :class:`~pyhdx.models.HDXMeasurement`\"\"\"\n\n if self._df is None:\n self.parent.logger.info(\"No data loaded\")\n return\n elif self.dataset_list and self.dataset_name in self.dataset_list:\n self.parent.logger.info(f\"Dataset name {self.dataset_name} already in use\")\n return\n\n peptides = PeptideMasterTable(self._df, d_percentage=self.d_percentage,\n drop_first=self.drop_first, ignore_prolines=self.ignore_prolines)\n if self.be_mode == 'FD Sample':\n control_0 = None # = (self.zero_state, self.zero_exposure) if self.zero_state != 'None' else None\n peptides.set_control((self.fd_state, self.fd_exposure), control_0=control_0)\n elif self.be_mode == 'Flat percentage':\n # todo @tejas: Add test\n peptides.set_backexchange(self.be_percent)\n\n data = peptides.get_state(self.exp_state)\n exp_bools = data['exposure'].isin(self.exp_exposures)\n data = data[exp_bools]\n\n #todo temperature ph kwarg for series\n hdxm = HDXMeasurement(data, c_term=self.c_term, n_term=self.n_term, sequence=self.sequence,\n name=self.dataset_name, temperature=self.temperature, pH=self.pH)\n\n self.parent.data_objects[self.dataset_name] = hdxm\n self.parent.param.trigger('data_objects') # Trigger update\n\n df = hdxm.data\n df['start_end'] = [str(s) + '_' + str(e) for s, e in zip(df['start'], df['end'])]\n df['id'] = df.index % hdxm.Np\n target_source = self.parent.sources['dataframe']\n target_source.add_df(df, 'peptides', self.dataset_name)\n\n index = pd.Index(hdxm.coverage.r_number, name='r_number')\n df = pd.DataFrame(hdxm.rfu_residues, index=index, columns=hdxm.timepoints)\n target_source = self.parent.sources['dataframe']\n target_source.add_df(df, 'rfu', self.dataset_name)\n\n self.dataset_list.append(self.dataset_name)\n\n self.parent.logger.info(f'Loaded dataset {self.dataset_name} with experiment state {self.exp_state} '\n f'({len(hdxm)} timepoints, {len(hdxm.coverage)} peptides each)')\n self.parent.logger.info(f'Average coverage: {hdxm.coverage.percent_coverage:.3}%, '\n f'Redundancy: {hdxm.coverage.redundancy:.2}')\n\n def _action_remove_datasets(self):\n raise NotImplementedError('Removing datasets not implemented')\n for name in self.dataset_list:\n 
self.parent.datasets.pop(name)\n\n self.parent.param.trigger('datasets') # Manual trigger as key assignment does not trigger the param\n\n\n# todo class DataManagerControl()\n\n\nclass CoverageControl(ControlPanel):\n header = 'Coverage'\n\n #temp_new_data = param.Action(lambda self: self._action_new_data())\n\n def __init__(self, parent, **params):\n super().__init__(parent, **params)\n\n self.update_box()\n\n @property\n def _layout(self):\n return [\n # ('filters.coverage_state_name', None),\n # ('filters.coverage_exposure', None),\n ('opts.cmap', None),\n #('self', None)\n ]\n\n\nclass InitialGuessControl(ControlPanel):\n \"\"\"\n This controller allows users to derive initial guesses for D-exchange rate from peptide uptake data.\n \"\"\"\n\n #todo remove lambda symbol although its really really funny\n header = 'Initial Guesses'\n fitting_model = param.Selector(default='Half-life (λ)', objects=['Half-life (λ)', 'Association'],\n doc='Choose method for determining initial guesses.')\n dataset = param.Selector(default='', doc='Dataset to apply bounds to', label='Dataset (for bounds)')\n global_bounds = param.Boolean(default=False, doc='Set bounds globally across all datasets')\n lower_bound = param.Number(0., doc='Lower bound for association model fitting')\n upper_bound = param.Number(0., doc='Upper bound for association model fitting')\n guess_name = param.String(default='Guess_1', doc='Name for the initial guesses')\n do_fit1 = param.Action(lambda self: self._action_fit(), label='Calculate Guesses', doc='Start initial guess fitting',\n constant=True)\n\n bounds = param.Dict({}, doc='Dictionary which stores rate fitting bounds', precedence=-1)\n\n def __init__(self, parent, **params):\n self.pbar1 = ASyncProgressBar() #tqdm? https://github.com/holoviz/panel/pull/2079\n self.pbar2 = ASyncProgressBar()\n super(InitialGuessControl, self).__init__(parent, **params)\n self.parent.param.watch(self._parent_datasets_updated, ['data_objects']) #todo refactor\n\n excluded = ['lower_bound', 'upper_bound', 'global_bounds', 'dataset']\n self.own_widget_names = [name for name in self.widgets.keys() if name not in excluded]\n self.update_box()\n\n self._guess_names = {}\n\n @property\n def _layout(self):\n return [\n ('self', self.own_widget_names),\n # ('filters.select_index_rates_lv1', None),\n # ('filters.select_index_rates_lv2', None),\n ]\n\n def make_dict(self):\n widgets = self.generate_widgets(lower_bound=pn.widgets.FloatInput, upper_bound=pn.widgets.FloatInput)\n widgets.update(pbar1=self.pbar1.view, pbar2=self.pbar2.view)\n\n return widgets\n\n @param.depends('fitting_model', watch=True)\n def _fitting_model_updated(self):\n if self.fitting_model == 'Half-life (λ)':\n excluded = ['dataset', 'lower_bound', 'upper_bound', 'global_bounds']\n\n elif self.fitting_model in ['Association', 'Dissociation']:\n excluded = []\n\n self.own_widget_names = [name for name in self.widgets.keys() if name not in excluded]\n self.update_box()\n\n @param.depends('global_bounds', watch=True)\n def _global_bounds_updated(self):\n if self.global_bounds:\n self.param['dataset'].constant = True\n else:\n self.param['dataset'].constant = False\n\n @param.depends('dataset', watch=True)\n def _dataset_updated(self):\n lower, upper = self.bounds[self.dataset]\n self.lower_bound = lower\n self.upper_bound = upper\n\n @param.depends('lower_bound', 'upper_bound', watch=True)\n def _bounds_updated(self):\n # if self.global_bounds:\n # for k in self.bounds.keys():\n # self.bounds[k] = (self.lower_bound, self.upper_bound)\n 
if not self.global_bounds:\n self.bounds[self.dataset] = (self.lower_bound, self.upper_bound)\n\n def _parent_datasets_updated(self, events):\n if len(self.parent.data_objects) > 0:\n self.param['do_fit1'].constant = False\n\n # keys to remove:\n for k in self.bounds.keys() - self.parent.data_objects.keys():\n self.bounds.pop(k)\n # keys to add:\n for k in self.parent.data_objects.keys() - self.bounds.keys():\n self.bounds[k] = get_bounds(self.parent.data_objects[k].timepoints)\n\n options = list(self.parent.data_objects.keys())\n self.param['dataset'].objects = options\n if not self.dataset:\n self.dataset = options[0]\n\n def add_fit_result(self, future):\n name = self._guess_names.pop(future.key)\n\n results = future.result()\n dfs = [result.output for result in results]\n combined_results = pd.concat(dfs, axis=1,\n keys=list(self.parent.data_objects.keys()),\n names=['state_name', 'quantity'])\n\n self.sources['dataframe'].add_df(combined_results, 'rates', name)\n self.parent.fit_results[name] = {k: v for k, v in zip(self.parent.data_objects.keys(), results)}\n self.parent.param.trigger('data_objects') # Informs other fittings that initial guesses are now available\n self.param['do_fit1'].constant = False\n\n def _action_fit(self):\n if len(self.parent.data_objects) == 0:\n self.parent.logger.info('No datasets loaded')\n return\n\n if self.guess_name in itertools.chain(self.parent.fit_results.keys(), self._guess_names.values()):\n self.parent.logger.info(f\"Guess with name {self.guess_name} already in use\")\n return\n\n self.parent.logger.debug('Start initial guess fit')\n self.param['do_fit1'].constant = True\n\n num_samples = len(self.parent.data_objects)\n if self.fitting_model.lower() in ['association', 'dissociation']:\n if self.global_bounds:\n bounds = [(self.lower_bound, self.upper_bound)]*num_samples\n else:\n bounds = self.bounds.values()\n\n futures = self.parent.client.map(fit_rates_weighted_average,\n self.parent.data_objects.values(), bounds, client='worker_client')\n elif self.fitting_model == 'Half-life (λ)': # this is practically instantaneous and does not require dask\n futures = self.parent.client.map(fit_rates_half_time_interpolate, self.parent.data_objects.values())\n\n dask_future = self.parent.client.submit(lambda args: args, futures) #combine multiple futures into one future\n self._guess_names[dask_future.key] = self.guess_name\n\n self.parent.future_queue.append((dask_future, self.add_fit_result))\n\n\nclass FitControl(ControlPanel):\n \"\"\"\n This controller allows users to execute PyTorch fitting of the global data set.\n\n Currently, repeated fitting overrides the old result.\n \"\"\"\n\n header = 'Fitting'\n\n initial_guess = param.Selector(doc='Name of dataset to use for initial guesses.')\n\n fit_mode = param.Selector(default='Batch', objects=['Batch', 'Single'])\n\n stop_loss = param.Number(STOP_LOSS, bounds=(0, None),\n doc='Threshold loss difference below which to stop fitting.')\n stop_patience = param.Integer(PATIENCE, bounds=(1, None),\n doc='Number of epochs where stop loss should be satisfied before stopping.')\n learning_rate = param.Number(optimizer_defaults['SGD']['lr'], bounds=(0, None),\n doc='Learning rate parameter for optimization.')\n momentum = param.Number(optimizer_defaults['SGD']['momentum'], bounds=(0, None),\n doc='Stochastic Gradient Descent momentum')\n nesterov = param.Boolean(optimizer_defaults['SGD']['nesterov'],\n doc='Use Nesterov type of momentum for SGD')\n epochs = param.Integer(EPOCHS, bounds=(1, None),\n doc='Maximum 
number of epochs (iterations.')\n r1 = param.Number(R1, bounds=(0, None), label='Regularizer 1 (peptide axis)',\n doc='Value of the regularizer along residue axis.')\n\n r2 = param.Number(R2, bounds=(0, None), label='Regularizer 2 (sample axis)',\n doc='Value of the regularizer along sample axis.', constant=True)\n\n fit_name = param.String(\"Gibbs_fit_1\", doc=\"Name for for the fit result\")\n\n do_fit = param.Action(lambda self: self._action_fit(), constant=True, label='Do Fitting',\n doc='Start global fitting')\n\n def __init__(self, parent, **params):\n self.pbar1 = ASyncProgressBar() #tqdm?\n super(FitControl, self).__init__(parent, **params)\n\n source = self.parent.sources['dataframe']\n source.param.watch(self._source_updated, ['updated'])\n\n self._current_jobs = 0\n self._max_jobs = 2 #todo config\n self._fit_names = {}\n\n def _source_updated(self, *events):\n table = self.parent.sources['dataframe'].get('rates')\n\n objects = list(table.columns.levels[0])\n if objects:\n self.param['do_fit'].constant = False\n\n self._fit_mode_updated()\n\n self.param['initial_guess'].objects = objects\n if not self.initial_guess and objects:\n self.initial_guess = objects[0]\n\n @param.depends('fit_mode', watch=True)\n def _fit_mode_updated(self):\n if self.fit_mode == 'Batch' and len(self.parent.data_objects) > 1:\n self.param['r2'].constant = False\n else:\n self.param['r2'].constant = True\n\n def add_fit_result(self, future):\n #todo perhaps all these dfs should be in the future?\n name = self._fit_names.pop(future.key)\n result = future.result()\n self._current_jobs -= 1\n\n self.parent.logger.info(f'Finished PyTorch fit: {name}')\n\n # List of single fit results\n if isinstance(result, list):\n self.parent.fit_results[name] = list(result)\n output_dfs = {fit_result.hdxm_set.name: fit_result.output for fit_result in result}\n df = pd.concat(output_dfs.values(), keys=output_dfs.keys(), axis=1)\n\n # create mse losses dataframe\n dfs = {}\n for single_result in result:\n # Determine mean squared errors per peptide, summed over timepoints\n mse = single_result.get_mse()\n mse_sum = np.sum(mse, axis=1)\n peptide_data = single_result.hdxm_set[0].data\n data_dict = {'start': peptide_data['start'], 'end': peptide_data['end'], 'total_mse': mse_sum}\n dfs[single_result.hdxm_set.name] = pd.DataFrame(data_dict)\n mse_df = pd.concat(dfs.values(), keys=dfs.keys(), axis=1)\n\n #todo d calc for single fits\n #todo losses for single fits\n\n # Create d_calc dataframe\n # -----------------------\n # todo needs cleaning up\n state_dfs = {}\n for single_result in result:\n tp_flat = single_result.hdxm_set.timepoints\n elem = tp_flat[np.nonzero(tp_flat)]\n\n time_vec = np.logspace(np.log10(elem.min()) - 1, np.log10(elem.max()), num=100, endpoint=True)\n d_calc_state = single_result(time_vec) #shape Np x Nt\n hdxm = single_result.hdxm_set\n\n peptide_dfs = []\n pm_data = hdxm[0].data\n for d_peptide, pm_row in zip(d_calc_state, pm_data):\n peptide_id = f\"{pm_row['start']}_{pm_row['end']}\"\n data_dict = {'timepoints': time_vec, 'd_calc': d_peptide, 'start_end': [peptide_id] * len(time_vec)}\n peptide_dfs.append(pd.DataFrame(data_dict))\n state_dfs[hdxm.name] = pd.concat(peptide_dfs, axis=0, ignore_index=True)\n\n d_calc_df = pd.concat(state_dfs.values(), keys=state_dfs.keys(), axis=1)\n\n\n # Create losses/epoch dataframe\n # -----------------------------\n losses_dfs = {fit_result.hdxm_set.name: fit_result.losses for fit_result in result}\n losses_df = pd.concat(losses_dfs.values(), keys=losses_dfs.keys(), 
axis=1)\n\n\n else: # one batchfit result\n self.parent.fit_results[name] = result # todo this name can be changed by the time this is executed\n df = result.output\n # df.index.name = 'peptide index'\n\n # Create MSE losses df (per peptide, summed over timepoints)\n # -----------------------\n mse = result.get_mse()\n dfs = {}\n for mse_sample, hdxm in zip(mse, result.hdxm_set):\n peptide_data = hdxm[0].data\n mse_sum = np.sum(mse_sample, axis=1)\n # Indexing of mse_sum with Np to account for zero-padding\n data_dict = {'start': peptide_data['start'], 'end': peptide_data['end'], 'total_mse': mse_sum[:hdxm.Np]}\n dfs[hdxm.name] = pd.DataFrame(data_dict)\n\n mse_df = pd.concat(dfs.values(), keys=dfs.keys(), axis=1)\n\n self.parent.logger.info('Finished PyTorch fit')\n\n # Create d_calc dataframe\n # -----------------------\n tp_flat = result.hdxm_set.timepoints.flatten()\n elem = tp_flat[np.nonzero(tp_flat)]\n\n time_vec = np.logspace(np.log10(elem.min()) - 1, np.log10(elem.max()), num=100, endpoint=True)\n stacked = np.stack([time_vec for i in range(result.hdxm_set.Ns)])\n d_calc = result(stacked)\n\n state_dfs = {}\n for hdxm, d_calc_state in zip(result.hdxm_set, d_calc):\n peptide_dfs = []\n pm_data = hdxm[0].data\n for d_peptide, idx in zip(d_calc_state, pm_data.index):\n peptide_id = f\"{pm_data.loc[idx, 'start']}_{pm_data.loc[idx, 'end']}\"\n data_dict = {'timepoints': time_vec, 'd_calc': d_peptide, 'start_end': [peptide_id] * len(time_vec)}\n peptide_dfs.append(pd.DataFrame(data_dict))\n state_dfs[hdxm.name] = pd.concat(peptide_dfs, axis=0, ignore_index=True)\n d_calc_df = pd.concat(state_dfs.values(), keys=state_dfs.keys(), axis=1)\n\n # Create losses/epoch dataframe\n # -----------------------------\n losses_df = result.losses.copy()\n losses_df.columns = pd.MultiIndex.from_product(\n [['All states'], losses_df.columns],\n names=['state_name', 'quantity']\n )\n\n self.parent.logger.info(\n f\"Finished fitting in {len(result.losses)} epochs, final mean squared residuals is {result.mse_loss:.2f}\")\n self.parent.logger.info(f\"Total loss: {result.total_loss:.2f}, regularization loss: {result.reg_loss:.2f} \"\n f\"({result.regularization_percentage:.1f}%)\")\n\n self.parent.sources['dataframe'].add_df(df, 'global_fit', names=[name])\n self.parent.sources['dataframe'].add_df(mse_df, 'peptides_mse', names=[name])\n self.parent.sources['dataframe'].add_df(d_calc_df, 'd_calc', names=[name])\n self.parent.sources['dataframe'].add_df(losses_df, 'losses', names=[name])\n\n\n\n self.parent.param.trigger('fit_results')\n\n def _action_fit(self):\n if self.fit_name in itertools.chain(self.parent.fit_results.keys(), self._fit_names.values()):\n self.parent.logger.info(f\"Fit result with name {self.fit_name} already in use\")\n return\n\n self.parent.logger.info('Started PyTorch fit')\n\n self._current_jobs += 1\n if self._current_jobs >= self._max_jobs:\n self.widgets['do_fit'].constant = True\n\n self.parent.logger.info(f'Current number of active jobs: {self._current_jobs}')\n if self.fit_mode == 'Batch':\n hdx_set = self.parent.hdx_set\n rates_df = self.sources['dataframe'].get('rates', fit_ID=self.initial_guess)\n\n rates_guess = [rates_df[state]['rate'] for state in hdx_set.names]\n gibbs_guess = hdx_set.guess_deltaG(rates_guess)\n\n dask_future = self.parent.client.submit(fit_gibbs_global_batch, hdx_set, gibbs_guess, **self.fit_kwargs)\n else:\n data_objs = self.parent.data_objects.values()\n rates_df = self.sources['dataframe'].get('rates', fit_ID=self.initial_guess)\n gibbs_guesses = 
[data_obj.guess_deltaG(rates_df[data_obj.name]['rate']) for data_obj in data_objs]\n futures = self.parent.client.map(fit_gibbs_global, data_objs, gibbs_guesses, **self.fit_kwargs)\n\n # Combine list of futures into one future object\n # See https://github.com/dask/distributed/pull/560\n dask_future = self.parent.client.submit(lambda args: args, futures)\n\n self._fit_names[dask_future.key] = self.fit_name\n self.parent.future_queue.append((dask_future, self.add_fit_result))\n\n @property\n def fit_kwargs(self):\n fit_kwargs = dict(r1=self.r1, lr=self.learning_rate, momentum=self.momentum, nesterov=self.nesterov,\n epochs=self.epochs, patience=self.stop_patience, stop_loss=self.stop_loss)\n if self.fit_mode == 'Batch':\n fit_kwargs['r2'] = self.r2\n\n return fit_kwargs\n\n\nclass ClassificationControl(ControlPanel):\n \"\"\"\n This controller allows users classify 'mapping' datasets and assign them colors.\n\n Coloring can be either in discrete categories or as a continuous custom color map.\n \"\"\"\n\n header = 'Classification'\n # format ['tag1', ('tag2a', 'tag2b') ] = tag1 OR (tag2a AND tag2b)\n\n # todo unify name for target field (target_data set)\n # When coupling param with the same name together there should be an option to exclude this behaviour\n table = param.Selector(label='Target table')\n # fit_ID = param.Selector() # generalize selecting widgets based on selected table\n # quantity = param.Selector(label='Quantity') # this is the lowest-level quantity of the multiindex df (filter??)\n\n mode = param.Selector(default='Discrete', objects=['Discrete', 'Continuous', 'Color map'],\n doc='Choose color mode (interpolation between selected colors).')#, 'ColorMap'])\n num_colors = param.Integer(3, bounds=(1, 10), label='Number of colours',\n doc='Number of classification colors.')\n library = param.Selector(default='matplotlib', objects=['matplotlib', 'colorcet'])\n color_map = param.Selector()\n otsu_thd = param.Action(lambda self: self._action_otsu(), label='Otsu',\n doc=\"Automatically perform thresholding based on Otsu's method.\")\n linear_thd = param.Action(lambda self: self._action_linear(), label='Linear',\n doc='Automatically perform thresholding by creating equally spaced sections.')\n log_space = param.Boolean(False,\n doc='Boolean to set whether to apply colors in log space or not.')\n #apply = param.Action(lambda self: self._action_apply())\n no_coverage = param.Color(default='#8c8c8c', doc='Color to use for regions of no coverage')\n\n color_set_name = param.String('', doc='Name for the color dataset to add')\n add_colorset = param.Action(lambda self: self._action_add_colorset())\n\n #show_thds = param.Boolean(True, label='Show Thresholds', doc='Toggle to show/hide threshold lines.')\n values = param.List(default=[], precedence=-1)\n colors = param.List(default=[], precedence=-1)\n\n def __init__(self, parent, **param):\n super(ClassificationControl, self).__init__(parent, **param)\n\n # https://discourse.holoviz.org/t/based-on-a-select-widget-update-a-second-select-widget-then-how-to-link-the-latter-to-a-reactive-plot/917/8\n cc_cmaps = sorted(colorcet.cm.keys())\n mpl_cmaps = sorted(set(plt.colormaps()) - set('cet_' + cmap for cmap in cc_cmaps))\n self.cmaps = {'matplotlib': mpl_cmaps, 'colorcet': cc_cmaps}\n self.param['color_map'].objects = mpl_cmaps\n\n self._update_num_colors()\n self._update_num_values()\n self.excluded = ['library', 'color_map'] # excluded widgets based on choice of `mode`\n\n views = [view for view in self.views.values() if any(isinstance(trs, 
ApplyCmapTransform) for trs in view.transforms)]\n options = [view.table for view in views]\n\n for view in views:\n view.source.param.watch(self._sources_updated, 'updated')\n\n self.param['table'].objects = options\n if not self.table and options:\n self.table = options[0]\n\n self._table_updated() # also updates box\n #self.update_box()\n\n @property\n def own_widget_names(self):\n \"\"\"returns a list of names of widgets in self.widgets to be laid out in controller card\"\"\"\n\n # initial_widgets = [name for name in self.widgets.keys() if name not in self.excluded]\n initial_widgets = []\n for name in self.param:\n precedence = self.param[name].precedence\n if (precedence is None or precedence > 0) and name not in self.excluded + ['name']:\n initial_widgets.append(name)\n #l1[1:1] = l2\n select_widgets = [name for name in self.widgets.keys() if name.startswith('select')]\n initial_widgets[1:1] = select_widgets\n\n #value_widget_names = [f'value_{i}' for i in range(len(self.values))]\n #color_widget_names = [f'color_{i}' for i in range(len(self.colors))]\n widget_names = initial_widgets + [f'value_{i}' for i in range(len(self.values))]\n if self.mode != 'Color map':\n widget_names += [f'color_{i}' for i in range(len(self.colors))]\n return widget_names\n\n # return initial_widgets + #list(self.values_widgets.keys()) + list(self.colors_widgets.keys())\n\n def make_dict(self):\n return self.generate_widgets(num_colors=pn.widgets.IntInput)\n\n @property\n def _layout(self):\n return [\n ('self', self.own_widget_names),\n ]\n\n def _sources_updated(self, *events):\n self._table_updated()\n\n @param.depends('table', watch=True)\n def _table_updated(self):\n df = self.get_data()\n\n #todo also get schema and check if this table is compatible (ie has numbers, not colors only)\n if df.empty:\n return\n names = df.columns.names\n\n # Remove old widgets (list comprehension)\n old_widget_names = [key for key in self.widgets.keys() if key.startswith('select')]\n [self.widgets.pop(key) for key in old_widget_names]\n\n widget_dict = {}\n for i, (name, options) in enumerate(zip(names, df.columns.levels)):\n _opts = ['*'] + list(options) if i != len(names) - 1 else list(options)\n #todo make function to determine defaults\n if i == 0:\n default = _opts[-1]\n else:\n default = 'deltaG' if 'deltaG' in _opts else _opts[0]\n widget = pn.widgets.Select(name=name, options=_opts, value=default)\n widget_dict[f'select_{i}'] = widget\n\n self.widgets.update(widget_dict)\n self.update_box()\n\n def get_data(self):\n \"\"\"object pandas dataframe: returns current multindex dataframe\"\"\"\n source = self.sources['dataframe']\n df = source.get(self.table)\n\n return df\n\n def get_selected_data(self):\n #todo move method to data source?\n df = self.get_data()\n selected_fields = [widget.value for name, widget in self.widgets.items() if name.startswith('select')]\n bools_list = [df.columns.get_level_values(i) == value for i, value in enumerate(selected_fields) if\n value != '*']\n\n if len(bools_list) == 0:\n bools = np.ones(len(df.columns)).astype(bool)\n elif len(bools_list) == 1:\n bools = np.array(bools_list).flatten()\n else:\n bools_array = np.array(bools_list)\n bools = np.product(bools_array, axis=0).astype(bool)\n\n selected_df = df.iloc[:, bools]\n\n return selected_df\n\n def get_values(self):\n \"\"\"return numpy array with only the values from selected dataframe, nan omitted\"\"\"\n\n array = self.get_selected_data().to_numpy().flatten()\n values = array[~np.isnan(array)]\n\n return values\n\n def 
_action_otsu(self):\n if self.num_colors <= 1:\n return\n values = self.get_values() # todo check for no values\n if not values.size:\n return\n\n func = np.log if self.log_space else lambda x: x # this can have NaN when in log space\n thds = threshold_multiotsu(func(values), classes=self.num_colors)\n widgets = [widget for name, widget in self.widgets.items() if name.startswith('value')]\n for thd, widget in zip(thds[::-1], widgets): # Values from high to low\n widget.start = None\n widget.end = None\n widget.value = np.exp(thd) if self.log_space else thd\n self._update_bounds()\n\n #self._get_colors()\n\n def _action_linear(self):\n i = 1 if self.mode == 'Discrete' else 0\n values = self.get_values()\n if not values.size:\n return\n\n if self.log_space:\n thds = np.logspace(np.log(np.min(values)), np.log(np.max(values)),\n num=self.num_colors + i, endpoint=True, base=np.e)\n else:\n thds = np.linspace(np.min(values), np.max(values), num=self.num_colors + i, endpoint=True)\n\n widgets = [widget for name, widget in self.widgets.items() if name.startswith('value')]\n for thd, widget in zip(thds[i:self.num_colors][::-1], widgets):\n # Remove bounds, set values, update bounds\n widget.start = None\n widget.end = None\n widget.value = thd\n self._update_bounds()\n\n def _action_add_colorset(self):\n if not self.color_set_name:\n self.parent.logger.info('No name given tot the colorset')\n return\n\n source = self.sources['dataframe']\n if self.color_set_name in source.tables.keys(): #todo update\n self.parent.logger.info(f'Colorset with name {self.color_set_name} already present')\n return\n\n selected_df = self.get_selected_data()\n cmap, norm = self.get_cmap_and_norm()\n\n array = cmap(norm(selected_df), bytes=True)\n colors_hex = rgb_to_hex(array.reshape(-1, 4))\n output = colors_hex.reshape(array.shape[:-1])\n\n output_df = pd.DataFrame(output, index=selected_df.index, columns=selected_df.columns)\n if output_df.index.name == 'r_number': # The selected dataset is a protein mappable\n c_term = max([data_obj.coverage.protein.c_term for data_obj in self.parent.data_objects.values()])\n n_term = min([data_obj.coverage.protein.n_term for data_obj in self.parent.data_objects.values()])\n\n new_index = pd.RangeIndex(start=n_term, stop=c_term, name='r_number')\n output_df = output_df.reindex(index=new_index, fill_value=self.no_coverage.upper())\n output_df.rename_axis(columns={'fit_ID': 'color_ID'}, inplace=True)\n output_df.columns = output_df.columns.set_levels([self.color_set_name], level=0)\n\n source.add_df(output_df, 'colors')\n\n @param.depends('color_map', 'values', 'colors', watch=True)\n def _action_apply(self):\n cmap, norm = self.get_cmap_and_norm()\n\n if cmap and norm:\n #this needs to be updated to more generalized\n transform = self.transforms['cmap_transform']\n transform.cmap = cmap\n transform.norm = norm\n\n def get_cmap_and_norm(self):\n norm_klass = mpl.colors.Normalize if not self.log_space else mpl.colors.LogNorm\n if len(self.values) < 2:\n return None, None\n\n if self.mode == 'Discrete':\n if len(self.values) != len(self.colors) - 1:\n return None, None\n cmap = mpl.colors.ListedColormap(self.colors)\n norm = mpl.colors.BoundaryNorm(self.values[::-1], self.num_colors, extend='both') #todo refactor values to thd_values\n elif self.mode == 'Continuous':\n norm = norm_klass(vmin=np.min(self.values), vmax=np.max(self.values), clip=True)\n positions = norm(self.values[::-1])\n cmap = mpl.colors.LinearSegmentedColormap.from_list('custom_cmap', list(zip(positions, 
self.colors)))\n elif self.mode == 'Color map':\n norm = norm_klass(vmin=np.min(self.values), vmax=np.max(self.values), clip=True)\n if self.library == 'matplotlib':\n cmap = mpl.cm.get_cmap(self.color_map)\n elif self.library == 'colorcet':\n cmap = getattr(colorcet, 'm_' + self.color_map)\n\n cmap.set_bad(self.no_coverage)\n return cmap, norm\n\n @param.depends('library', watch=True)\n def _update_library(self):\n options = self.cmaps[self.library]\n self.param['color_map'].objects = options\n\n @param.depends('mode', watch=True)\n def _mode_updated(self):\n if self.mode == 'Discrete':\n self.excluded = ['library', 'color_map']\n # self.num_colors = max(3, self.num_colors)\n # self.param['num_colors'].bounds = (3, None)\n elif self.mode == 'Continuous':\n self.excluded = ['library', 'color_map', 'otsu_thd']\n # self.param['num_colors'].bounds = (2, None)\n elif self.mode == 'Color map':\n self.excluded = ['otsu_thd', 'num_colors']\n self.num_colors = 2\n\n #todo adjust add/ remove color widgets methods\n self.param.trigger('num_colors')\n self.update_box()\n\n @param.depends('num_colors', watch=True)\n def _update_num_colors(self):\n while len(self.colors) != self.num_colors:\n if len(self.colors) > self.num_colors:\n self._remove_color()\n elif len(self.colors) < self.num_colors:\n self._add_color()\n self.param.trigger('colors')\n\n @param.depends('num_colors', watch=True)\n def _update_num_values(self):\n diff = 1 if self.mode == 'Discrete' else 0\n while len(self.values) != self.num_colors - diff:\n if len(self.values) > self.num_colors - diff:\n self._remove_value()\n elif len(self.values) < self.num_colors - diff:\n self._add_value()\n\n self._update_bounds()\n self.param.trigger('values')\n self.update_box()\n\n def _add_value(self):\n # value widgets are ordered in decreasing order, ergo next value widget\n # starts with default value of previous value -1\n try:\n first_value = self.values[-1]\n except IndexError:\n first_value = 0\n\n default = float(first_value - 1)\n self.values.append(default)\n\n name = f'Threshold {len(self.values)}'\n key = f'value_{len(self.values) - 1}' # values already populated, first name starts at 1\n widget = pn.widgets.FloatInput(name=name, value=default)\n self.widgets[key] = widget\n widget.param.watch(self._value_event, ['value'])\n\n def _remove_value(self):\n key = f'value_{len(self.values) - 1}'\n widget = self.widgets.pop(key)\n self.values.pop()\n\n [widget.param.unwatch(watcher) for watcher in widget.param._watchers]\n del widget\n\n def _add_color(self):\n try:\n default = DEFAULT_CLASS_COLORS[len(self.colors)]\n except IndexError:\n default = \"#\"+''.join(np.random.choice(list('0123456789abcdef'), 6))\n\n self.colors.append(default)\n\n key = f'color_{len(self.colors) - 1}'\n widget = pn.widgets.ColorPicker(value=default)\n\n self.widgets[key] = widget\n\n widget.param.watch(self._color_event, ['value'])\n\n def _remove_color(self):\n key = f'color_{len(self.colors) - 1}'\n widget = self.widgets.pop(key)\n self.colors.pop()\n [widget.param.unwatch(watcher) for watcher in widget.param._watchers]\n del widget\n\n def _color_event(self, *events):\n for event in events:\n idx = list(self.widgets.values()).index(event.obj)\n key = list(self.widgets.keys())[idx]\n widget_index = int(key.split('_')[1])\n # idx = list(self.colors_widgets).index(event.obj)\n self.colors[widget_index] = event.new\n\n self.param.trigger('colors')\n\n #todo param trigger colors????\n\n def _value_event(self, *events):\n \"\"\"triggers when a single value gets 
changed\"\"\"\n for event in events:\n idx = list(self.widgets.values()).index(event.obj)\n key = list(self.widgets.keys())[idx]\n widget_index = int(key.split('_')[1])\n self.values[widget_index] = event.new\n\n self._update_bounds()\n self.param.trigger('values')\n\n def _update_bounds(self):\n #for i, widget in enumerate(self.values_widgets.values()):\n for i in range(len(self.values)):\n widget = self.widgets[f'value_{i}']\n if i > 0:\n key = f'value_{i-1}'\n prev_value = float(self.widgets[key].value)\n widget.end = np.nextafter(prev_value, prev_value - 1)\n else:\n widget.end = None\n\n if i < len(self.values) - 1:\n key = f'value_{i+1}'\n next_value = float(self.widgets[key].value)\n widget.start = np.nextafter(next_value, next_value + 1)\n else:\n widget.start = None\n\n\nclass ProteinControl(ControlPanel):\n header = 'Protein Control'\n\n input_mode = param.Selector(doc='Method of protein structure input', objects=['PDB File', 'RCSB Download'])\n file_binary = param.Parameter()\n rcsb_id = param.String(doc='RCSB ID of protein to download')\n load_structure = param.Action(lambda self: self._action_load_structure())\n\n def __init__(self, parent, **params):\n super(ProteinControl, self).__init__(parent, **params)\n\n excluded = ['rcsb_id']\n self.own_widget_names = [name for name in self.widgets.keys() if name not in excluded]\n self.update_box()\n\n @property\n def _layout(self):\n return [('self', self.own_widget_names),\n ('filters.ngl_color_id', None),\n ('filters.ngl_state_name', None),\n ]\n\n def make_dict(self):\n return self.generate_widgets(file_binary=pn.widgets.FileInput(multiple=False, accept='.pdb'))\n\n @param.depends('input_mode', watch=True)\n def _update_input_mode(self):\n if self.input_mode == 'PDB File':\n excluded = ['rcsb_id']\n elif self.input_mode == 'RCSB Download':\n excluded = ['file_binary']\n\n self.own_widget_names = [name for name in self.widgets.keys() if name not in excluded]\n self.update_box()\n\n def _action_load_structure(self):\n view = self.views['protein']\n if self.input_mode == 'PDB File':\n pdb_string = self.file_binary.decode()\n view.ngl_view.pdb_string = pdb_string\n elif self.input_mode == 'RCSB Download':\n if len(self.rcsb_id) != 4:\n self.parent.logger.info(f\"Invalid RCSB pdb id: {self.rcsb_id}\")\n return\n\n url = f'http://files.rcsb.org/download/{self.rcsb_id}.pdb'\n with urllib.request.urlopen(url) as response:\n pdb_string = response.read().decode()\n view.ngl_view.pdb_string = pdb_string\n\n\nclass GraphControl(ControlPanel):\n header = 'Graph Control'\n\n spin = param.Boolean(default=False, doc='Spin the protein object')\n\n state_name = param.Selector(doc=\"Name of the currently selected state\")\n fit_id = param.Selector(doc=\"Name of the currently selected fit ID\")\n peptide_index = param.Selector(doc=\"Index of the currently selected peptide\")\n\n def __init__(self, parent, **params):\n super(GraphControl, self).__init__(parent, **params)\n source = self.sources['dataframe']\n source.param.watch(self._source_updated, 'updated')\n\n def make_dict(self):\n widgets = {\n 'general': pn.pane.Markdown('### General'),\n 'coverage': pn.pane.Markdown('### Coverage'),\n 'peptide': pn.pane.Markdown('### Peptide'),\n 'losses': pn.pane.Markdown('### Losses'),\n 'debugging': pn.pane.Markdown('### Debugging'),\n\n }\n\n return {**widgets, **self.generate_widgets()}\n\n def _source_updated(self, *events):\n source = self.sources['dataframe']\n table = source.get('global_fit')\n fit_id_options = 
list(table.columns.get_level_values(0).unique())\n self.param['fit_id'].objects = fit_id_options\n if not self.fit_id and fit_id_options:\n self.fit_id = fit_id_options[0]\n\n table = source.get('peptides')\n state_name_options = list(table.columns.get_level_values(0).unique())\n\n self.param['state_name'].objects = state_name_options\n if not self.state_name and state_name_options:\n self.state_name = state_name_options[0]\n\n @param.depends('state_name', watch=True)\n def _update_state_name(self):\n #https://param.holoviz.org/reference.html#param.parameterized.batch_watch\n\n dwarfs = ['coverage_state_name', 'coverage_mse_state_name', 'peptide_d_exp_state_name', 'peptide_d_calc_state_name',\n 'deltaG_state_name', 'rates_state_name', 'ngl_state_name'] # there really are 7\n\n # one filter to rule them all, one filter to find them,\n # one filter to bring them all, and in the darkness bind them;\n # in the Land of Mordor where the shadows lie.\n for dwarf in dwarfs:\n filt = self.filters[dwarf]\n filt.value = self.state_name\n\n # If current fit result was done as single, also update the state for the losses graph\n losses_filt = self.filters['losses_state_name']\n if self.state_name in losses_filt.param['value'].objects:\n losses_filt.value = self.state_name\n\n\n # Update possible choices for peptide selection depending on selected state\n source = self.sources['dataframe']\n table = source.get('peptides')\n unique_vals = table[self.state_name]['start_end'].unique()\n peptide_options = list(range(len(unique_vals)))\n self.param['peptide_index'].objects = peptide_options\n if self.peptide_index is not None and peptide_options:\n self.peptide_index = peptide_options[0]\n\n @param.depends('fit_id', watch=True)\n def _update_fit_id(self):\n elves = ['coverage_mse_fit_id', 'peptide_d_calc_fit_id', 'deltaG_fit_id', 'losses_fit_id']\n for elf in elves:\n filt = self.filters[elf]\n filt.value = self.fit_id\n\n # perhaps this is faster?\n # widget = self.widget.clone()\n # self.widget.link(widget, value='value', bidirectional=True)\n\n @param.depends('peptide_index', watch=True)\n def _update_peptide_index(self):\n hobbits = ['peptide_d_exp_select', 'peptide_d_calc_select']\n for hobbit in hobbits:\n filt = self.filters[hobbit]\n filt.value = self.peptide_index\n\n @property\n def _layout(self):\n return [\n # ('self', ['coverage']),\n # ('filters.select_index', None),\n # ('filters.exposure_slider', None),\n # ('opts.cmap', None),\n ('self', ['general']),\n ('self', ['fit_id', 'state_name']),\n ('self', ['coverage']),\n ('filters.coverage_exposure', None),\n ('self', ['peptide', 'peptide_index']),\n ('self', ['losses']),\n ('filters.losses_state_name', None),\n # ('self', ['debugging']),\n # ('filters.deltaG_fit_id', None),\n # ('filters.coverage_mse_fit_id', None),\n ]\n\n @param.depends('spin', watch=True)\n def _spin_updated(self):\n view = self.views['protein']\n view.ngl_view.spin = self.spin\n\n\nclass FileExportControl(ControlPanel):\n # todo check if docstring is true\n \"\"\"\n This controller allows users to export and download datasets.\n\n All datasets can be exported as .txt tables.\n 'Mappable' datasets (with r_number column) can be exported as .pml pymol script, which colors protein structures\n based on their 'color' column.\n\n \"\"\"\n\n header = \"File Export\"\n table = param.Selector(label='Target dataset', doc='Name of the dataset to export')\n export_format = param.Selector(default='csv', objects=['csv', 'pprint'],\n doc=\"Format of the exported tables.\"\n \"'csv' is 
machine-readable, 'pprint' is human-readable format\")\n #todo add color param an dlink with protein viewer color\n\n def __init__(self, parent, **param):\n super(FileExportControl, self).__init__(parent, **param)\n\n objects = list(self.sources['dataframe'].tables.keys())\n self.param['table'].objects = objects\n self.table = objects[0]\n self.sources['dataframe'].param.watch(self._source_updated, 'updated')\n\n def make_dict(self):\n widgets = self.generate_widgets()\n widgets['export_tables'] = pn.widgets.FileDownload(\n label='Download table',\n callback=self.table_export_callback\n )\n widgets['export_pml'] = pn.widgets.FileDownload(label='Download pml scripts',\n callback=self.pml_export_callback,\n )\n\n return widgets\n\n @property\n def _layout(self):\n return [\n ('self', None)\n ]\n\n def _source_updated(self, *events):\n self.param['table'].objects = list(self.sources['dataframe'].tables.keys())\n self._table_updated()\n\n @param.depends('table', 'export_format', watch=True)\n def _table_updated(self):\n self.df = self.sources['dataframe'].get(self.table)\n\n ext = '.csv' if self.export_format == 'csv' else '.txt'\n self.widgets['export_tables'].filename = self.table + ext\n\n if self.table == 'colors':\n self.widgets['export_pml'].disabled = False\n self.widgets['export_pml'].filename = self.table + '_pml_scripts.zip'\n else:\n self.widgets['export_pml'].disabled = True\n\n @pn.depends('table')\n def pml_export_callback(self):\n\n if self.table:\n #todo check if table is valid for pml conversion\n\n bio = BytesIO()\n with zipfile.ZipFile(bio, 'w') as pml_zip:\n for col_name in self.df.columns:\n name = col_name if isinstance(col_name, str) else '_'.join(col_name)\n colors = self.df[col_name]\n pml_script = series_to_pymol(colors) # todo refactor pd_series_to_pymol?\n pml_zip.writestr(name + '.pml', pml_script)\n\n bio.seek(0)\n return bio\n\n @pn.depends('table') # param.depends?\n def table_export_callback(self):\n if self.table:\n io = dataframe_to_stringio(self.df, fmt=self.export_format)\n return io\n else:\n return None\n\n\nclass SingleMappingFileInputControl(MappingFileInputControl):\n \"\"\"\n This controller allows users to upload *.txt files where quantities (protection factors, Gibbs free energy, etc) are\n mapped to a linear sequence.\n\n The column should be tab separated with on the last header line (starts with '#') the names of the columns. 
Columns\n should be tab-delimited.\n \"\"\"\n\n def _action_add_dataset(self):\n super()._action_add_dataset()\n to_add_keys = set(self.parent.datasets.keys()) - set(self.parent.sources.keys())\n for key in to_add_keys:\n records = self.parent.datasets[key].to_records()\n data_source = DataSource(records, tags=['comparison', 'mapping'], x='r_number',\n renderer='circle', size=10)\n self.parent.publish_data(key, data_source)\n\n\nclass MatrixMappingFileInputControl(SingleMappingFileInputControl):\n datapoints = param.ListSelector(doc='Select datapoints to include in the matrix')\n\n def _action_add_dataset(self):\n super()._action_add_dataset()\n\n N = 20\n img = np.empty((N, N), dtype=np.uint32)\n view = img.view(dtype=np.uint8).reshape((N, N, 4))\n for i in range(N):\n for j in range(N):\n view[i, j, 0] = int(i / N * 255)\n view[i, j, 1] = 158\n view[i, j, 2] = int(j / N * 255)\n view[i, j, 3] = 255\n\n values = np.random.random(img.shape)\n\n img_ds_dict = {'img': [img], 'scores': [values]}\n data_source = DataSource(img_ds_dict, tags=['image'], name='scores_image', x=0, y=0)\n\n self.parent.publish_data('scores_image', data_source)\n\n def make_list(self):\n widget_list = super().make_list()\n datapoints_widget = widget_list.pop()\n widget_list.insert(3, datapoints_widget)\n return widget_list\n\n def _add_dataset(self):\n full_dict = self.protein.to_dict()\n data_dict = {k: v for k, v in full_dict.items() if k in self.datapoints}\n data_dict['r_number'] = self.protein.index\n protein = Protein(data_dict, index='r_number')\n self.parent.datasets[self.dataset_name] = protein\n\n @param.depends('input_file', watch=True)\n def _input_file_updated(self):\n super()._input_file_updated()\n if self.input_file:\n header_fields = self.protein.df.columns\n\n float_fields = [f for f in header_fields if f.replace('.', '', 1).isdigit()]\n self.param['datapoints'].objects = float_fields\n self.datapoints = float_fields\n\n# self.dataset_name = self.dataset_name or Path(self.widget_dict['input_file'].filename).stem\n\n\nclass MatrixImageControl(ControlPanel):\n \"\"\"\n This controller takes an input loaded matrix and converts it to an (rgba) interpolated rendered image\n\n \"\"\"\n\n\nclass FDPeptideFileInputControl(PeptideFileInputControl):\n # todo @tejas: Add test\n # This requires making a test function with the full_deuteration_app in apps.py\n def make_list(self):\n parameters = ['add_button', 'clear_button', 'drop_first', 'load_button', 'd_percentage',\n 'fd_state', 'fd_exposure', 'parse_button']\n first_widgets = list([self.widget_dict[par] for par in parameters])\n return self.file_selectors + first_widgets\n\n def _action_parse(self):\n \"\"\"Apply controls to :class:`~pyhdx.models.PeptideMasterTable` and set :class:`~pyhdx.models.HDXMeasurement`\"\"\"\n pmt = self.parent.peptides\n\n data_states = pmt.data[pmt.data['state'] == self.fd_state]\n data_exposure = data_states[data_states['exposure'] == self.fd_exposure]\n\n scores = 100 * data_exposure['uptake'] / data_exposure['ex_residues']\n data_final = append_fields(data_exposure, 'scores', data=scores, usemask=False)\n\n # pmt.set_control((fd_state, fd_exposure))\n series = HDXMeasurement(data_final)\n\n self.parent.series = series\n\n self.parent.logger.info(f\"Loaded FD control '{self.exp_state}' with {len(series.coverage)} peptides\")\n self.parent.logger.info(f'Mean deuteration is {scores.mean()}%, std {scores.std()}%')\n\n\nclass PeptideFoldingFileInputControl(PeptideFileInputControl):\n # todo @tejas: Add test\n # This requires 
making a test function with the folding in apps.py\n\n be_mode = param.Selector(doc='Select method of normalization', label='Norm mode', objects=['Exp', 'Theory']\n , precedence=-1)\n fd_state = param.Selector(doc='State used to normalize uptake', label='100% Control State')\n fd_exposure = param.Selector(doc='Exposure used to normalize uptake', label='100% Control Exposure')\n zero_state = param.Selector(doc='State used to zero uptake', label='0% Control State')\n zero_exposure = param.Selector(doc='Exposure used to zero uptake', label='0% Control Exposure')\n\n def make_dict(self):\n return self.generate_widgets()\n\n def make_list(self):\n parameters = ['add_button', 'clear_button', 'drop_first', 'ignore_prolines', 'load_button',\n 'fd_state', 'fd_exposure', 'zero_state', 'zero_exposure', 'exp_state',\n 'exp_exposures', 'parse_button']\n first_widgets = list([self.widget_dict[par] for par in parameters])\n return self.file_selectors + first_widgets\n\n def _action_load(self):\n super()._action_load()\n states = list(np.unique(self.parent.peptides.data['state']))\n self.param['zero_state'].objects = states\n self.zero_state = states[0]\n\n @param.depends('fd_state', 'fd_exposure', watch=True)\n def _update_experiment(self):\n #TODO THIS needs to be updated to also incorporate the zero (?)\n pm_dict = self.parent.peptides.return_by_name(self.fd_state, self.fd_exposure)\n states = list(np.unique([v.state for v in pm_dict.values()]))\n self.param['exp_state'].objects = states\n self.exp_state = states[0] if not self.exp_state else self.exp_state\n\n @param.depends('zero_state', watch=True)\n def _update_zero_exposure(self):\n b = self.parent.peptides.data['state'] == self.zero_state\n data = self.parent.peptides.data[b]\n exposures = list(np.unique(data['exposure']))\n self.param['zero_exposure'].objects = exposures\n if exposures:\n self.control_exposure = exposures[0]\n\n def _action_parse(self):\n \"\"\"Apply controls to :class:`~pyhdx.models.PeptideMasterTable` and set :class:`~pyhdx.models.HDXMeasurement`\"\"\"\n control_0 = self.zero_state, self.zero_exposure\n self.parent.peptides.set_control((self.fd_state, self.fd_exposure), control_0=control_0)\n\n data_states = self.parent.peptides.data[self.parent.peptides.data['state'] == self.exp_state]\n data = data_states[np.isin(data_states['exposure'], self.exp_exposures)]\n\n series = HDXMeasurement(data)\n self.parent.series = series\n\n self._publish_scores()\n\n self.parent.logger.info(f'Loaded experiment state {self.exp_state} '\n f'({len(series)} timepoints, {len(series.coverage)} peptides each)')\n\n\nclass DifferenceControl(ControlPanel):\n \"\"\"\n This controller allows users to select two datasets from available datasets, choose a quantity to compare between,\n and choose the type of operation between quantities (Subtract/Divide).\n\n \"\"\"\n header = 'Differences'\n\n dataset_1 = param.Selector(doc='First dataset to compare')\n dataset_2 = param.Selector(doc='Second dataset to compare')\n\n comparison_name = param.String()\n operation = param.Selector(default='Subtract', objects=['Subtract', 'Divide'],\n doc='Select the operation to perform between the two datasets')\n\n comparison_quantity = param.Selector(doc=\"Select a quantity to compare (column from input txt file)\")\n add_comparison = param.Action(lambda self: self._action_add_comparison(),\n doc='Click to add this comparison to available comparisons')\n comparison_list = param.ListSelector(doc='Lists available comparisons')\n remove_comparison = param.Action(lambda 
self: self._action_remove_comparison(),\n doc='Remove selected comparisons from the list')\n\n def __init__(self, parent, **params):\n super(DifferenceControl, self).__init__(parent, **params)\n self.parent.param.watch(self._datasets_updated, ['datasets'])\n\n def _datasets_updated(self, events):\n objects = list(self.parent.datasets.keys())\n\n self.param['dataset_1'].objects = objects\n if not self.dataset_1:\n self.dataset_1 = objects[0]\n self.param['dataset_2'].objects = objects\n if not self.dataset_2:# or self.dataset_2 == objects[0]: # dataset2 default to second dataset? toggle user modify?\n self.dataset_2 = objects[0]\n\n @param.depends('dataset_1', 'dataset_2', watch=True)\n def _selection_updated(self):\n if self.datasets:\n unique_names = set.intersection(*[{name for name in protein.df.dtypes.index} for protein in self.datasets])\n objects = [name for name in unique_names if np.issubdtype(self.protein_1[name].dtype, np.number)]\n objects.sort()\n\n # todo check for scara dtype\n self.param['comparison_quantity'].objects = objects\n if self.comparison_quantity is None:\n self.comparison_quantity = objects[0]\n\n @property\n def protein_1(self):\n \"\"\":class:`~pyhdx.models.Protein`: Protein object of dataset 1\"\"\"\n try:\n return self.parent.datasets[self.dataset_1]\n except KeyError:\n return None\n\n @property\n def protein_2(self):\n \"\"\":class:`~pyhdx.models.Protein`: Protein object of dataset 2\"\"\"\n try:\n return self.parent.datasets[self.dataset_2]\n except KeyError:\n return None\n\n @property\n def datasets(self):\n \"\"\":obj:`tuple`: Tuple with `(protein_1, protein_2)\"\"\"\n datasets = (self.protein_1, self.protein_2)\n if None in datasets:\n return None\n else:\n return datasets\n\n def _action_add_comparison(self):\n if not self.comparison_name:\n self.parent.logger.info('The added comparison needs to have a name')\n return\n if self.datasets is None:\n return\n\n op = {'Subtract': operator.sub, 'Divide': operator.truediv}[self.operation]\n comparison = op(*[p[self.comparison_quantity] for p in self.datasets]).rename('comparison')\n value1 = self.protein_1[self.comparison_quantity].rename('value1')\n value2 = self.protein_2[self.comparison_quantity].rename('value2')\n df = pd.concat([comparison, value1, value2], axis=1)\n\n output = df.to_records()\n data_source = DataSource(output, tags=['comparison', 'mapping'], x='r_number', y='comparison',\n renderer='circle', size=10)\n self.parent.publish_data(self.comparison_name, data_source) # Triggers parent.sources param\n self.comparison_name = ''\n\n def _action_remove_comparison(self):\n for comparison in self.comparison_list:\n self.parent.sources.pop(comparison) #Popping from dicts does not trigger param\n self.parent.param.trigger('sources')\n\n @param.depends('parent.sources', watch=True)\n def _update_comparison_list(self):\n objects = [name for name, d in self.parent.sources.items() if 'comparison' in d.tags]\n self.param['comparison_list'].objects = objects\n\n\nclass SingleControl(ControlPanel):\n # todo @tejas: Add test\n\n \"\"\"\n This controller allows users to select a dataset from available datasets, and choose a quantity to classify/visualize,\n and add this quantity to the available datasets.\n \"\"\"\n\n #todo subclass with DifferenceControl\n #rename dataset_name\n header = 'Datasets'\n\n dataset = param.Selector(doc='Dataset')\n dataset_name = param.String(doc='Name of the dataset to add')\n quantity = param.Selector(doc=\"Select a quantity to plot (column from input txt file)\")\n\n 
add_dataset = param.Action(lambda self: self._action_add_dataset(),\n doc='Click to add this comparison to available comparisons')\n dataset_list = param.ListSelector(doc='Lists available comparisons')\n remove_dataset = param.Action(lambda self: self._action_remove_comparison(),\n doc='Remove selected datasets from available datasets')\n\n def __init__(self, parent, **params):\n super(SingleControl, self).__init__(parent, **params)\n self.parent.param.watch(self._datasets_updated, ['datasets'])\n\n def _datasets_updated(self, events):\n objects = list(self.parent.datasets.keys())\n\n self.param['dataset'].objects = objects\n if not self.dataset:\n self.dataset = objects[0]\n\n @param.depends('dataset', watch=True)\n def _selection_updated(self):\n if self.dataset:\n dataset = self.parent.datasets[self.dataset]\n names = dataset.dtype.names\n objects = [name for name in names if name != 'r_number']\n self.param['quantity'].objects = objects\n if self.quantity is None:\n self.quantity = objects[0]\n\n def _action_add_dataset(self):\n if not self.dataset_name:\n self.parent.logger.info('The added comparison needs to have a name')\n return\n if not self.dataset:\n return\n\n array = self.parent.datasets[self.dataset]\n data_source = DataSource(array, tags=['comparison', 'mapping'], x='r_number', y=self.quantity,\n renderer='circle', size=10)\n self.parent.publish_data(self.dataset_name, data_source) # Triggers parent.sources param\n self.comparison_name = ''\n\n def _action_remove_comparison(self):\n for ds in self.dataset_list:\n self.parent.sources.pop(ds) #Popping from dicts does not trigger param\n self.parent.param.trigger('sources')\n\n @param.depends('parent.sources', watch=True)\n def _update_dataset_list(self):\n objects = [name for name, d in self.parent.sources.items()]\n self.param['dataset_list'].objects = objects\n\n\nclass FDCoverageControl(CoverageControl):\n def make_list(self):\n lst = super(CoverageControl, self).make_list()\n return lst[:-1]\n\n\nclass FoldingFitting(InitialGuessControl):\n fitting_model = param.Selector(default='Dissociation', objects=['Dissociation'],\n doc='Choose method for determining initial guesses.')\n\n def make_list(self):\n self.widget_dict.update(pbar1=self.pbar1.view, pbar2=self.pbar2.view)\n parameters = ['fitting_model', 'lower_bound', 'upper_bound', 'do_fit1', 'pbar1']\n\n widget_list = list([self.widget_dict[par] for par in parameters])\n return widget_list\n\n\nclass FitResultControl(ControlPanel):\n # @tejas skip test, currently bugged, issue #182\n\n \"\"\"\n This controller allows users to view to fit result and how it describes the uptake of every peptide.\n \"\"\"\n\n header = 'Fit Results'\n\n peptide_index = param.Integer(0, bounds=(0, None),\n doc='Index of the peptide to display.')\n x_axis_type = param.Selector(default='Log', objects=['Linear', 'Log'],\n doc='Choose whether to plot the x axis as Logarithmic axis or Linear.')\n\n def __init__(self, parent, **param):\n super(FitResultControl, self).__init__(parent, **param)\n\n self.d_uptake = {} ## Dictionary of arrays (N_p, N_t) with results of fit result model calls\n #todo why does still still exists should it not just be dataobjects??\n # --> because they need to be calcualted only once and then dataobjects are generated per index\n # can be improved probably (by putting all data in data source a priory?\n\n self.parent.param.watch(self._series_updated, ['datasets']) #todo refactor\n self.parent.param.watch(self._fit_results_updated, ['fit_results'])\n\n def 
_series_updated(self, *events):\n pass\n #\n # self.param['peptide_index'].bounds = (0, len(self.parent.series.coverage.data) - 1)\n # self.d_uptake['uptake_corrected'] = self.parent.series.uptake_corrected.T\n # self._update_sources()\n\n @property\n def fit_timepoints(self):\n time = np.logspace(-2, np.log10(self.parent.series.timepoints.max()), num=250)\n time = np.insert(time, 0, 0.)\n return time\n\n def _fit_results_updated(self, *events):\n accepted_fitresults = ['fr_pfact']\n #todo wrappertje which checks with a cached previous version of this particular param what the changes are even it a manual trigger\n for name, fit_result in self.parent.fit_results.items():\n if name in accepted_fitresults:\n D_upt = fit_result(self.fit_timepoints)\n self.d_uptake[name] = D_upt\n else:\n continue\n # push results to graph\n self._update_sources()\n\n @param.depends('peptide_index', watch=True)\n def _update_sources(self):\n for name, array in self.d_uptake.items():\n if name == 'uptake_corrected': ## this is the raw data\n timepoints = self.parent.series.timepoints\n renderer = 'circle'\n color = '#000000'\n else:\n timepoints = self.fit_timepoints\n renderer = 'line'\n color = '#bd0d1f' #todo css / default color cycle per Figure Panel?\n\n dic = {'time': timepoints, 'uptake': array[self.peptide_index, :]}\n data_source = DataSource(dic, x='time', y='uptake', tags=['uptake_curve'], renderer=renderer, color=color)\n self.parent.publish_data(name, data_source)\n\n\nclass ColoringControl(ClassificationControl):\n # WIP class, skip tests\n\n\n def make_dict(self):\n widgets_dict = super().make_dict()\n widgets_dict.pop('quantity')\n\n return widgets_dict\n\n @param.depends('values', 'colors', 'target', 'quantity', watch=True)\n def _get_colors(self):\n # todo this part is repeated\n if np.all(self.values == 0):\n return\n elif np.any(np.diff(self.values) > 0): # Skip applying colors when not strictly monotonic descending\n return\n elif not self.target:\n return\n elif 'scores_image' not in self.parent.sources.keys():\n return\n\n tgt_source = self.parent.sources[self.target] # full array including nan entries\n r_number = tgt_source.source.data['r_number']\n assert np.all(np.diff(r_number) == 1)\n\n\n headers = [f for f in tgt_source.source.data.keys() if f.replace('.', '', 1).isdigit()]\n\n headers.sort(key=float)\n timepoints = np.array([float(f) for f in headers])\n N_interpolate = 500\n interp_timepoints = np.linspace(0, timepoints.max(), num=N_interpolate, endpoint=True)\n data_array = np.stack([tgt_source.source.data[k] for k in headers])\n\n array = np.stack([np.interp(interp_timepoints, timepoints, data) for data in data_array.T]).T\n\n\n colors_hex = self._calc_colors(array.flatten()) # colors are in hex format\n if colors_hex is None: # this is the colors not between 0 and 1 bug / error\n return\n\n colors_hex[colors_hex == 'nan'] = '#8c8c8c'\n colors_rgba = np.array([hex_to_rgba(h) for h in colors_hex])\n\n shape = (N_interpolate, len(r_number))\n img = np.empty(shape, dtype=np.uint32)\n view = img.view(dtype=np.uint8).reshape(*shape, 4)\n view[:] = colors_rgba.reshape(*shape, 4)\n\n img_source = self.parent.sources['scores_image']\n img_source.render_kwargs['dw'] = r_number.max()\n img_source.render_kwargs['dh'] = timepoints.max()\n img_source.source.data.update(img=[img], scores=[array])\n\n\n #self.parent.sources[self.target].source.data['color'] = colors\n\n\nclass DifferenceFileExportControl(FileExportControl):\n \"\"\"\n This controller allows users to export and download 
datasets.\n\n 'Mappable' datasets (with r_number column) can be exported as .pml pymol script, which colors protein structures\n based on their 'color' column.\n\n \"\"\"\n\n accepted_tags = ['mapping']\n #todo include comparison info (x vs y) in output\n\n def _sources_updated(self, *events): #refactor _parent_sources_updated on classificationcontrol\n data_sources = [k for k, src in self.parent.sources.items() if src.resolve_tags(self.accepted_tags)]\n self.param['target'].objects = list(data_sources)\n\n # Set target if its not set already\n if not self.target and data_sources:\n self.target = data_sources[-1]\n\n @pn.depends('target', watch=True)\n def _update_filename(self):\n self.export_linear_download.filename = self.target + '_linear.txt'\n if 'r_number' in self.export_dict.keys():\n self.pml_script_download.filename = self.target + '_pymol.pml'\n\n\nclass OptionsControl(ControlPanel):\n \"\"\"The controller is used for various settings.\"\"\"\n\n header = 'Options'\n\n #todo this should be a component (mixin?) for apps who dont have these figures\n link_xrange = param.Boolean(True, doc='Link the X range of the coverage figure and other linear mapping figures.', constant=False)\n log_level = param.Selector(default='DEBUG', objects=['DEBUG', 'INFO', 'WARN', 'ERROR', 'FATAL', 'OFF', 'TRACE'],\n doc='Set the logging level.')\n\n def __init__(self, parent, **param):\n super(OptionsControl, self).__init__(parent, **param)\n\n @property\n def enabled(self):\n return self.master_figure is not None and self.client_figures is not None\n\n @param.depends('link_xrange', watch=True)\n def _update_link(self):\n if self.enabled:\n if self.link_xrange:\n self._link()\n else:\n self._unlink()\n\n @property\n def client_figures(self):\n client_names = ['RateFigure', 'PFactFigure']\n return [self.parent.figure_panels[name].figure for name in client_names]\n\n @property\n def master_figure(self):\n return self.parent.figure_panels['CoverageFigure'].figure\n\n @property\n def figures(self):\n return [self.master_figure] + self.client_figures\n\n def _unlink(self):\n for fig in self.figures:\n fig.x_range.js_property_callbacks.pop('change:start')\n fig.x_range.js_property_callbacks.pop('change:end')\n\n def _link(self):\n for client in self.client_figures:\n self.master_figure.x_range.js_link('start', client.x_range, 'start')\n self.master_figure.x_range.js_link('end', client.x_range, 'end')\n\n client.x_range.js_link('start', self.master_figure.x_range, 'start')\n client.x_range.js_link('end', self.master_figure.x_range, 'end')\n\n\nclass DeveloperControl(ControlPanel):\n \"\"\"Controller with debugging options\"\"\"\n\n header = 'Developer Options'\n test_logging = param.Action(lambda self: self._action_test_logging())\n breakpoint_btn = param.Action(lambda self: self._action_break())\n test_btn = param.Action(lambda self: self._action_test())\n trigger_btn = param.Action(lambda self: self._action_trigger())\n print_btn = param.Action(lambda self: self._action_print())\n runtime_warning = param.Action(lambda self: self._action_runtime())\n\n def __init__(self, parent, **params):\n super(DeveloperControl, self).__init__(parent, **params)\n\n def _action_test_logging(self):\n print(self.parent.logger)\n self.parent.logger.debug('TEST DEBUG MESSAGE')\n #logging.info('THis is some info')\n for i in range(20):\n self.parent.logger.info('dit is een test123')\n\n def _action_print(self):\n\n hdx_set = self.parent.hdx_set\n print(hdx_set.names)\n guess = self.parent.control_panels['FitControl']\n rates_df = 
self.sources['dataframe'].get('rates', fit_ID=guess.initial_guess)\n print(guess.initial_guess)\n print(rates_df)\n\n rates_guess = [rates_df[state]['rate'] for state in hdx_set.names]\n gibbs_guess = hdx_set.guess_deltaG(rates_guess)\n\n def _action_break(self):\n main_ctrl = self.parent\n control_panels = main_ctrl.control_panels\n views = main_ctrl.views\n sources = main_ctrl.sources\n\n mse_view = views['coverage_mse']\n data = mse_view.get_data()\n print('mse')\n print(data)\n\n coverage_view = views['coverage']\n data = coverage_view.get_data()\n print('coverage')\n print(data)\n\n\n print('Time for a break')\n\n def _action_test(self):\n src_file = r'C:\\Users\\jhsmi\\pp\\PyHDX\\tests\\test_data\\ecSecB_torch_fit.txt'\n array = txt_to_np(src_file)\n data_dict = {name: array[name] for name in array.dtype.names}\n\n data_dict['color'] = np.full_like(array, fill_value=DEFAULT_COLORS['pfact'], dtype='<U7')\n data_source = DataSource(data_dict, x='r_number', tags=['mapping', 'pfact', 'deltaG'],\n renderer='circle', size=10, name='global_fit')\n\n self.parent.publish_data('global_fit', data_source)\n\n def _action_trigger(self):\n deltaG_figure = self.parent.figure_panels['DeltaGFigure']\n deltaG_figure.bk_pane.param.trigger('object')\n\n def _action_runtime(self):\n result = np.mean([])" ]
[ [ "pandas.DataFrame" ], [ "matplotlib.colors.BoundaryNorm", "numpy.product", "pandas.RangeIndex", "numpy.issubdtype", "matplotlib.pyplot.colormaps", "pandas.DataFrame", "numpy.all", "numpy.max", "numpy.mean", "numpy.exp", "numpy.nextafter", "numpy.unique", "pandas.Index", "numpy.stack", "numpy.diff", "numpy.insert", "numpy.interp", "numpy.lib.recfunctions.append_fields", "numpy.isin", "pandas.concat", "numpy.nonzero", "numpy.random.choice", "numpy.min", "numpy.isnan", "numpy.full_like", "matplotlib.colors.ListedColormap", "numpy.random.rand", "pandas.MultiIndex.from_product", "numpy.logical_and", "numpy.array", "numpy.sum", "numpy.random.random", "matplotlib.cm.get_cmap", "numpy.empty" ] ]
Napam/JallaResearch
[ "f0ed573f20f050ec0a653bff09068eb11a9c4128" ]
[ "coalescmask/coalesc.py" ]
[ "# https://stackoverflow.com/questions/67557724/coalescing-rows-from-boolean-mask?noredirect=1#comment119461081_67557724\nimport numpy as np \n\nrows = np.r_['1,2,0', :6, :6]\nmask = np.tile([1, 1, 0, 0, 1, 1], (2,1)).T.astype(bool)\n\ndef maskforwardfill(a: np.ndarray, mask: np.ndarray):\n mask = mask.copy()\n mask[np.diff(mask,prepend=[0]) == 1] = False # set leading True to False\n return a[~mask] # index out wanted rows\n\ndef maskforwardfill2(a: np.ndarray, mask: np.ndarray):\n mask = mask.copy()\n mask[1:] = mask[1:] & mask[:-1] # Set leading Trues to Falses\n mask[0] = False\n return a[~mask] # index out wanted rows\n\n# Reduce mask's dimension since I assume that you only do complete rows\nprint(maskforwardfill2(rows, mask.any(1)))\n#[[0 0]\n# [2 2]\n# [3 3]\n# [4 4]]\n\n" ]
[ [ "numpy.diff", "numpy.tile" ] ]
carismoses/scipy
[ "f368dbdf8b94e848527ab0c80fc3683b83385305" ]
[ "scipy/optimize/linesearch.py" ]
[ "\"\"\"\nFunctions\n---------\n.. autosummary::\n :toctree: generated/\n\n line_search_armijo\n line_search_wolfe1\n line_search_wolfe2\n scalar_search_wolfe1\n scalar_search_wolfe2\n\n\"\"\"\nfrom __future__ import division, print_function, absolute_import\n\nfrom warnings import warn\n\nfrom scipy.optimize import minpack2\nimport numpy as np\nfrom scipy._lib.six import xrange\n\n__all__ = ['LineSearchWarning', 'line_search_wolfe1', 'line_search_wolfe2',\n 'scalar_search_wolfe1', 'scalar_search_wolfe2',\n 'line_search_armijo']\n\nclass LineSearchWarning(RuntimeWarning):\n pass\n\n\n#------------------------------------------------------------------------------\n# Minpack's Wolfe line and scalar searches\n#------------------------------------------------------------------------------\n\ndef line_search_wolfe1(f, fprime, xk, pk, gfk=None,\n old_fval=None, old_old_fval=None,\n args=(), c1=1e-4, c2=0.9, amax=50, amin=1e-8,\n xtol=1e-14):\n \"\"\"\n As `scalar_search_wolfe1` but do a line search to direction `pk`\n\n Parameters\n ----------\n f : callable\n Function `f(x)`\n fprime : callable\n Gradient of `f`\n xk : array_like\n Current point\n pk : array_like\n Search direction\n\n gfk : array_like, optional\n Gradient of `f` at point `xk`\n old_fval : float, optional\n Value of `f` at point `xk`\n old_old_fval : float, optional\n Value of `f` at point preceding `xk`\n\n The rest of the parameters are the same as for `scalar_search_wolfe1`.\n\n Returns\n -------\n stp, f_count, g_count, fval, old_fval\n As in `line_search_wolfe1`\n gval : array\n Gradient of `f` at the final point\n\n \"\"\"\n if gfk is None:\n gfk = fprime(xk)\n\n if isinstance(fprime, tuple):\n eps = fprime[1]\n fprime = fprime[0]\n newargs = (f, eps) + args\n gradient = False\n else:\n newargs = args\n gradient = True\n\n gval = [gfk]\n gc = [0]\n fc = [0]\n\n def phi(s):\n fc[0] += 1\n return f(xk + s*pk, *args)\n\n def derphi(s):\n gval[0] = fprime(xk + s*pk, *newargs)\n if gradient:\n gc[0] += 1\n else:\n fc[0] += len(xk) + 1\n return np.dot(gval[0], pk)\n\n derphi0 = np.dot(gfk, pk)\n\n stp, fval, old_fval = scalar_search_wolfe1(\n phi, derphi, old_fval, old_old_fval, derphi0,\n c1=c1, c2=c2, amax=amax, amin=amin, xtol=xtol)\n\n return stp, fc[0], gc[0], fval, old_fval, gval[0]\n\n\ndef scalar_search_wolfe1(phi, derphi, phi0=None, old_phi0=None, derphi0=None,\n c1=1e-4, c2=0.9,\n amax=50, amin=1e-8, xtol=1e-14):\n \"\"\"\n Scalar function search for alpha that satisfies strong Wolfe conditions\n\n alpha > 0 is assumed to be a descent direction.\n\n Parameters\n ----------\n phi : callable phi(alpha)\n Function at point `alpha`\n derphi : callable phi'(alpha)\n Objective function derivative. 
Returns a scalar.\n phi0 : float, optional\n Value of phi at 0\n old_phi0 : float, optional\n Value of phi at previous point\n derphi0 : float, optional\n Value derphi at 0\n c1 : float, optional\n Parameter for Armijo condition rule.\n c2 : float, optional\n Parameter for curvature condition rule.\n amax, amin : float, optional\n Maximum and minimum step size\n xtol : float, optional\n Relative tolerance for an acceptable step.\n\n Returns\n -------\n alpha : float\n Step size, or None if no suitable step was found\n phi : float\n Value of `phi` at the new point `alpha`\n phi0 : float\n Value of `phi` at `alpha=0`\n\n Notes\n -----\n Uses routine DCSRCH from MINPACK.\n\n \"\"\"\n\n if phi0 is None:\n phi0 = phi(0.)\n if derphi0 is None:\n derphi0 = derphi(0.)\n\n if old_phi0 is not None and derphi0 != 0:\n alpha1 = min(1.0, 1.01*2*(phi0 - old_phi0)/derphi0)\n if alpha1 < 0:\n alpha1 = 1.0\n else:\n alpha1 = 1.0\n\n phi1 = phi0\n derphi1 = derphi0\n isave = np.zeros((2,), np.intc)\n dsave = np.zeros((13,), float)\n task = b'START'\n\n maxiter = 100\n for i in xrange(maxiter):\n stp, phi1, derphi1, task = minpack2.dcsrch(alpha1, phi1, derphi1,\n c1, c2, xtol, task,\n amin, amax, isave, dsave)\n if task[:2] == b'FG':\n alpha1 = stp\n phi1 = phi(stp)\n derphi1 = derphi(stp)\n else:\n break\n else:\n # maxiter reached, the line search did not converge\n stp = None\n\n if task[:5] == b'ERROR' or task[:4] == b'WARN':\n stp = None # failed\n\n return stp, phi1, phi0\n\n\nline_search = line_search_wolfe1\n\n\n#------------------------------------------------------------------------------\n# Pure-Python Wolfe line and scalar searches\n#------------------------------------------------------------------------------\n\ndef line_search_wolfe2(f, myfprime, xk, pk, gfk=None, old_fval=None,\n old_old_fval=None, args=(), c1=1e-4, c2=0.9, amax=None,\n extra_condition=None, maxiter=10):\n \"\"\"Find alpha that satisfies strong Wolfe conditions.\n\n Parameters\n ----------\n f : callable f(x,*args)\n Objective function.\n myfprime : callable f'(x,*args)\n Objective function gradient.\n xk : ndarray\n Starting point.\n pk : ndarray\n Search direction.\n gfk : ndarray, optional\n Gradient value for x=xk (xk being the current parameter\n estimate). Will be recomputed if omitted.\n old_fval : float, optional\n Function value for x=xk. Will be recomputed if omitted.\n old_old_fval : float, optional\n Function value for the point preceding x=xk\n args : tuple, optional\n Additional arguments passed to objective function.\n c1 : float, optional\n Parameter for Armijo condition rule.\n c2 : float, optional\n Parameter for curvature condition rule.\n amax : float, optional\n Maximum step size\n extra_condition : callable, optional\n A callable of the form ``extra_condition(alpha, x, f, g)``\n returning a boolean. Arguments are the proposed step ``alpha``\n and the corresponding ``x``, ``f`` and ``g`` values. The line search \n accepts the value of ``alpha`` only if this \n callable returns ``True``. If the callable returns ``False`` \n for the step length, the algorithm will continue with \n new iterates. 
The callable is only called for iterates \n satisfying the strong Wolfe conditions.\n maxiter : int, optional\n Maximum number of iterations to perform\n\n Returns\n -------\n alpha : float or None\n Alpha for which ``x_new = x0 + alpha * pk``,\n or None if the line search algorithm did not converge.\n fc : int\n Number of function evaluations made.\n gc : int\n Number of gradient evaluations made.\n new_fval : float or None\n New function value ``f(x_new)=f(x0+alpha*pk)``,\n or None if the line search algorithm did not converge.\n old_fval : float\n Old function value ``f(x0)``.\n new_slope : float or None\n The local slope along the search direction at the\n new value ``<myfprime(x_new), pk>``,\n or None if the line search algorithm did not converge.\n\n\n Notes\n -----\n Uses the line search algorithm to enforce strong Wolfe\n conditions. See Wright and Nocedal, 'Numerical Optimization',\n 1999, pg. 59-60.\n\n For the zoom phase it uses an algorithm by [...].\n\n \"\"\"\n fc = [0]\n gc = [0]\n gval = [None]\n gval_alpha = [None]\n\n def phi(alpha):\n fc[0] += 1\n return f(xk + alpha * pk, *args)\n\n if isinstance(myfprime, tuple):\n def derphi(alpha):\n fc[0] += len(xk) + 1\n eps = myfprime[1]\n fprime = myfprime[0]\n newargs = (f, eps) + args\n gval[0] = fprime(xk + alpha * pk, *newargs) # store for later use\n gval_alpha[0] = alpha\n return np.dot(gval[0], pk)\n else:\n fprime = myfprime\n\n def derphi(alpha):\n gc[0] += 1\n gval[0] = fprime(xk + alpha * pk, *args) # store for later use\n gval_alpha[0] = alpha\n return np.dot(gval[0], pk)\n\n if gfk is None:\n gfk = fprime(xk, *args)\n derphi0 = np.dot(gfk, pk)\n\n if extra_condition is not None:\n # Add the current gradient as argument, to avoid needless\n # re-evaluation\n def extra_condition2(alpha, phi):\n if gval_alpha[0] != alpha:\n derphi(alpha)\n x = xk + alpha * pk\n return extra_condition(alpha, x, phi, gval[0])\n else:\n extra_condition2 = None\n\n alpha_star, phi_star, old_fval, derphi_star = scalar_search_wolfe2(\n phi, derphi, old_fval, old_old_fval, derphi0, c1, c2, amax,\n extra_condition2, maxiter=maxiter)\n\n if derphi_star is None:\n warn('The line search algorithm did not converge', LineSearchWarning)\n else:\n # derphi_star is a number (derphi) -- so use the most recently\n # calculated gradient used in computing it derphi = gfk*pk\n # this is the gradient at the next step no need to compute it\n # again in the outer loop.\n derphi_star = gval[0]\n\n return alpha_star, fc[0], gc[0], phi_star, old_fval, derphi_star\n\n\ndef scalar_search_wolfe2(phi, derphi, phi0=None,\n old_phi0=None, derphi0=None,\n c1=1e-4, c2=0.9, amax=None,\n extra_condition=None, maxiter=10):\n \"\"\"Find alpha that satisfies strong Wolfe conditions.\n\n alpha > 0 is assumed to be a descent direction.\n\n Parameters\n ----------\n phi : callable phi(alpha)\n Objective scalar function.\n derphi : callable phi'(alpha)\n Objective function derivative. Returns a scalar.\n phi0 : float, optional\n Value of phi at 0\n old_phi0 : float, optional\n Value of phi at previous point\n derphi0 : float, optional\n Value of derphi at 0\n c1 : float, optional\n Parameter for Armijo condition rule.\n c2 : float, optional\n Parameter for curvature condition rule.\n amax : float, optional\n Maximum step size\n extra_condition : callable, optional\n A callable of the form ``extra_condition(alpha, phi_value)``\n returning a boolean. 
The line search accepts the value\n of ``alpha`` only if this callable returns ``True``.\n If the callable returns ``False`` for the step length,\n the algorithm will continue with new iterates.\n The callable is only called for iterates satisfying\n the strong Wolfe conditions.\n maxiter : int, optional\n Maximum number of iterations to perform\n\n Returns\n -------\n alpha_star : float or None\n Best alpha, or None if the line search algorithm did not converge.\n phi_star : float\n phi at alpha_star\n phi0 : float\n phi at 0\n derphi_star : float or None\n derphi at alpha_star, or None if the line search algorithm\n did not converge.\n\n Notes\n -----\n Uses the line search algorithm to enforce strong Wolfe\n conditions. See Wright and Nocedal, 'Numerical Optimization',\n 1999, pg. 59-60.\n\n For the zoom phase it uses an algorithm by [...].\n\n \"\"\"\n\n if phi0 is None:\n phi0 = phi(0.)\n\n if derphi0 is None:\n derphi0 = derphi(0.)\n\n alpha0 = 0\n if old_phi0 is not None and derphi0 != 0:\n alpha1 = min(1.0, 1.01*2*(phi0 - old_phi0)/derphi0)\n alpha1 = alpha1.squeeze()\n else:\n alpha1 = 1.0\n\n if alpha1 < 0:\n alpha1 = 1.0\n\n if amax is not None:\n alpha1 = min(alpha1, amax)\n\n phi_a1 = phi(alpha1)\n #derphi_a1 = derphi(alpha1) evaluated below\n\n phi_a0 = phi0\n derphi_a0 = derphi0\n\n if extra_condition is None:\n extra_condition = lambda alpha, phi: True\n\n for i in xrange(maxiter):\n if alpha1 == 0 or (amax is not None and alpha0 == amax):\n # alpha1 == 0: This shouldn't happen. Perhaps the increment has\n # slipped below machine precision?\n alpha_star = None\n phi_star = phi0\n phi0 = old_phi0\n derphi_star = None\n\n if alpha1 == 0:\n msg = 'Rounding errors prevent the line search from converging'\n else:\n msg = \"The line search algorithm could not find a solution \" + \\\n \"less than or equal to amax: %s\" % amax\n\n warn(msg, LineSearchWarning)\n break\n\n if (phi_a1 > phi0 + c1 * alpha1 * derphi0) or \\\n ((phi_a1 >= phi_a0) and (i > 1)):\n alpha_star, phi_star, derphi_star = \\\n _zoom(alpha0, alpha1, phi_a0,\n phi_a1, derphi_a0, phi, derphi,\n phi0, derphi0, c1, c2, extra_condition)\n break\n\n derphi_a1 = derphi(alpha1)\n if (abs(derphi_a1) <= -c2*derphi0):\n if extra_condition(alpha1, phi_a1):\n alpha_star = alpha1\n phi_star = phi_a1\n derphi_star = derphi_a1\n break\n\n if (derphi_a1 >= 0):\n alpha_star, phi_star, derphi_star = \\\n _zoom(alpha1, alpha0, phi_a1,\n phi_a0, derphi_a1, phi, derphi,\n phi0, derphi0, c1, c2, extra_condition)\n break\n\n alpha2 = 2 * alpha1 # increase by factor of two on each iteration\n if amax is not None:\n alpha2 = min(alpha2, amax)\n alpha0 = alpha1\n alpha1 = alpha2\n phi_a0 = phi_a1\n phi_a1 = phi(alpha1)\n derphi_a0 = derphi_a1\n\n else:\n # stopping test maxiter reached\n alpha_star = alpha1\n phi_star = phi_a1\n derphi_star = None\n warn('The line search algorithm did not converge', LineSearchWarning)\n\n return alpha_star, phi_star, phi0, derphi_star\n\n\ndef _cubicmin(a, fa, fpa, b, fb, c, fc):\n \"\"\"\n Finds the minimizer for a cubic polynomial that goes through the\n points (a,fa), (b,fb), and (c,fc) with derivative at a of fpa.\n\n If no minimizer can be found return None\n\n \"\"\"\n # f(x) = A *(x-a)^3 + B*(x-a)^2 + C*(x-a) + D\n\n with np.errstate(divide='raise', over='raise', invalid='raise'):\n try:\n C = fpa\n db = b - a\n dc = c - a\n denom = (db * dc) ** 2 * (db - dc)\n d1 = np.empty((2, 2))\n d1[0, 0] = dc ** 2\n d1[0, 1] = -db ** 2\n d1[1, 0] = -dc ** 3\n d1[1, 1] = db ** 3\n [A, B] = np.dot(d1, 
np.asarray([fb - fa - C * db,\n fc - fa - C * dc]).flatten())\n A /= denom\n B /= denom\n radical = B * B - 3 * A * C\n xmin = a + (-B + np.sqrt(radical)) / (3 * A)\n except ArithmeticError:\n return None\n if not np.isfinite(xmin):\n return None\n return xmin\n\n\ndef _quadmin(a, fa, fpa, b, fb):\n \"\"\"\n Finds the minimizer for a quadratic polynomial that goes through\n the points (a,fa), (b,fb) with derivative at a of fpa,\n\n \"\"\"\n # f(x) = B*(x-a)^2 + C*(x-a) + D\n with np.errstate(divide='raise', over='raise', invalid='raise'):\n try:\n D = fa\n C = fpa\n db = b - a * 1.0\n B = (fb - D - C * db) / (db * db)\n xmin = a - C / (2.0 * B)\n except ArithmeticError:\n return None\n if not np.isfinite(xmin):\n return None\n return xmin\n\n\ndef _zoom(a_lo, a_hi, phi_lo, phi_hi, derphi_lo,\n phi, derphi, phi0, derphi0, c1, c2, extra_condition):\n \"\"\"\n Part of the optimization algorithm in `scalar_search_wolfe2`.\n \"\"\"\n\n maxiter = 10\n i = 0\n delta1 = 0.2 # cubic interpolant check\n delta2 = 0.1 # quadratic interpolant check\n phi_rec = phi0\n a_rec = 0\n while True:\n # interpolate to find a trial step length between a_lo and\n # a_hi Need to choose interpolation here. Use cubic\n # interpolation and then if the result is within delta *\n # dalpha or outside of the interval bounded by a_lo or a_hi\n # then use quadratic interpolation, if the result is still too\n # close, then use bisection\n\n dalpha = a_hi - a_lo\n if dalpha < 0:\n a, b = a_hi, a_lo\n else:\n a, b = a_lo, a_hi\n\n # minimizer of cubic interpolant\n # (uses phi_lo, derphi_lo, phi_hi, and the most recent value of phi)\n #\n # if the result is too close to the end points (or out of the\n # interval) then use quadratic interpolation with phi_lo,\n # derphi_lo and phi_hi if the result is still too close to the\n # end points (or out of the interval) then use bisection\n\n if (i > 0):\n cchk = delta1 * dalpha\n a_j = _cubicmin(a_lo, phi_lo, derphi_lo, a_hi, phi_hi,\n a_rec, phi_rec)\n if (i == 0) or (a_j is None) or (a_j > b - cchk) or (a_j < a + cchk):\n qchk = delta2 * dalpha\n a_j = _quadmin(a_lo, phi_lo, derphi_lo, a_hi, phi_hi)\n a_j = a_j.squeeze()\n if (a_j is None) or (a_j > b-qchk) or (a_j < a+qchk):\n a_j = a_lo + 0.5*dalpha\n\n # Check new value of a_j\n\n phi_aj = phi(a_j)\n if (phi_aj > phi0 + c1*a_j*derphi0) or (phi_aj >= phi_lo):\n phi_rec = phi_hi\n a_rec = a_hi\n a_hi = a_j\n phi_hi = phi_aj\n else:\n derphi_aj = derphi(a_j)\n if abs(derphi_aj) <= -c2*derphi0 and extra_condition(a_j, phi_aj):\n a_star = a_j\n val_star = phi_aj\n valprime_star = derphi_aj\n break\n if derphi_aj*(a_hi - a_lo) >= 0:\n phi_rec = phi_hi\n a_rec = a_hi\n a_hi = a_lo\n phi_hi = phi_lo\n else:\n phi_rec = phi_lo\n a_rec = a_lo\n a_lo = a_j\n phi_lo = phi_aj\n derphi_lo = derphi_aj\n i += 1\n if (i > maxiter):\n # Failed to find a conforming step size\n a_star = None\n val_star = None\n valprime_star = None\n break\n return a_star, val_star, valprime_star\n\n\n#------------------------------------------------------------------------------\n# Armijo line and scalar searches\n#------------------------------------------------------------------------------\n\ndef line_search_armijo(f, xk, pk, gfk, old_fval, args=(), c1=1e-4, alpha0=1):\n \"\"\"Minimize over alpha, the function ``f(xk+alpha pk)``.\n\n Parameters\n ----------\n f : callable\n Function to be minimized.\n xk : array_like\n Current point.\n pk : array_like\n Search direction.\n gfk : array_like\n Gradient of `f` at point `xk`.\n old_fval : float\n Value of `f` at 
point `xk`.\n args : tuple, optional\n Optional arguments.\n c1 : float, optional\n Value to control stopping criterion.\n alpha0 : scalar, optional\n Value of `alpha` at start of the optimization.\n\n Returns\n -------\n alpha\n f_count\n f_val_at_alpha\n\n Notes\n -----\n Uses the interpolation algorithm (Armijo backtracking) as suggested by\n Wright and Nocedal in 'Numerical Optimization', 1999, pg. 56-57\n\n \"\"\"\n xk = np.atleast_1d(xk)\n fc = [0]\n\n def phi(alpha1):\n fc[0] += 1\n return f(xk + alpha1*pk, *args)\n\n if old_fval is None:\n phi0 = phi(0.)\n else:\n phi0 = old_fval # compute f(xk) -- done in past loop\n\n derphi0 = np.dot(gfk, pk)\n alpha, phi1 = scalar_search_armijo(phi, phi0, derphi0, c1=c1,\n alpha0=alpha0)\n return alpha, fc[0], phi1\n\n\ndef line_search_BFGS(f, xk, pk, gfk, old_fval, args=(), c1=1e-4, alpha0=1):\n \"\"\"\n Compatibility wrapper for `line_search_armijo`\n \"\"\"\n r = line_search_armijo(f, xk, pk, gfk, old_fval, args=args, c1=c1,\n alpha0=alpha0)\n return r[0], r[1], 0, r[2]\n\n\ndef scalar_search_armijo(phi, phi0, derphi0, c1=1e-4, alpha0=1, amin=0):\n \"\"\"Minimize over alpha, the function ``phi(alpha)``.\n\n Uses the interpolation algorithm (Armijo backtracking) as suggested by\n Wright and Nocedal in 'Numerical Optimization', 1999, pg. 56-57\n\n alpha > 0 is assumed to be a descent direction.\n\n Returns\n -------\n alpha\n phi1\n\n \"\"\"\n phi_a0 = phi(alpha0)\n if phi_a0 <= phi0 + c1*alpha0*derphi0:\n return alpha0, phi_a0\n\n # Otherwise compute the minimizer of a quadratic interpolant:\n\n alpha1 = -(derphi0) * alpha0**2 / 2.0 / (phi_a0 - phi0 - derphi0 * alpha0)\n phi_a1 = phi(alpha1)\n\n if (phi_a1 <= phi0 + c1*alpha1*derphi0):\n return alpha1, phi_a1\n\n # Otherwise loop with cubic interpolation until we find an alpha which\n # satisfies the first Wolfe condition (since we are backtracking, we will\n # assume that the value of alpha is not too small and satisfies the second\n # condition.\n\n while alpha1 > amin: # we are assuming alpha>0 is a descent direction\n factor = alpha0**2 * alpha1**2 * (alpha1-alpha0)\n a = alpha0**2 * (phi_a1 - phi0 - derphi0*alpha1) - \\\n alpha1**2 * (phi_a0 - phi0 - derphi0*alpha0)\n a = a / factor\n b = -alpha0**3 * (phi_a1 - phi0 - derphi0*alpha1) + \\\n alpha1**3 * (phi_a0 - phi0 - derphi0*alpha0)\n b = b / factor\n\n alpha2 = (-b + np.sqrt(abs(b**2 - 3 * a * derphi0))) / (3.0*a)\n phi_a2 = phi(alpha2)\n\n if (phi_a2 <= phi0 + c1*alpha2*derphi0):\n return alpha2, phi_a2\n\n if (alpha1 - alpha2) > alpha1 / 2.0 or (1 - alpha2/alpha1) < 0.96:\n alpha2 = alpha1 / 2.0\n\n alpha0 = alpha1\n alpha1 = alpha2\n phi_a0 = phi_a1\n phi_a1 = phi_a2\n\n # Failed to find a suitable step length\n return None, phi_a1\n\n\n#------------------------------------------------------------------------------\n# Non-monotone line search for DF-SANE\n#------------------------------------------------------------------------------\n\ndef _nonmonotone_line_search_cruz(f, x_k, d, prev_fs, eta,\n gamma=1e-4, tau_min=0.1, tau_max=0.5):\n \"\"\"\n Nonmonotone backtracking line search as described in [1]_\n\n Parameters\n ----------\n f : callable\n Function returning a tuple ``(f, F)`` where ``f`` is the value\n of a merit function and ``F`` the residual.\n x_k : ndarray\n Initial position\n d : ndarray\n Search direction\n prev_fs : float\n List of previous merit function values. 
Should have ``len(prev_fs) <= M``\n where ``M`` is the nonmonotonicity window parameter.\n eta : float\n Allowed merit function increase, see [1]_\n gamma, tau_min, tau_max : float, optional\n Search parameters, see [1]_\n\n Returns\n -------\n alpha : float\n Step length\n xp : ndarray\n Next position\n fp : float\n Merit function value at next position\n Fp : ndarray\n Residual at next position\n\n References\n ----------\n [1] \"Spectral residual method without gradient information for solving\n large-scale nonlinear systems of equations.\" W. La Cruz,\n J.M. Martinez, M. Raydan. Math. Comp. **75**, 1429 (2006).\n\n \"\"\"\n f_k = prev_fs[-1]\n f_bar = max(prev_fs)\n\n alpha_p = 1\n alpha_m = 1\n alpha = 1\n\n while True:\n xp = x_k + alpha_p * d\n fp, Fp = f(xp)\n\n if fp <= f_bar + eta - gamma * alpha_p**2 * f_k:\n alpha = alpha_p\n break\n\n alpha_tp = alpha_p**2 * f_k / (fp + (2*alpha_p - 1)*f_k)\n\n xp = x_k - alpha_m * d\n fp, Fp = f(xp)\n\n if fp <= f_bar + eta - gamma * alpha_m**2 * f_k:\n alpha = -alpha_m\n break\n\n alpha_tm = alpha_m**2 * f_k / (fp + (2*alpha_m - 1)*f_k)\n\n alpha_p = np.clip(alpha_tp, tau_min * alpha_p, tau_max * alpha_p)\n alpha_m = np.clip(alpha_tm, tau_min * alpha_m, tau_max * alpha_m)\n\n return alpha, xp, fp, Fp\n\n\ndef _nonmonotone_line_search_cheng(f, x_k, d, f_k, C, Q, eta,\n gamma=1e-4, tau_min=0.1, tau_max=0.5,\n nu=0.85):\n \"\"\"\n Nonmonotone line search from [1]\n\n Parameters\n ----------\n f : callable\n Function returning a tuple ``(f, F)`` where ``f`` is the value\n of a merit function and ``F`` the residual.\n x_k : ndarray\n Initial position\n d : ndarray\n Search direction\n f_k : float\n Initial merit function value\n C, Q : float\n Control parameters. On the first iteration, give values\n Q=1.0, C=f_k\n eta : float\n Allowed merit function increase, see [1]_\n nu, gamma, tau_min, tau_max : float, optional\n Search parameters, see [1]_\n\n Returns\n -------\n alpha : float\n Step length\n xp : ndarray\n Next position\n fp : float\n Merit function value at next position\n Fp : ndarray\n Residual at next position\n C : float\n New value for the control parameter C\n Q : float\n New value for the control parameter Q\n\n References\n ----------\n .. [1] W. Cheng & D.-H. Li, ''A derivative-free nonmonotone line\n search and its application to the spectral residual\n method'', IMA J. Numer. Anal. 29, 814 (2009).\n\n \"\"\"\n alpha_p = 1\n alpha_m = 1\n alpha = 1\n\n while True:\n xp = x_k + alpha_p * d\n fp, Fp = f(xp)\n\n if fp <= C + eta - gamma * alpha_p**2 * f_k:\n alpha = alpha_p\n break\n\n alpha_tp = alpha_p**2 * f_k / (fp + (2*alpha_p - 1)*f_k)\n\n xp = x_k - alpha_m * d\n fp, Fp = f(xp)\n\n if fp <= C + eta - gamma * alpha_m**2 * f_k:\n alpha = -alpha_m\n break\n\n alpha_tm = alpha_m**2 * f_k / (fp + (2*alpha_m - 1)*f_k)\n\n alpha_p = np.clip(alpha_tp, tau_min * alpha_p, tau_max * alpha_p)\n alpha_m = np.clip(alpha_tm, tau_min * alpha_m, tau_max * alpha_m)\n\n # Update C and Q\n Q_next = nu * Q + 1\n C = (nu * Q * (C + eta) + fp) / Q_next\n Q = Q_next\n\n return alpha, xp, fp, Fp, C, Q\n" ]
[ [ "numpy.dot", "numpy.sqrt", "numpy.isfinite", "numpy.clip", "numpy.asarray", "numpy.atleast_1d", "scipy.optimize.minpack2.dcsrch", "scipy._lib.six.xrange", "numpy.errstate", "numpy.zeros", "numpy.empty" ] ]
lantian165/Wechat_AutoJump
[ "665e80aa0c580bdb2a0c692e45ad334094d9e2b8", "665e80aa0c580bdb2a0c692e45ad334094d9e2b8" ]
[ "test/plt.py", "nn_play.py" ]
[ "#/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\n# 以灰度模式读入图片,这样img.shape就只有二维。否则还会多一维表示彩色\nimg=cv2.imread('Lena.jpg',cv2.IMREAD_GRAYSCALE)\nimg2 = img.copy()\n# 指定第二个参数0会导致mathTemplate执行报错:template = cv2.imread('Lena_eyes.png',0)\ntemplate = cv2.imread('Lena_eyes.png',cv2.IMREAD_GRAYSCALE)\n\nw, h = template.shape[::-1]\n\n# Apply template Matching\nres = cv2.matchTemplate(img2,template,cv2.TM_CCOEFF_NORMED)\nmin_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)\n\ntop_left = max_loc\nbottom_right = (top_left[0] + w, top_left[1] + h)\n\ncv2.rectangle(img2,top_left,bottom_right,255,2)\n\n# 绘制1行2列的grid,图片显示在第二个位置\nplt.subplot(122), plt.imshow(img2,cmap = 'gray')\n# 使用plt.xticks([])关闭坐标轴刻度\nplt.title('Detected Point'), plt.xticks([]), plt.yticks([])\nplt.suptitle(\"TM_CCOEFF_NORMED\")\nplt.show()\n", "# -*- coding:utf-8 -*-\n# Created Time: 六 12/30 13:49:21 2017\n# Author: Taihong Xiao <xiaotaihong@126.com>\n\nimport numpy as np\nimport time\nimport os, glob, shutil\nimport cv2\nimport argparse\nimport tensorflow as tf\nfrom model import JumpModel\nfrom model_fine import JumpModelFine\nimport random\nimport sys\n\n# 多尺度搜索\ndef multi_scale_search(pivot, screen, range=0.3, num=10):\n # 分别获取屏幕截图及player的尺寸\n H, W = screen.shape[:2]\n h, w = pivot.shape[:2]\n\n found = None\n # 利用np.linspace(0.7,1.3,10)[::-1]生成0.7-1.3之间的10个等差数,-1指定倒序排列\n for scale in np.linspace(1-range, 1+range, num)[::-1]:\n # 对截屏按等差数列进行缩放:\n resized = cv2.resize(screen, (int(W * scale), int(H * scale)))\n\n # 计算缩放率 rate\n r = W / float(resized.shape[1])\n # 如果缩放后的截屏比player的尺寸还小,停止匹配player\n if resized.shape[0] < h or resized.shape[1] < w:\n break\n # 使用:归一化相关系数匹配法 进行模板匹配和识别\n # 使用的是相关匹配算法,res越大匹配效果越准确\n # res存储的是一幅灰度图片,每个元素表示其附近元素与模板的匹配度\n res = cv2.matchTemplate(resized, pivot, cv2.TM_CCOEFF_NORMED)\n\n # 通过res >=res.max()来判定本次识别结果是否比之前最好还好,\n # 如果是,则更新player位于截屏中坐标\n loc = np.where(res >= res.max())\n pos_h, pos_w = list(zip(*loc))[0]\n\n if found is None or res.max() > found[-1]:\n found = (pos_h, pos_w, r, res.max())\n\n if found is None: return (0,0,0,0,0)\n pos_h, pos_w, r, score = found\n start_h, start_w = int(pos_h * r), int(pos_w * r)\n end_h, end_w = int((pos_h + h) * r), int((pos_w + w) * r)\n return [start_h, start_w, end_h, end_w, score]\n\nclass WechatAutoJump(object):\n def __init__(self, phone, sensitivity, serverURL, debug, resource_dir):\n self.phone = phone\n self.sensitivity = sensitivity\n self.debug = debug\n self.resource_dir = resource_dir\n # 初始化已跳跃步数\n self.step = 0\n self.ckpt = os.path.join(self.resource_dir, 'train_logs_coarse/best_model.ckpt-13999')\n self.ckpt_fine = os.path.join(self.resource_dir, 'train_logs_fine/best_model.ckpt-53999')\n self.serverURL = serverURL\n\n # 加载:player.png,初始化tf.Session()\n self.load_resource()\n if self.phone == 'IOS':\n import wda\n # 连接到手机\n self.client = wda.Client(self.serverURL)\n # 启动应用\n self.s = self.client.session()\n if self.debug:\n if not os.path.exists(self.debug):\n os.mkdir(self.debug)\n\n def load_resource(self):\n # 加载 小人图片 player.png\n self.player = cv2.imread(os.path.join(self.resource_dir, 'player.png'), 0)\n\n # network initization\n self.net = JumpModel()\n self.net_fine = JumpModelFine()\n\n # 定义占位符:\n # 将采集到的大小为1280*720的图像沿x方向上下各截去320*720大小,只保留中心640*720的图像作为训练数据\n self.img = tf.placeholder(tf.float32, [None, 640, 720, 3], name='img')\n\n self.img_fine = tf.placeholder(tf.float32, [None, 320, 320, 3], name='img_fine')\n\n # 定义标签:\n self.label = 
tf.placeholder(tf.float32, [None, 2], name='label')\n\n self.is_training = tf.placeholder(np.bool, name='is_training')\n self.keep_prob = tf.placeholder(np.float32, name='keep_prob')\n\n #\n self.pred = self.net.forward(self.img, self.is_training, self.keep_prob)\n self.pred_fine = self.net_fine.forward(self.img_fine, self.is_training, self.keep_prob)\n\n # 初始化并运行 self.sess\n self.sess = tf.Session()\n # 对所有的图变量进行集体初始化并开始运行\n self.sess.run(tf.global_variables_initializer())\n\n all_vars = tf.all_variables()\n var_coarse = [k for k in all_vars if k.name.startswith('coarse')]\n var_fine = [k for k in all_vars if k.name.startswith('fine')]\n\n self.saver_coarse = tf.train.Saver(var_coarse)\n self.saver_fine = tf.train.Saver(var_fine)\n self.saver_coarse.restore(self.sess, self.ckpt)\n self.saver_fine.restore(self.sess, self.ckpt_fine)\n\n print('==== successfully restored ====')\n\n # 获取手机屏幕当前截图, 将截屏缩放成尺寸为:1280*720的图片返回\n def get_current_state(self):\n # 获取当前手机屏截屏,并把图片拉取到程序运行的当前目录\n if self.phone == 'Android':\n os.system('adb shell screencap -p /sdcard/1.png')\n os.system('adb pull /sdcard/1.png state.png')\n elif self.phone == 'IOS':\n self.client.screenshot('state.png')\n if not os.path.exists('state.png'):\n raise NameError('Cannot obtain screenshot from your phone! Please follow the instructions in readme!')\n\n if self.debug:\n shutil.copyfile('state.png', os.path.join(self.debug, 'state_{:03d}.png'.format(self.step)))\n\n # 读取这张截图\n state = cv2.imread('state.png')\n # iphone上得到的state的值是:(1334,750,3), 切片取前2个值\n # resolution[0]=y, resolution[1]=x\n # 另外一种赋值方式: rows, columns=state.shape[:2]\n self.resolution = state.shape[:2]\n\n # 下面要将采集到的图片等比例缩放成尺寸(x,y):720*1280\n scale = state.shape[1] / 720. # 计算x轴像素的缩放系数,然后应用到y轴进行缩放\n # 这里 state.shape[0]/scale = 1280.639999,取整后刚好是1280\n state = cv2.resize(state, (720, int(state.shape[0] / scale)), interpolation=cv2.INTER_NEAREST)\n\n # 如果缩放后,state.shape[0]的值还不是1280,要再进一步处理:\n if state.shape[0] > 1280:\n s = (state.shape[0] - 1280) // 2\n state = state[s:(s+1280),:,:]\n elif state.shape[0] < 1280:\n s1 = (1280 - state.shape[0]) // 2\n s2 = (1280 - state.shape[0]) - s1\n pad1 = 255 * np.ones((s1, 720, 3), dtype=np.uint8)\n pad2 = 255 * np.ones((s2, 720, 3), dtype=np.uint8)\n state = np.concatenate((pad1, state, pad2), 0)\n # 后续操作:每张图有判断意义的区域只有屏幕中央位置,截图的上下两部分是没有意义的\n # 后面会从上下各截去320*720大小,只保留中心640*720的图像作为训练数据\n return state\n\n def get_player_position(self, state):\n # 转换为灰度图片\n state = cv2.cvtColor(state, cv2.COLOR_BGR2GRAY)\n # 搜索player的坐标\n pos = multi_scale_search(self.player, state, 0.3, 10)\n h, w = int((pos[0] + 13 * pos[2])/14.), (pos[1] + pos[3])//2\n return np.array([h, w])\n\n def get_target_position(self, state, player_pos):\n feed_dict = {\n self.img: np.expand_dims(state[320:-320], 0),\n self.is_training: False,\n self.keep_prob: 1.0,\n }\n pred_out = self.sess.run(self.pred, feed_dict=feed_dict)\n pred_out = pred_out[0].astype(int)\n x1 = pred_out[0] - 160\n x2 = pred_out[0] + 160\n y1 = pred_out[1] - 160\n y2 = pred_out[1] + 160\n if y1 < 0:\n y1 = 0\n y2 = 320\n if y2 > state.shape[1]:\n y2 = state.shape[1]\n y1 = y2 - 320\n img_fine_in = state[x1: x2, y1: y2, :]\n feed_dict_fine = {\n self.img_fine: np.expand_dims(img_fine_in, 0),\n self.is_training: False,\n self.keep_prob: 1.0,\n }\n pred_out_fine = self.sess.run(self.pred_fine, feed_dict=feed_dict_fine)\n pred_out_fine = pred_out_fine[0].astype(int)\n out = pred_out_fine + np.array([x1, y1])\n return out\n\n def get_target_position_fast(self, state, player_pos):\n state_cut = 
state[:player_pos[0],:,:]\n m1 = (state_cut[:, :, 0] == 245)\n m2 = (state_cut[:, :, 1] == 245)\n m3 = (state_cut[:, :, 2] == 245)\n m = np.uint8(np.float32(m1 * m2 * m3) * 255)\n b1, b2 = cv2.connectedComponents(m)\n for i in range(1, np.max(b2) + 1):\n x, y = np.where(b2 == i)\n if len(x) > 280 and len(x) < 310:\n r_x, r_y = x, y\n h, w = int(r_x.mean()), int(r_y.mean())\n return np.array([h, w])\n\n def jump(self, player_pos, target_pos):\n distance = np.linalg.norm(player_pos - target_pos)\n press_time = distance * self.sensitivity\n press_time = int(np.rint(press_time))\n press_h, press_w = int(0.82*self.resolution[0]), self.resolution[1]//2\n if self.phone == 'Android':\n cmd = 'adb shell input swipe {} {} {} {} {}'.format(press_w, press_h, press_w, press_h, press_time)\n print(cmd)\n os.system(cmd)\n elif self.phone == 'IOS':\n self.s.tap_hold(press_w, press_h, press_time / 1000.)\n\n def debugging(self):\n current_state = self.state.copy()\n cv2.circle(current_state, (self.player_pos[1], self.player_pos[0]), 5, (0,255,0), -1)\n cv2.circle(current_state, (self.target_pos[1], self.target_pos[0]), 5, (0,0,255), -1)\n cv2.imwrite(os.path.join(self.debug, 'state_{:03d}_res_h_{}_w_{}.png'.format(self.step, self.target_pos[0], self.target_pos[1])), current_state)\n\n # Added by yichen\n def personification(self):\n if self.step % 70 == 0:\n next_rest = 18\n rest=True\n elif self.step % 40 == 0:\n next_rest = 13\n rest=True\n elif self.step % 20 == 0:\n next_rest = 11\n rest=True\n elif self.step % 10 == 0:\n next_rest = 8\n rest=True\n else:\n rest=False\n\n if rest:\n for rest_time in range(next_rest):\n sys.stdout.write('\\r程序将在 {}s 后继续' .format(next_rest-rest_time))\n sys.stdout.flush()\n time.sleep(1)\n print('\\n继续')\n\n time.sleep(random.uniform(1.5, 3.0))\n\n if self.step % 5 == 0:\n self.sensitivity = 2.145\n elif self.step % 7 == 0:\n self.sensitivity = 2.000\n elif self.step % 9 == 0:\n self.sensitivity = 1.985\n elif self.step % 3 == 0:\n self.sensitivity = 1.970\n\n def play(self):\n # 获取 1280*720大小的屏幕截图\n self.state = self.get_current_state()\n # 计算 player的坐标\n self.player_pos = self.get_player_position(self.state)\n\n # 计算player要跳到哪个坐标\n if self.phone == 'IOS':\n self.target_pos = self.get_target_position(self.state, self.player_pos)\n print('CNN-search: %04d' % self.step)\n else:\n try:\n self.target_pos = self.get_target_position_fast(self.state, self.player_pos)\n print('fast-search: %04d' % self.step)\n except UnboundLocalError:\n self.target_pos = self.get_target_position(self.state, self.player_pos)\n print('CNN-search: %04d' % self.step)\n if self.debug:\n self.debugging()\n\n # 触发跳跃动作\n self.jump(self.player_pos, self.target_pos)\n self.step += 1\n\n time.sleep(1.5)\n\n\n def run(self):\n try:\n while True:\n self.play()\n except KeyboardInterrupt:\n pass\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--phone', default='Android', choices=['Android', 'IOS'], type=str, help='mobile phone OS')\n parser.add_argument('--sensitivity', default=2.045, type=float, help='constant for press time')\n parser.add_argument('--serverURL', default='http://localhost:8100', type=str, help='ServerURL for wda Client')\n parser.add_argument('--resource', default='resource', type=str, help='resource dir')\n parser.add_argument('--debug', default=None, type=str, help='debug mode, specify a directory for storing log files.')\n args = parser.parse_args()\n # print(args)\n\n # 初始化对象\n AI = WechatAutoJump(args.phone, args.sensitivity, args.serverURL, 
args.debug, args.resource)\n # 调用run()方法->self.play()\n AI.run()\n" ]
[ [ "matplotlib.pyplot.imshow", "matplotlib.pyplot.yticks", "matplotlib.pyplot.title", "matplotlib.pyplot.subplot", "matplotlib.pyplot.xticks", "matplotlib.pyplot.suptitle", "matplotlib.pyplot.show" ], [ "tensorflow.all_variables", "numpy.expand_dims", "numpy.linspace", "numpy.rint", "tensorflow.placeholder", "numpy.linalg.norm", "numpy.ones", "numpy.concatenate", "tensorflow.global_variables_initializer", "numpy.max", "tensorflow.Session", "numpy.float32", "tensorflow.train.Saver", "numpy.array", "numpy.where" ] ]
eldadassa/eligibility_propagation
[ "6ca02ab509785e40c01161f8aa23ef60c49887bb" ]
[ "Syclop_e-prop-rl/compile_test.py" ]
[ "import argparse\nimport subprocess as sp\nimport tensorflow as tf\n\n# from subprocess import PIPE, run\n\ndef out(command):\n result = sp.run(command, stdout=sp.PIPE, stderr=sp.PIPE, universal_newlines=True, shell=True)\n return result.stdout.rstrip()\n\ncflags = \" \".join(tf.sysconfig.get_compile_flags())\nlflags = \" \".join(tf.sysconfig.get_link_flags())\ntf_inc = tf.sysconfig.get_include()\ntf_lib = tf.sysconfig.get_lib()\n\ncv_libs = out('pkg-config --libs opencv')\ncv_cflags = out('pkg-config --cflags opencv')\n\n# parser = argparse.ArgumentParser()\n# parser.add_argument('ale_path', type=str, default='')\n# args = parser.parse_args()\n# ale_path = args.ale_path\n# if ale_path == '':\n# print('[ ! must set ale_path ]')\n\n# cmd = f'g++ -std=c++11 -shared ale.cc -o tfaleop.so -fPIC -I {tf_inc} -O2 -D_GLIBCXX_USE_CXX11_ABI=1 -L{tf_lib} {cflags} {lflags} -I{ale_path}/include -L{ale_path}/lib -lale'\ncmd = f'g++ -std=c++11 -shared optest.cc -o tfoptest.so -fPIC -I {tf_inc} -O2 -D_GLIBCXX_USE_CXX11_ABI=1 -L{tf_lib} {cflags} {lflags} {cv_cflags} {cv_libs}'\nprint(f'- compiling using command: {cmd}')\nres = sp.check_call(cmd, shell=True)\nif res == 0:\n print('[ sucessfully compiled ]')\n\n" ]
[ [ "tensorflow.sysconfig.get_compile_flags", "tensorflow.sysconfig.get_link_flags", "tensorflow.sysconfig.get_include", "tensorflow.sysconfig.get_lib" ] ]
damonge/CCL
[ "04cb83885ccd5f343b48cd8151bebd5779518ca6" ]
[ "benchmarks/test_correlation_MG2.py" ]
[ "import os\nimport numpy as np\nimport pyccl as ccl\nfrom scipy.interpolate import interp1d\nimport pytest\n\n\n@pytest.fixture(scope='module', params=['fftlog', 'bessel'])\ndef corr_method(request):\n errfacs = {'fftlog': 0.21, 'bessel': 0.05}\n return request.param, errfacs[request.param]\n\n\n@pytest.fixture(scope='module')\ndef set_up(request):\n dirdat = os.path.dirname(__file__) + '/data/'\n h0 = 0.67702026367187500\n logA = 3.05 # log(10^10 A_s)\n cosmo = ccl.Cosmology(Omega_c=0.12/h0**2, Omega_b=0.0221/h0**2, Omega_k=0,\n h=h0, A_s=np.exp(logA)/10**10, n_s=0.96, Neff=3.046,\n m_nu=0.0, w0=-1, wa=0, T_CMB=2.7255,\n mu_0=0.1, sigma_0=0.1,\n transfer_function='boltzmann_isitgr',\n matter_power_spectrum='linear')\n cosmo.cosmo.gsl_params.INTEGRATION_LIMBER_EPSREL = 2.5E-5\n cosmo.cosmo.gsl_params.INTEGRATION_EPSREL = 2.5E-5\n\n # Ell-dependent correction factors\n # Set up array of ells\n fl = {}\n lmax = 10000\n nls = (lmax - 400)//20+141\n ells = np.zeros(nls)\n ells[:101] = np.arange(101)\n ells[101:121] = ells[100] + (np.arange(20) + 1) * 5\n ells[121:141] = ells[120] + (np.arange(20) + 1) * 10\n ells[141:] = ells[140] + (np.arange(nls - 141) + 1) * 20\n fl['lmax'] = lmax\n fl['ells'] = ells\n\n # Load dNdz's\n z1, pz1 = np.loadtxt(dirdat + \"bin1_histo.txt\", unpack=True)\n z2, pz2 = np.loadtxt(dirdat + \"bin2_histo.txt\", unpack=True)\n\n # Set up the linear galaxy bias as used in generating benchmarks\n bz1 = 1.45*np.ones_like(pz1)\n bz2 = 1.55*np.ones_like(pz2)\n\n # Initialize tracers\n trc = {}\n trc['g1'] = ccl.NumberCountsTracer(cosmo, False, (z1, pz1), (z1, bz1))\n trc['g2'] = ccl.NumberCountsTracer(cosmo, False, (z2, pz2), (z2, bz2))\n trc['l1'] = ccl.WeakLensingTracer(cosmo, (z1, pz1))\n trc['l2'] = ccl.WeakLensingTracer(cosmo, (z2, pz2))\n\n # Read benchmarks\n bms = {}\n bms['dd_11'] = np.loadtxt(dirdat+'/wtheta_linear_prediction.dat')[0:15]\n bms['dd_22'] = np.loadtxt(dirdat+'/wtheta_linear_prediction.dat')[15:30]\n bms['dl_11'] = np.loadtxt(dirdat+'/gammat_linear_prediction.dat')[0:15]\n bms['dl_12'] = np.loadtxt(dirdat+'/gammat_linear_prediction.dat')[15:30]\n bms['dl_21'] = np.loadtxt(dirdat+'/gammat_linear_prediction.dat')[30:45]\n bms['dl_22'] = np.loadtxt(dirdat+'/gammat_linear_prediction.dat')[45:60]\n bms['ll_11_p'] = np.loadtxt(dirdat+'/Xip_linear_prediction.dat')[0:15]\n bms['ll_12_p'] = np.loadtxt(dirdat+'/Xip_linear_prediction.dat')[15:30]\n bms['ll_22_p'] = np.loadtxt(dirdat+'/Xip_linear_prediction.dat')[30:45]\n bms['ll_11_m'] = np.loadtxt(dirdat+'/Xim_linear_prediction.dat')[0:15]\n bms['ll_12_m'] = np.loadtxt(dirdat+'/Xim_linear_prediction.dat')[15:30]\n bms['ll_22_m'] = np.loadtxt(dirdat+'/Xim_linear_prediction.dat')[30:45]\n theta = np.loadtxt(dirdat+'/theta_corr_MG.dat')\n bms['theta'] = theta\n\n # Read error bars\n ers = {}\n d = np.loadtxt(\"benchmarks/data/sigma_clustering_Nbin5\",\n unpack=True)\n ers['dd_11'] = interp1d(d[0], d[1],\n fill_value=d[1][0],\n bounds_error=False)(theta)\n ers['dd_22'] = interp1d(d[0], d[2],\n fill_value=d[2][0],\n bounds_error=False)(theta)\n d = np.loadtxt(\"benchmarks/data/sigma_ggl_Nbin5\",\n unpack=True)\n ers['dl_12'] = interp1d(d[0], d[1],\n fill_value=d[1][0],\n bounds_error=False)(theta)\n ers['dl_11'] = interp1d(d[0], d[2],\n fill_value=d[2][0],\n bounds_error=False)(theta)\n ers['dl_22'] = interp1d(d[0], d[3],\n fill_value=d[3][0],\n bounds_error=False)(theta)\n ers['dl_21'] = interp1d(d[0], d[4],\n fill_value=d[4][0],\n bounds_error=False)(theta)\n d = np.loadtxt(\"benchmarks/data/sigma_xi+_Nbin5\",\n 
unpack=True)\n # We cut the largest theta angle from xip because of issues\n # with the benchmark.\n ers['ll_11_p'] = interp1d(d[0], d[1],\n fill_value=d[1][0],\n bounds_error=False)(theta)\n ers['ll_22_p'] = interp1d(d[0], d[2],\n fill_value=d[2][0],\n bounds_error=False)(theta)\n ers['ll_12_p'] = interp1d(d[0], d[3],\n fill_value=d[3][0],\n bounds_error=False)(theta)\n d = np.loadtxt(\"benchmarks/data/sigma_xi-_Nbin5\",\n unpack=True)\n ers['ll_11_m'] = interp1d(d[0], d[1],\n fill_value=d[1][0],\n bounds_error=False)(theta)\n ers['ll_22_m'] = interp1d(d[0], d[2],\n fill_value=d[2][0],\n bounds_error=False)(theta)\n ers['ll_12_m'] = interp1d(d[0], d[3],\n fill_value=d[3][0],\n bounds_error=False)(theta)\n return cosmo, trc, bms, ers, fl\n\n\n@pytest.mark.parametrize(\"t1,t2,bm,er,kind,pref\",\n [('g1', 'g1', 'dd_11', 'dd_11', 'NN', 1),\n ('g2', 'g2', 'dd_22', 'dd_22', 'NN', 1),\n ('g1', 'l1', 'dl_11', 'dl_11', 'NG', 1),\n ('g1', 'l2', 'dl_12', 'dl_12', 'NG', 1),\n ('g2', 'l1', 'dl_21', 'dl_21', 'NG', 1),\n ('g2', 'l2', 'dl_22', 'dl_22', 'NG', 1),\n ('l1', 'l1', 'll_11_p', 'll_11_p', 'GG+', 1),\n ('l1', 'l2', 'll_12_p', 'll_12_p', 'GG+', 1),\n ('l2', 'l2', 'll_22_p', 'll_22_p', 'GG+', 1),\n ('l1', 'l1', 'll_11_m', 'll_11_m', 'GG-', 1),\n ('l1', 'l2', 'll_12_m', 'll_12_m', 'GG-', 1),\n ('l2', 'l2', 'll_22_m', 'll_22_m', 'GG-', 1)])\ndef test_xi(set_up, corr_method, t1, t2, bm, er, kind, pref):\n cosmo, trcs, bms, ers, fls = set_up\n method, errfac = corr_method\n\n # Debugging - define the same cosmology but in GR\n\n cl = ccl.angular_cl(cosmo, trcs[t1], trcs[t2], fls['ells'])\n\n ell = np.arange(fls['lmax'])\n cli = interp1d(fls['ells'], cl, kind='cubic')(ell)\n # Our benchmarks have theta in arcmin\n # but CCL requires it in degrees:\n theta_deg = bms['theta'] / 60.\n xi = ccl.correlation(cosmo, ell, cli, theta_deg, type=kind,\n method=method)\n xi *= pref\n\n print(xi)\n\n assert np.all(np.fabs(xi - bms[bm]) < ers[er] * errfac)\n" ]
[ [ "numpy.ones_like", "numpy.arange", "numpy.fabs", "scipy.interpolate.interp1d", "numpy.exp", "numpy.zeros", "numpy.loadtxt" ] ]
kartikvega/Udacity
[ "e84643fbb2bd307d571308a5912c513f0ee6c459" ]
[ "tv-script-generation/problem_unittests.py" ]
[ "import numpy as np\nimport tensorflow as tf\nfrom tensorflow.contrib import rnn\n\n\ndef _print_success_message():\n print('Tests Passed')\n\n\ndef test_create_lookup_tables(create_lookup_tables):\n with tf.Graph().as_default():\n test_text = '''\n Moe_Szyslak Moe's Tavern Where the elite meet to drink\n Bart_Simpson Eh yeah hello is Mike there Last name Rotch\n Moe_Szyslak Hold on I'll check Mike Rotch Mike Rotch Hey has anybody seen Mike Rotch lately\n Moe_Szyslak Listen you little puke One of these days I'm gonna catch you and I'm gonna carve my name on your back with an ice pick\n Moe_Szyslak Whats the matter Homer You're not your normal effervescent self\n Homer_Simpson I got my problems Moe Give me another one\n Moe_Szyslak Homer hey you should not drink to forget your problems\n Barney_Gumble Yeah you should only drink to enhance your social skills'''\n\n test_text = test_text.lower()\n test_text = test_text.split()\n\n vocab_to_int, int_to_vocab = create_lookup_tables(test_text)\n\n # Check types\n assert isinstance(vocab_to_int, dict),\\\n 'vocab_to_int is not a dictionary.'\n assert isinstance(int_to_vocab, dict),\\\n 'int_to_vocab is not a dictionary.'\n\n # Compare lengths of dicts\n assert len(vocab_to_int) == len(int_to_vocab),\\\n 'Length of vocab_to_int and int_to_vocab don\\'t match. ' \\\n 'vocab_to_int is length {}. int_to_vocab is length {}'.format(len(vocab_to_int), len(int_to_vocab))\n\n # Make sure the dicts have the same words\n vocab_to_int_word_set = set(vocab_to_int.keys())\n int_to_vocab_word_set = set(int_to_vocab.values())\n\n assert not (vocab_to_int_word_set - int_to_vocab_word_set),\\\n 'vocab_to_int and int_to_vocab don\\'t have the same words.' \\\n '{} found in vocab_to_int, but not in int_to_vocab'.format(vocab_to_int_word_set - int_to_vocab_word_set)\n assert not (int_to_vocab_word_set - vocab_to_int_word_set),\\\n 'vocab_to_int and int_to_vocab don\\'t have the same words.' \\\n '{} found in int_to_vocab, but not in vocab_to_int'.format(int_to_vocab_word_set - vocab_to_int_word_set)\n\n # Make sure the dicts have the same word ids\n vocab_to_int_word_id_set = set(vocab_to_int.values())\n int_to_vocab_word_id_set = set(int_to_vocab.keys())\n\n assert not (vocab_to_int_word_id_set - int_to_vocab_word_id_set),\\\n 'vocab_to_int and int_to_vocab don\\'t contain the same word ids.' \\\n '{} found in vocab_to_int, but not in int_to_vocab'.format(vocab_to_int_word_id_set - int_to_vocab_word_id_set)\n assert not (int_to_vocab_word_id_set - vocab_to_int_word_id_set),\\\n 'vocab_to_int and int_to_vocab don\\'t contain the same word ids.' \\\n '{} found in int_to_vocab, but not in vocab_to_int'.format(int_to_vocab_word_id_set - vocab_to_int_word_id_set)\n\n # Make sure the dicts make the same lookup\n missmatches = [(word, id, id, int_to_vocab[id]) for word, id in vocab_to_int.items() if int_to_vocab[id] != word]\n\n assert not missmatches,\\\n 'Found {} missmatche(s). First missmatch: vocab_to_int[{}] = {} and int_to_vocab[{}] = {}'.format(\n len(missmatches),\n *missmatches[0])\n\n assert len(vocab_to_int) > len(set(test_text))/2,\\\n 'The length of vocab seems too small. 
Found a length of {}'.format(len(vocab_to_int))\n\n _print_success_message()\n\n\ndef test_get_batches(get_batches):\n with tf.Graph().as_default():\n test_batch_size = 128\n test_seq_length = 5\n test_int_text = list(range(1000*test_seq_length))\n batches = get_batches(test_int_text, test_batch_size, test_seq_length)\n\n # Check type\n assert isinstance(batches, np.ndarray),\\\n 'Batches is not a Numpy array'\n\n # Check shape\n assert batches.shape == (7, 2, 128, 5),\\\n 'Batches returned wrong shape. Found {}'.format(batches.shape)\n\n for x in range(batches.shape[2]):\n assert np.array_equal(batches[0,0,x], np.array(range(x * 35, x * 35 + batches.shape[3]))),\\\n 'Batches returned wrong contents. For example, input sequence {} in the first batch was {}'.format(x, batches[0,0,x])\n assert np.array_equal(batches[0,1,x], np.array(range(x * 35 + 1, x * 35 + 1 + batches.shape[3]))),\\\n 'Batches returned wrong contents. For example, target sequence {} in the first batch was {}'.format(x, batches[0,1,x])\n\n\n last_seq_target = (test_batch_size-1) * 35 + 31\n last_seq = np.array(range(last_seq_target, last_seq_target+ batches.shape[3]))\n last_seq[-1] = batches[0,0,0,0]\n\n assert np.array_equal(batches[-1,1,-1], last_seq),\\\n 'The last target of the last batch should be the first input of the first batch. Found {} but expected {}'.format(batches[-1,1,-1], last_seq)\n\n _print_success_message()\n\n\ndef test_tokenize(token_lookup):\n with tf.Graph().as_default():\n symbols = set(['.', ',', '\"', ';', '!', '?', '(', ')', '--', '\\n'])\n token_dict = token_lookup()\n\n # Check type\n assert isinstance(token_dict, dict), \\\n 'Returned type is {}.'.format(type(token_dict))\n\n # Check symbols\n missing_symbols = symbols - set(token_dict.keys())\n unknown_symbols = set(token_dict.keys()) - symbols\n\n assert not missing_symbols, \\\n 'Missing symbols: {}'.format(missing_symbols)\n assert not unknown_symbols, \\\n 'Unknown symbols: {}'.format(unknown_symbols)\n\n # Check values type\n bad_value_type = [type(val) for val in token_dict.values() if not isinstance(val, str)]\n\n assert not bad_value_type,\\\n 'Found token as {} type.'.format(bad_value_type[0])\n\n # Check for spaces\n key_has_spaces = [k for k in token_dict.keys() if ' ' in k]\n val_has_spaces = [val for val in token_dict.values() if ' ' in val]\n\n assert not key_has_spaces,\\\n 'The key \"{}\" includes spaces. Remove spaces from keys and values'.format(key_has_spaces[0])\n assert not val_has_spaces,\\\n 'The value \"{}\" includes spaces. Remove spaces from keys and values'.format(val_has_spaces[0])\n\n # Check for symbols in values\n symbol_val = ()\n for symbol in symbols:\n for val in token_dict.values():\n if symbol in val:\n symbol_val = (symbol, val)\n\n assert not symbol_val,\\\n 'Don\\'t use a symbol that will be replaced in your tokens. Found the symbol {} in value {}'.format(*symbol_val)\n\n _print_success_message()\n\n\ndef test_get_inputs(get_inputs):\n with tf.Graph().as_default():\n input_data, targets, lr = get_inputs()\n\n # Check type\n assert input_data.op.type == 'Placeholder',\\\n 'Input not a Placeholder.'\n assert targets.op.type == 'Placeholder',\\\n 'Targets not a Placeholder.'\n assert lr.op.type == 'Placeholder',\\\n 'Learning Rate not a Placeholder.'\n\n # Check name\n assert input_data.name == 'input:0',\\\n 'Input has bad name. 
Found name {}'.format(input_data.name)\n\n # Check rank\n input_rank = 0 if input_data.get_shape() == None else len(input_data.get_shape())\n targets_rank = 0 if targets.get_shape() == None else len(targets.get_shape())\n lr_rank = 0 if lr.get_shape() == None else len(lr.get_shape())\n\n assert input_rank == 2,\\\n 'Input has wrong rank. Rank {} found.'.format(input_rank)\n assert targets_rank == 2,\\\n 'Targets has wrong rank. Rank {} found.'.format(targets_rank)\n assert lr_rank == 0,\\\n 'Learning Rate has wrong rank. Rank {} found'.format(lr_rank)\n\n _print_success_message()\n\n\ndef test_get_init_cell(get_init_cell):\n with tf.Graph().as_default():\n test_batch_size_ph = tf.placeholder(tf.int32, [])\n test_rnn_size = 256\n\n cell, init_state = get_init_cell(test_batch_size_ph, test_rnn_size)\n\n # Check type\n assert isinstance(cell, tf.contrib.rnn.MultiRNNCell),\\\n 'Cell is wrong type. Found {} type'.format(type(cell))\n\n # Check for name attribute\n assert hasattr(init_state, 'name'),\\\n 'Initial state doesn\\'t have the \"name\" attribute. Try using `tf.identity` to set the name.'\n\n # Check name\n assert init_state.name == 'initial_state:0',\\\n 'Initial state doesn\\'t have the correct name. Found the name {}'.format(init_state.name)\n\n _print_success_message()\n\n\ndef test_get_embed(get_embed):\n with tf.Graph().as_default():\n embed_shape = [50, 5, 256]\n test_input_data = tf.placeholder(tf.int32, embed_shape[:2])\n test_vocab_size = 27\n test_embed_dim = embed_shape[2]\n\n embed = get_embed(test_input_data, test_vocab_size, test_embed_dim)\n\n # Check shape\n assert embed.shape == embed_shape,\\\n 'Wrong shape. Found shape {}'.format(embed.shape)\n\n _print_success_message()\n\n\ndef test_build_rnn(build_rnn):\n with tf.Graph().as_default():\n test_rnn_size = 256\n test_rnn_layer_size = 2\n test_cell = rnn.MultiRNNCell([rnn.BasicLSTMCell(test_rnn_size) for _ in range(test_rnn_layer_size)])\n\n test_inputs = tf.placeholder(tf.float32, [None, None, test_rnn_size])\n outputs, final_state = build_rnn(test_cell, test_inputs)\n\n # Check name\n assert hasattr(final_state, 'name'),\\\n 'Final state doesn\\'t have the \"name\" attribute. Try using `tf.identity` to set the name.'\n assert final_state.name == 'final_state:0',\\\n 'Final state doesn\\'t have the correct name. Found the name {}'.format(final_state.name)\n\n # Check shape\n assert outputs.get_shape().as_list() == [None, None, test_rnn_size],\\\n 'Outputs has wrong shape. Found shape {}'.format(outputs.get_shape())\n assert final_state.get_shape().as_list() == [test_rnn_layer_size, 2, None, test_rnn_size],\\\n 'Final state wrong shape. Found shape {}'.format(final_state.get_shape())\n\n _print_success_message()\n\n\ndef test_build_nn(build_nn):\n with tf.Graph().as_default():\n test_input_data_shape = [128, 5]\n test_input_data = tf.placeholder(tf.int32, test_input_data_shape)\n test_rnn_size = 256\n test_embed_dim = 300\n test_rnn_layer_size = 2\n test_vocab_size = 27\n test_cell = rnn.MultiRNNCell([rnn.BasicLSTMCell(test_rnn_size) for _ in range(test_rnn_layer_size)])\n\n logits, final_state = build_nn(test_cell, test_rnn_size, test_input_data, test_vocab_size, test_embed_dim)\n\n # Check name\n assert hasattr(final_state, 'name'), \\\n 'Final state doesn\\'t have the \"name\" attribute. Are you using build_rnn?'\n assert final_state.name == 'final_state:0', \\\n 'Final state doesn\\'t have the correct name. Found the name {}. 
Are you using build_rnn?'.format(final_state.name)\n\n # Check Shape\n assert logits.get_shape().as_list() == test_input_data_shape + [test_vocab_size], \\\n 'Outputs has wrong shape. Found shape {}'.format(logits.get_shape())\n assert final_state.get_shape().as_list() == [test_rnn_layer_size, 2, None, test_rnn_size], \\\n 'Final state wrong shape. Found shape {}'.format(final_state.get_shape())\n\n _print_success_message()\n\n\ndef test_get_tensors(get_tensors):\n test_graph = tf.Graph()\n with test_graph.as_default():\n test_input = tf.placeholder(tf.int32, name='input')\n test_initial_state = tf.placeholder(tf.int32, name='initial_state')\n test_final_state = tf.placeholder(tf.int32, name='final_state')\n test_probs = tf.placeholder(tf.float32, name='probs')\n\n input_text, initial_state, final_state, probs = get_tensors(test_graph)\n\n # Check correct tensor\n assert input_text == test_input,\\\n 'Test input is wrong tensor'\n assert initial_state == test_initial_state, \\\n 'Initial state is wrong tensor'\n assert final_state == test_final_state, \\\n 'Final state is wrong tensor'\n assert probs == test_probs, \\\n 'Probabilities is wrong tensor'\n\n _print_success_message()\n\n\ndef test_pick_word(pick_word):\n with tf.Graph().as_default():\n test_probabilities = np.array([0.1, 0.8, 0.05, 0.05])\n test_int_to_vocab = {word_i: word for word_i, word in enumerate(['this', 'is', 'a', 'test'])}\n\n pred_word = pick_word(test_probabilities, test_int_to_vocab)\n\n # Check type\n assert isinstance(pred_word, str),\\\n 'Predicted word is wrong type. Found {} type.'.format(type(pred_word))\n\n # Check word is from vocab\n assert pred_word in test_int_to_vocab.values(),\\\n 'Predicted word not found in int_to_vocab.'\n\n\n _print_success_message()" ]
[ [ "tensorflow.Graph", "numpy.array_equal", "tensorflow.contrib.rnn.BasicLSTMCell", "tensorflow.placeholder", "numpy.array" ] ]
jgd10/PySALESetup
[ "b2d5e2b50c198f7ac25842bf26ebbf4cad9ada47" ]
[ "examples/extension_zones.py" ]
[ "\"\"\"\nExtension zones are a bit fiddly in PySALESetup. They need\nto be created before the mesh object and fed in as arguments.\n\nThey must always be accompanied by an ExtensionZoneFactor as well\nwhich dictates how much they alter the cell size by.\n\"\"\"\n\nfrom PySALESetup import PySALEObject, PySALEMesh\nfrom PySALESetup.mesh import ExtensionZone, Region, \\\n ExtensionZoneFactor\nimport matplotlib.pyplot as plt\n\n# Construct 4 extension zones: one for each region\nextension_zones = [ExtensionZone(15, region, 1.,\n ExtensionZoneFactor(1.05, 20))\n for region in [Region.NORTH, Region.SOUTH,\n Region.EAST, Region.WEST]]\n\n# Build a mesh using the extension zones\nm = PySALEMesh(100, 100, extension_zones=extension_zones,\n cell_size=1.)\n\nobject1 = PySALEObject.generate_ellipse([56, 56],\n 50., 50., material=1,\n rotation=0)\nobject2 = PySALEObject.generate_ellipse([159, 159],\n 50., 50., material=2,\n rotation=0)\nobject3 = PySALEObject.generate_ellipse([107.2, 107.2],\n 50., 50., material=3,\n rotation=0)\n\n\nm.project_polygons_onto_mesh([object1, object2, object3])\n\nf, ax = m.plot_materials()\n# NB x_range and y_range are cell centres, not cell edges!\nfor item in m.x_range:\n ax.axvline(x=item, lw=0.2, color='m')\nfor item in m.y_range:\n ax.axhline(y=item, lw=0.2, color='m')\n\nplt.show()\n" ]
[ [ "matplotlib.pyplot.show" ] ]
PatrikValkovic/MasterThesis
[ "6e9f3b186541db6c8395ebc96ace7289d01c805b" ]
[ "src/FFEAT/test/strategies/initialization/UniformTest.py" ]
[ "###############################\n#\n# Created by Patrik Valkovic\n# 3/12/2021\n#\n###############################\nimport unittest\nimport torch as t\nfrom ffeat.strategies import initialization\n\n\nclass UniformTest(unittest.TestCase):\n def test_population_size_match(self):\n i = initialization.Uniform(51, -2.0, 2.0, 312)\n pop, kargs = i()\n self.assertEqual(pop[0].shape, (51, 312))\n\n def test_population_dimension_match(self):\n i = initialization.Uniform(51, -2.0, 2.0, (8,7))\n pop, kargs = i()\n self.assertEqual(pop[0].shape, (51, 8, 7))\n\n def test_dimension_from_min(self):\n i = initialization.Uniform(51, t.full((312,), -2.0), 2.0)\n pop, kargs = i()\n self.assertEqual(pop[0].shape, (51, 312))\n\n def test_dimension_from_min_multidimensional(self):\n i = initialization.Uniform(51, t.full((8,7), -2.0), 2.0)\n pop, kargs = i()\n self.assertEqual(pop[0].shape, (51, 8, 7))\n\n def test_dimension_from_max(self):\n i = initialization.Uniform(51, -2.0, t.full((312,), 2.0))\n pop, kargs = i()\n self.assertEqual(pop[0].shape, (51, 312))\n\n def test_dimension_from_max_multidimensional(self):\n i = initialization.Uniform(51, -2.0, t.full((8,7), 2.0))\n pop, kargs = i()\n self.assertEqual(pop[0].shape, (51, 8, 7))\n\n def test_missing_dimension(self):\n with self.assertRaises(ValueError):\n initialization.Uniform(51, -2.0, 2.0)\n\n def test_max_min_dimensions_not_match(self):\n with self.assertRaises(ValueError):\n initialization.Uniform(51, t.full((4,5), -2.0), t.full((8,7), 2.0))\n\n def test_min_dimension_not_match(self):\n with self.assertRaises(ValueError):\n initialization.Uniform(51, t.full((4,5), -2.0), 2.0, (4,6))\n\n def test_max_dimension_not_match(self):\n with self.assertRaises(ValueError):\n initialization.Uniform(51, -2.0, t.full((4,5), 2.0), (4,6))\n\n def test_max_smaller_than_min(self):\n with self.assertRaises(ValueError):\n initialization.Uniform(51, 2.0, -2.0, (4,6))\n\n def test_min_int(self):\n i = initialization.Uniform(51, -2, 2.0, (4,6))\n pop, kargs = i()\n self.assertEqual(pop[0].shape, (51, 4, 6))\n\n def test_min_float(self):\n i = initialization.Uniform(51, -2, 2.0, (4,6))\n pop, kargs = i()\n self.assertEqual(pop[0].shape, (51, 4, 6))\n\n def test_min_list(self):\n i = initialization.Uniform(51, [[-2.0]*6]*4, 2.0)\n pop, kargs = i()\n self.assertEqual(pop[0].shape, (51, 4, 6))\n\n def test_min_tensor(self):\n i = initialization.Uniform(51, t.full((4,6), -2.0), 2.0)\n pop, kargs = i()\n self.assertEqual(pop[0].shape, (51, 4, 6))\n\n def test_max_int(self):\n i = initialization.Uniform(51, -2.0, 2, (4,6))\n pop, kargs = i()\n self.assertEqual(pop[0].shape, (51, 4, 6))\n\n def test_max_float(self):\n i = initialization.Uniform(51, -2.0, 2.0, (4,6))\n pop, kargs = i()\n self.assertEqual(pop[0].shape, (51, 4, 6))\n\n def test_max_list(self):\n i = initialization.Uniform(51, -2.0, [[2.0]*6]*4)\n pop, kargs = i()\n self.assertEqual(pop[0].shape, (51, 4, 6))\n\n def test_max_tensor(self):\n i = initialization.Uniform(51, -2.0, t.full((4,6), 2.0))\n pop, kargs = i()\n self.assertEqual(pop[0].shape, (51, 4, 6))\n\n def test_max_works(self):\n i = initialization.Uniform(51, -2.0, t.full((4,6), 2.0))\n for _ in range(100):\n pop, kargs = i()\n self.assertTrue(t.all(pop[0] < 2.0))\n\n def test_min_works(self):\n i = initialization.Uniform(51, t.full((4,6), -2.0), 2.0)\n for _ in range(100):\n pop, kargs = i()\n self.assertTrue(t.all(pop[0] >= -2.0))\n\n def test_shifted(self):\n i = initialization.Uniform(51, 3.0, t.full((4,6), 5.0))\n for _ in range(100):\n pop, 
kargs = i()\n self.assertTrue(t.all(pop[0] >= 3.0))\n self.assertTrue(t.all(pop[0] < 5.0))\n\n def test_float16_type(self):\n i = initialization.Uniform(51, 3.0, t.full((4,6), 5.0), dtype=t.float16)\n pop, kargs = i()\n self.assertEqual(pop[0].dtype, t.float16)\n\n def test_long_type(self):\n i = initialization.Uniform(51, 3.0, t.full((4,6), 5.0), dtype=t.long)\n pop, kargs = i()\n self.assertEqual(pop[0].dtype, t.long)\n\n def test_int8_type(self):\n i = initialization.Uniform(51, 3.0, t.full((4,6), 5.0), dtype=t.int8)\n pop, kargs = i()\n self.assertEqual(pop[0].dtype, t.int8)\n\n @unittest.skipIf(not t.cuda.is_available(), \"CUDA not available\")\n def test_on_cuda(self):\n i = initialization.Uniform(51, 3.0, 5.0, (7,8), device='cuda')\n pop, kargs = i()\n self.assertEqual(pop[0].device, t.device('cuda:0'))\n\n @unittest.skipIf(not t.cuda.is_available(), \"CUDA not available\")\n def test_device_from_min(self):\n i = initialization.Uniform(51, t.full((7,8), 3.0, device='cuda'), 5.0)\n pop, kargs = i()\n self.assertEqual(pop[0].device, t.device('cuda:0'))\n\n @unittest.skipIf(not t.cuda.is_available(), \"CUDA not available\")\n def test_device_from_max(self):\n i = initialization.Uniform(51, 3.0, t.full((7,8), 5.0, device='cuda'))\n pop, kargs = i()\n self.assertEqual(pop[0].device, t.device('cuda:0'))\n\n\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "torch.device", "torch.all", "torch.cuda.is_available", "torch.full" ] ]
fermi-lat/pointlike
[ "edcdc696c3300e2f26ff3efa92a1bd9790074247" ]
[ "python/uw/utilities/fitter.py" ]
[ "\"\"\"\nBasic fitter utilities\n\nAuthors: Matthew Kerr, Toby Burnett\n$Header: /nfs/slac/g/glast/ground/cvs/pointlike/python/uw/utilities/fitter.py,v 1.10 2013/07/28 15:27:44 burnett Exp $\n\n\"\"\"\nimport types\nimport numpy as np\nfrom scipy import optimize #for fmin,fmin_powell,fmin_bfgs\nfrom numpy import linalg #for inv\nimport numdifftools\n\nclass FitterException(Exception): pass\n\nclass Fitted(object):\n \"\"\" base class for a function object to define fit properties \"\"\"\n @property\n def bounds(self):\n return None\n @property\n def parameter_names(self):\n return None\n def get_parameters(self):\n raise FitterException('get_parameters is not implemented')\n def set_parameters(self, par):\n raise FitterException('set_parameters is not implemented')\n \n def minimize(self, **kwargs):\n \"\"\" minimize the function using optimize.fmin_l_bfgs_b\n\n \"\"\"\n use_gradient = kwargs.pop('use_gradient',True)#, self.gradient(self.get_parameters()) is None)\n ret =optimize.fmin_l_bfgs_b(self, self.get_parameters(), \n bounds=self.bounds, \n fprime= None, # expect gradient calculated by function\n approx_grad = not use_gradient,\n args = (use_gradient,), # pass to the function\n **kwargs)\n if ret[2]['warnflag']==0: \n self.set_parameters(ret[0])\n else:\n print ('Fit failure:\\n%s' % ret[2])\n return ret\n \n def hessian(self, pars=None, **kwargs):\n \"\"\" \n Return the Hessian matrix \n For sigmas and correlation coefficients, invert to covariance\n cov = self.hessian().I\n sigs = np.sqrt(cov.diagonal())\n corr = cov / np.outer(sigs,sigs)\n \"\"\"\n if pars is None: pars = self.get_parameters()\n return np.matrix(numdifftools.Hessian(self, **kwargs)(pars))\n\ndef test(fn = None, p0=None, pars=None):\n if fn is None:\n pars=[1.0, 2.]\n fn = lambda p: 1.+ 0.5*((p[0]-pars[0])/pars[1])**2\n return TestFunc(fn, [1.1]) \n \n \nclass Minimizer(object):\n \"\"\" this is mostly extracted as is from uw.like.specfitter and turned into a utility\n \"\"\"\n\n def __init__(self, fn, parameters=None, args=(), quiet=True):\n \"\"\" fn : function object\n note that it will be minimized, so should be negative of log likelihood\n \"\"\"\n self.quiet = quiet\n self.par = parameters\n self.args = args\n self.fn = fn \n npar = len(self.get_parameters())\n self.cov_matrix=np.zeros([npar,npar])\n \n def gradient(self,parameters,*args):\n \"\"\" access gradient if defined by the function\n \"\"\"\n assert hasattr(self.fn, 'gradient'), 'Minimize: use_gradient set, but function did not define a gradient'\n return self.fn.gradient(parameters)\n\n def get_parameters(self):\n return self.fn.get_parameters() if self.par is None else self.par\n\n def set_parameters(self, par):\n if self.par is None:\n self.fn.set_parameters(par)\n else:\n self.par = par\n\n def get_free_errors(self):\n \"\"\"Return the diagonal elements of the covariance matrix -- useful for step sizes in minimization, if known.\n \"\"\"\n assert False, 'get_free_errors not implemented yet'\n \n def optimize(self, optimizer, **kwargs):\n return optimizer( self.fn, self.get_parameters(), **kwargs)\n \n def __call__(self, method='simplex', tolerance = 0.01, save_values = True, \n estimate_errors=True, error_for_steps=False,\n use_gradient = True, gtol = 1e-1, **kwargs):\n \"\"\"Maximize likelihood and estimate errors.\n method -- ['simplex'] fitter; 'powell' or 'simplex' or 'minuit'\n tolerance -- (approximate) absolute tolerance \n \n \"\"\"\n if method.lower() not in ['simplex','powell','minuit', 'l-bfgs-b']:\n raise Exception('Unknown fitting 
method for F.fit(): \"%s\"' % method)\n\n use_gradient = use_gradient and hasattr(self.fn, 'gradient')\n use_bounds = kwargs.pop('use_bounds', self.fn.bounds is not None)\n if method == 'minuit':\n return self.minuit()\n # scipy\n ll_0 = self.fn(self.get_parameters(), *self.args) \n if ll_0==0: ll_0=1.0\n if use_gradient and not use_bounds:\n f0 = optimize.fmin_bfgs(self.fn,self.get_parameters(),self.gradient,full_output=1,maxiter=500,gtol=gtol,disp=0)\n for i in xrange(10):\n f = self._save_bfgs = optimize.fmin_bfgs(self.fn,self.get_parameters(),self.gradient,\n full_output=1,maxiter=500,gtol=gtol,disp=0)\n if abs(f0[1] - f[1]) < tolerance: break # note absolute tolerance\n if not self.quiet:\n print ('Did not converge on first gradient iteration. Trying again.')\n print (f0[1],f[1],abs(f0[1]-f[1]))\n f0 = f\n elif use_gradient:\n if not self.quiet: print ('using optimize.fmin_l_bfgs_b with parameter bounds %s\\n, kw= %s'% (self.fn.bounds, kwargs))\n ret = optimize.fmin_l_bfgs_b(self.fn, self.get_parameters(), \n bounds=self.fn.bounds, \n fprime=self.gradient , \n **kwargs)\n if ret[2]['warnflag']>0: \n print ('Fit failure:\\n%s' % ret[2])\n if not self.quiet:\n print (ret[2])\n f = ret \n else:\n minimizer = optimize.fmin_powell if method == 'powell' else optimize.fmin\n f = minimizer(self.fn, self.get_parameters(),full_output=1,\n maxiter=10000, maxfun=20000, ftol=0.01/abs(ll_0), disp=0 if self.quiet else 1)\n \n if not self.quiet: print ('Function value at minimum: %.8g'%f[1])\n self.set_parameters(f[0])\n self.fitvalue=f[1]\n if estimate_errors: \n self.__set_error__(use_gradient)\n if estimate_errors:\n diag = self.cov_matrix.diagonal().copy()\n bad = diag<0\n if np.any(bad):\n if not self.quiet: print ('Minimizer warning: bad errors for values %s'\\\n %np.asarray(self.fn.parameter_names)[bad]) # %np.arange(len(bad))[bad]\n diag[bad]=np.nan\n return f[1], f[0], np.sqrt(diag)\n return f[1], f[0]\n \n def minuit(self):\n from uw.utilities.minuit import Minuit\n temp_params = self.get_parameters()\n npars = temp_params.shape[0]\n param_names = ['p%i'%i for i in xrange(npars)]\n \n if use_gradient :\n gradient = self.gradient\n force_gradient = 1\n else:\n gradient = None\n force_gradient = 0\n\n if error_for_steps:\n steps = self.get_free_errors()\n steps[steps<1e-6] = 0.04 # for models without error estimates, put in the defaults\n steps[steps > 1] = 1 # probably don't want to step more than 100%...\n m = Minuit(self.fn,temp_params,up=.5,maxcalls=20000,tolerance=tolerance,printMode=-self.quiet,param_names=param_names,steps=steps)\n else:\n m = Minuit(self.fn,temp_params,up=.5,maxcalls=20000,tolerance=tolerance,printMode=-self.quiet,param_names=param_names)\n\n params,fval = m.minimize()\n\n if save_values:\n if estimate_errors == True:\n self.__set_error_minuit(m,'HESSE')\n self.fn(params) # reset values to the ones found by minimization step\n self.fitvalue= fval\n return fval\n \n def __set_error_minuit(self,m,method='HESSE'):\n \"\"\"Compute errors for minuit fit.\"\"\"\n #Not sure yet if there will be problems with including the backgrounds.\n self.cov_matrix = m.errors(method=method)\n print ('Minuit error not done?')\n #self.bgm.set_covariance_matrix(self.cov_matrix,current_position = 0)\n #self.psm.set_covariance_matrix(self.cov_matrix,current_position = len(self.bgm.parameters()))\n\n def sigmas(self):\n \"\"\" quietly return nan for negative diagonal terms \"\"\"\n diag = self.cov_matrix.diagonal()\n bad = diag<0\n if np.any(bad): diag[bad]=np.nan\n return np.sqrt(diag)\n\n 
def correlations(self, percent=False):\n \"\"\"Return the linear correlation coefficients for the estimated covariance matrix.\n any rows or columns with a zero error (failed fit) will be nan\n \"\"\"\n s = self.sigmas()\n s[s==0] = np.nan\n t =self.cov_matrix / np.outer(s,s)\n return t*100. if percent else t\n\n def __set_error__(self,use_gradient=False):\n\n npar = len(self.get_parameters())\n if use_gradient:\n save_pars = self.get_parameters().copy()\n cov_matrix,hessian = Minimizer.mycov(self.gradient,self.get_parameters(),full_output=True)[:2]\n self.set_parameters(save_pars)\n mask = hessian.diagonal()>0\n else:\n hessian, bad_mask = Minimizer.hessian(self.fn, self.get_parameters(), quiet=self.quiet)\n cov_matrix = None\n mask = bad_mask==0\n if np.all(-mask):\n self.cov_matrix = np.zeros([npar,npar])\n success = False\n return\n full = np.all(mask)\n if not full:\n h = hessian[mask].T[mask]\n hessian = h\n success = False\n npar = len(self.get_parameters())\n try:\n if linalg.det(hessian)<=0:\n full=False\n \n if not self.quiet: print ('Attempting to invert full hessian...')\n self.cov_matrix =t = cov_matrix if cov_matrix is not None else linalg.inv(hessian)\n if np.any(np.isnan(self.cov_matrix)):\n if not self.quiet: print ('Found NaN in covariance matrix!')\n raise Exception('Found NaN in covariance matrix!')\n # now expand if necesary\n if not full:\n # must be better way to expand a matrix\n self.cov_matrix =np.zeros([npar,npar])\n k = np.arange(npar)[mask]\n for i in range(len(k)):\n ki = k[i]\n self.cov_matrix[k[i],k[i]] = t[i,i] \n for j in range(i+1, len(k)):\n self.cov_matrix[ki,k[j]] =self.cov_matrix[k[j],ki] = t[i,j]\n success = True\n except linalg.LinAlgError as e:\n if not qself.quiet:\n print ('Error generating cov matrix, %s' % e)\n self.cov_matrix = np.zeros([npar,npar])\n success = False\n return success\n\n @staticmethod\n def hessian(mf, pars, quiet=True, *args):\n \"\"\"Calculate the Hessian matrix using finite differences (adapted from specfitter.SpectralModelFitter.hessian)\n \n mf: minimizing function\n pars: parameters at the minimum,\n args: additional arguments for mf.\n \n returns matrix, error code array\n \"\"\"\n p = pars.copy()\n npar = len(pars)\n deltas = np.abs(0.01 * p) #initial guess\n hessian = np.zeros([npar,npar])\n bad_mask = np.asarray([False] * npar)\n return_code = np.zeros(npar)\n\n l0 = mf(p, *args)\n\n #find good values with which to estimate the covariance matrix -- look at diagonal deviations\n #iterate until change in function consistent with ~1 sigma conditional error\n for i in xrange(npar):\n if not quiet: print ('Working on parameter %d'%(i))\n h,l = p.copy(),p.copy()\n for j in xrange(10):\n h[:] = p[:]; l[:] = p[:];\n h[i] += deltas[i]\n l[i] -= deltas[i]\n\n delta_f_1 = mf(h, *args) - l0\n delta_f_2 = mf(l, *args) - l0\n delta_f = max(delta_f_1 + delta_f_2,0) #twice difference, really\n deltas[i] /= max(delta_f**0.5,0.33) # can change by half decade\n if delta_f < 5 and delta_f > 0.5: break\n\n if delta_f < 5e-3:\n # no constraint on parameter -- ignore it in further fittingor :\n bad_mask[i] = True\n return_code[i] = 1\n if (delta_f_1/delta_f_2 > 10 or delta_f_1/delta_f_2 < 1./10):\n # significant asymmetry in likelihood \n bad_mask[i] = True\n return_code[i] = 2\n if (delta_f_2 < 5e-3 and delta_f_1 > 0.5):\n # not actually at maximum of likelihood -- upper limit condition\n bad_mask[i] = True\n return_code[i] = 3\n if not quiet: print ('fail, need upper limit')\n import pdb; pdb.set_trace()\n\n for i in xrange(npar):\n if 
bad_mask[i]:\n hessian[i,:] = 0 #no correlation?\n hessian[:,i] = 0\n continue\n for j in xrange(i,npar): #Second partials by finite difference\n \n xhyh,xhyl,xlyh,xlyl=p.copy(),p.copy(),p.copy(),p.copy()\n xdelt = deltas[i]\n ydelt = deltas[j]\n xhyh[i] += xdelt; xhyh[j] += ydelt\n xhyl[i] += xdelt; xhyl[j] -= ydelt\n xlyh[i] -= xdelt; xlyh[j] += ydelt\n xlyl[i] -= xdelt; xlyl[j] -= ydelt\n hessian[i][j]=hessian[j][i]=(mf(xhyh, *args)-mf(xhyl, *args)\n -mf(xlyh, *args)+mf(xlyl, *args))/\\\n (4*xdelt*ydelt)\n\n mf(p, *args) #call likelihood with original values; this resets model and any other values that might be used later\n return hessian,return_code\n\n @staticmethod\n def mycov(grad,par,full_output=False,init_step=0.04,min_step=1e-6,max_step=1,max_iters=5,target=0.5,min_func=1e-4,max_func=4):\n \"\"\"Perform finite differences on the _analytic_ gradient provided by user to calculate hessian/covariance matrix.\n\n Positional args:\n grad : a function to return a gradient\n par : vector of parameters (should be function minimum for covariance matrix calculation)\n\n Keyword args:\n\n full_output [False] : if True, return information about convergence, else just the covariance matrix\n init_step [1e-3] : initial step size (0.04 ~ 10% in log10 space); can be a scalar or vector\n min_step [1e-6] : the minimum step size to take in parameter space\n max_step [1] : the maximum step size to take in parameter sapce\n max_iters [5] : maximum number of iterations to attempt to converge on a good step size\n target [0.5] : the target change in the function value for step size\n min_func [1e-4] : the minimum allowable change in (abs) function value to accept for convergence\n max_func [4] : the maximum allowable change in (abs) function value to accept for convergence\n \"\"\"\n\n nparams = len(par)\n step_size = np.ones(nparams)*init_step\n step_size = np.maximum(step_size,min_step*1.1)\n step_size = np.minimum(step_size,max_step*0.9)\n hess = np.zeros([nparams,nparams])\n min_flags = np.asarray([False]*nparams)\n max_flags = np.asarray([False]*nparams)\n\n def revised_step(delta_f,current_step,index):\n if (current_step == max_step):\n max_flags[i] = True\n return True,0\n elif (current_step == min_step):\n min_flags[i] = True\n return True,0\n else:\n adf = abs(delta_f)\n if adf < 1e-8:\n # need to address a step size that results in a likelihood change that's too\n # small compared to precision\n pass\n \n if (adf < min_func) or (adf > max_func):\n new_step = current_step/(adf/target)\n new_step = min(new_step,max_step)\n new_step = max(new_step,min_step)\n return False,new_step\n else:\n return True,0\n \n iters = np.zeros(nparams)\n for i in xrange(nparams):\n converged = False\n for j in xrange(max_iters):\n iters[i] += 1\n di = step_size[i]\n par[i] += di\n g_up = grad(par)\n par[i] -= 2*di\n g_dn = grad(par)\n par[i] += di\n delta_f = (g_up - g_dn)[i]\n converged,new_step = revised_step(delta_f,di,i)\n #print ('Parameter %d -- Iteration %d -- Step size: %.2e -- delta: %.2e'%(i,j,di,delta_f))\n if converged: break\n else: step_size[i] = new_step\n hess[i,:] = (g_up - g_dn) / (2*di) # central difference\n if not converged:\n print ('Warning: step size for parameter %d (%.2g) did not result in convergence.'%(i,di))\n try:\n cov = np.linalg.inv(hess)\n except:\n print ('Error inverting hessian.')\n #cov = np.zeros([nparams,nparams])\n raise Exception('Error inverting hessian')\n if full_output:\n return cov,hess,step_size,iters,min_flags,max_flags\n else:\n return cov\n\nclass 
Projector(Fitted):\n \"\"\" adapt a function object to create a projection, a function of a subset of its parameters\n Require that it has a methods __call__, set_parmeters, get_parameters, and perhaps gradient\n \"\"\"\n def __init__(self, fn, select=[0], par=None, ):\n \"\"\" \n \n parameters:\n fn: function of par: should be minimizable \n par: array type or None\n default parameters to use: if None, get from fn.get_parameters)\n select: list of free parameter \n TODO: use mask instead or optionally\n \"\"\"\n self.fn=fn\n self.select = select\n self.mask = np.zeros(len(fn.get_parameters()),bool)\n self.mask[select]=True\n self.fpar= fn.get_parameters().copy()\n self.par = np.asarray(par[:]) if par is not None else self.fpar[self.mask]\n assert len(self.par)==sum(self.mask), 'wrong number of specified parameters'\n def get_parameters(self):\n return self.par\n def set_parameters(self,par=None):\n p = par if par is not None else self.par\n self.par = p\n self.fpar[self.mask] = p\n self.fn.set_parameters(self.fpar) # note this sets the original set\n \n def __call__(self, x):\n \"\"\" len of x must be number of selected parameters\"\"\"\n self.fpar[self.mask]=x\n ret= self.fn(self.fpar)\n #print ('value(%.2f)=%.2f' % (x,ret))\n return ret\n def gradient(self, x):\n \"\"\" the function object may not support this\n \"\"\"\n self.fpar[self.mask]=x\n t = self.fn.gradient(self.fpar)[self.mask]\n #print ('gradient(%.2f)=%.2f' % (x, t))\n return t\n @property\n def parameter_names(self):\n return None if not hasattr(self.fn,'parameter_names') else self.fn.parameter_names[self.mask]\n \n @property\n def bounds(self):\n return None if self.fn.bounds is None else np.array(self.fn.bounds)[self.mask]\n \n def fmin(self, x=None, **kwargs):\n \"\"\" run simple fmin \"\"\"\n try:\n par = optimize.fmin(self, [x] if x is not None else self.par, **kwargs)\n self.set_parameters(par)\n except:\n raise\n \n def minimize(self, par0=None, **fit_kw):\n \"\"\" create Minimizer of this, run it, update original parameters\n parameters:\n par0 : array type of float or None\n pass to Minimizer\n \n return value, parameter values, errors\n \"\"\"\n self.fitter = Minimizer(self, par0)\n \n c2, par, dpar = self.fitter(**fit_kw)\n self.par = par\n self.set_parameters(par)\n return c2, par, dpar\n\n\nclass Profile(Fitted):\n \"\"\" Manage a function of one parameter, projected from a multi-parameter function,\n with option evaluate by either optimizing on the remaining parameters or not\n \"\"\"\n\n def __init__(self, fn, index, par=None, profile=True):\n \"\"\"\n parameters\n ---------\n fn : function of a set of parameters\n Must implement Fitted interface\n index : integer or string\n the index to examine, or its parameter name\n par: arary type or None\n initial set of parameters for fn if not None\n profile: bool\n set False to not apply profile\n \"\"\"\n # local reference to the basic function, copy of original parametes\n self.fn = fn\n if type(index)==types.StringType:\n try:\n self.index = list(fn.parameter_names).index(index)\n except ValueError:\n raise FitterException('parameter name \"%s\" not one of %s' % (index, fn.parameter_names))\n except Exception as msg:\n raise\n else: self.index = index\n self.fpar = par if par is not None else fn.get_parameters().copy()\n npar = len(self.fpar)\n self.mask = np.ones(npar,bool)\n self.mask[self.index]=False\n \n # set up function of the selected parameter (self) and a function of the rest\n select = range(npar)\n assert self.index in select, 'Expect index to select 
to be one of parameters'\n self.par = self.fpar[self.index:self.index+1]\n select.remove(self.index)\n self.pfun = Projector(fn, select)\n self.profile = profile\n \n # set up a fitter for the remaining parameters\n self.fitter = Minimizer(self.pfun) \n \n def __call__(self, x):\n self.fpar[self.index]=x[0]\n # if don't optimize the other parameters\n if self.profile: \n v,p,s =self.fitter() #fit value, parameters, errors\n self.fpar[self.mask]=p\n r = self.fn(self.fpar)\n print (v,r)\n else:\n r = self.fn(self.fpar)\n return r\n \n @property\n def parameter_names(self):\n return self.fn.parameter_names[self.index:self.index+1]\n \n def get_parameters(self):\n return self.par\n \n def set_parameters(self, par=None):\n p = par if par is not None else self.par\n self.par = p\n self.fpar[self.index] = p\n \n \nclass TestFunc(Fitted):\n def __init__(self, fn, pars):\n self.fn = fn\n self.pars = pars\n @property \n def bounds(self):\n return [(0.9,2), ]\n def __call__(self, pars):\n return self.fn(pars)\n def get_parameters(self): return self.pars\n def set_parameters(self,pars):\n self.pars = pars\n\n \ndef test(x0=1.1, pars=[1.0, 1.5], **kwargs):\n \"\"\" test with a parabola corresponding to a Gaussian with mean, sigma in pars\n \n >>> pars=[1.0, 1.5]; x0=1.1\n >>> testf = lambda p: 1.+ 0.5*((p[0]-pars[0])/pars[1])**2\n >>> func = TestFunc(testf, [x0])\n >>> m = Minimizer(func) # create minimizer object\n >>> m() # run default fit\n (1.0000000000211928, array([ 0.99999023]), array([ 1.5]))\n \"\"\"\n testf = lambda p: 1.+ 0.5*((p[0]-pars[0])/pars[1])**2\n \n print ('input parameters:', pars)\n func = TestFunc(testf, [x0])\n m = Minimizer(func)\n #m = Minimizer(testf, [x0], )\n f = m(use_gradient=False)\n print ('solution at %.2f, +/- %.2f ' % (m.get_parameters(), np.sqrt(m.cov_matrix.diagonal())))\n return func, m, f\n\nif __name__ == \"__main__\":\n print (__doc__)\n import doctest\n doctest.testmod()\n " ]
[ [ "numpy.maximum", "numpy.abs", "numpy.sqrt", "numpy.minimum", "numpy.asarray", "numpy.linalg.inv", "numpy.isnan", "numpy.arange", "numpy.ones", "numpy.all", "numpy.linalg.det", "scipy.optimize.fmin", "numpy.any", "numpy.outer", "numpy.array", "numpy.zeros" ] ]
vsocrates/biomedical_el
[ "12e3a3cf72acd18437bc9d94fc385a67bf0b2023" ]
[ "entitylinker/train_utils.py" ]
[ "import json\nimport torch \nfrom torch.utils.data import DataLoader\nimport torch.optim as optim\nfrom torch.nn import CrossEntropyLoss, NLLLoss\nimport torch.nn.functional as F \nfrom torch import autograd\n\nimport numpy as np \n\nfrom transformers.models.luke.configuration_luke import LukeConfig\nfrom transformers import AutoTokenizer, AutoModel, AutoConfig\n\nfrom entitylinker.lukemodel import LukeModel \nfrom entitylinker.model_utils import *\n\nfrom entitylinker.dataset import MedMentionsDataset, Collater\nfrom entitylinker.model import EntityLinker\n\n\n\n\ndef sim_matrix(a, b, eps=1e-8):\n \"\"\"\n works on a 3D and 2D array\n \"\"\"\n a_n, b_n = torch.linalg.norm(a, dim=-1)[:, :, None], torch.linalg.norm(b, dim=-1)[:, None]\n a_norm = a / torch.max(a_n, eps * torch.ones_like(a_n))\n b_norm = b / torch.max(b_n, eps * torch.ones_like(b_n))\n sim_mt = torch.matmul(a_norm, b_norm.transpose(0, 1)) \n return sim_mt \n\n\n# define loss function\ndef mention_entity_loss(mention_pred, entity_pred, bio_tags, entity_ids, \n attention_mask, pretrained_entity_embedding, device, lm=0.1):\n # attention_mask torch.Size([27,256])\n # mention_pred torch.Size([27, 256, 3])\n # entity_pred torch.Size([27, 256, 256])\n # bio_tags torch.Size([27, 256])\n # entity_ids torch.Size([27, 256])\n \n # unpadded_men_pred torch.Size([25, 3, 256, 256])\n # unpadded_bio_tags torch.Size([25, 256, 256]) \n \n ### MENTION LOSS\n\n # first compute all loss\n mention_loss = F.nll_loss(mention_pred.permute(0,2,1).contiguous(), bio_tags, reduction=\"none\")\n\n # get only the unpadded losses: batch size\n num_unpadded = torch.sum(attention_mask, dim=1)\n masked_mention_loss = torch.where(attention_mask == 1, mention_loss, torch.tensor(0.).float().to(device))\n \n # compute average of the masked loss\n avg_mention_loss = torch.sum(masked_mention_loss, dim=1) / num_unpadded\n avg_mention_loss = torch.sum(avg_mention_loss)/len(avg_mention_loss)\n\n \n ### ENTITY LOSS \n # get all the IDs in entity_ids that aren't -1\n nonzero_ent_ids = entity_ids[entity_ids!=-1]\n # get the true entity embeddings\n # shape: [nonzero_ent_ids, embed_dim]\n true_ent_embeddings = pretrained_entity_embedding[nonzero_ent_ids, :]\n \n # get non-neg1 idxs \n ent_idxs = (entity_ids != -1).nonzero()\n real_entity_pred = entity_pred[ent_idxs[:, 0], ent_idxs[:, 1], :] \n entity_loss = torch.sum(1 - F.cosine_similarity(true_ent_embeddings, real_entity_pred, dim=1))/true_ent_embeddings.shape[0] \n \n\n loss = (lm * avg_mention_loss) + ((1-lm) * entity_loss)\n return loss\n\n\ndef get_bi_spans(batch_tags):\n \"\"\"Returns a list of N x 2 numpy arrays\"\"\"\n batch_tags = batch_tags.cpu().detach().numpy()\n bi_spans = []\n for batch_idx in range(batch_tags.shape[0]):\n b_tags = (batch_tags[batch_idx, :] == 0).astype(int)\n i_tags = (batch_tags[batch_idx, :] == 1).astype(int)\n \n # if there aren't any beginning tags, just return an empty array\n if not b_tags.any():\n bi_spans.append(np.array([]))\n continue\n \n # get all B tag locations\n b_idx, = b_tags.nonzero() \n\n # get idxs for I tags\n d = np.diff(i_tags)\n i_idx, = d.nonzero()\n\n # We need to start things after the change in \"condition\". 
Therefore, \n # we'll shift the index by 1 to the right.\n i_idx += 1\n\n # add end idxs for all B tags (just interweave +1 of b_idxs)\n b_idx = np.vstack((b_idx,b_idx+1)).reshape((-1,),order='F') \n\n if i_tags[0]:\n # If the start of condition is True prepend a 0\n i_idx = np.r_[0, i_idx]\n\n if i_tags[-1]:\n # If the end of condition is True, append the length of the array\n i_idx = np.r_[i_idx, i_tags.size] # Edit\n\n # reshape to idxs\n b_idx.shape = (-1,2)\n i_idx.shape = (-1,2) \n\n bi_idx = []\n # combine the b and i tags\n for start, stop in b_idx:\n # get idx where I tags start for each B tag, if exists\n i, = np.where(i_idx[:, 0] == stop) \n if i.size > 0:\n bi_idx.append([start, int(i_idx[i, 1])])\n else:\n bi_idx.append([start, stop])\n\n bi_spans.append(np.array(bi_idx))\n\n assert batch_tags.shape[0] == len(bi_spans), f\"{batch_tags.shape[0]},{len(bi_spans)},{b_idx}\"\n return bi_spans\n\n\ndef compute_conf_matrices(mention_preds, entity_preds, bio_tags, entity_ids, pretrained_entity_embedding):\n '''Computes both macro and micro confusion matrices given torch tensor outputs from our model\n '''\n # mention_pred Size([27, 256, 3])\n # entity_pred torch.Size([27, 256, 256])\n\n nb_classes = 2\n micro_confusion_matrix = torch.zeros(nb_classes, nb_classes)\n # micro_confusion_matrix [[TP, FN], [FP, TN]]\n \n macro_confusion_matrix = [] \n \n # first we need to find the max of the BIO tagging we have\n pred_bio_tags = torch.argmax(mention_preds, dim=2)\n # pred_bio_tags torch.Size([28, 256])\n \n # create two lists of spans\n pred_span_list = get_bi_spans(pred_bio_tags)\n target_span_list = get_bi_spans(bio_tags) \n\n for batch_idx, (pred_spans, target_spans) in enumerate(zip(pred_span_list, target_span_list)):\n tp_matches = 0\n \n batch_confusion_matrix = torch.zeros(nb_classes, nb_classes)\n\n # do the STRONG MATCHING here\n\n # first check if there are any predicted spans, if not, we just add a bunch of FNs\n if pred_spans.size <= 0 or pred_spans.shape[0] == 0:\n micro_confusion_matrix[0,1] += target_spans.shape[0]\n batch_confusion_matrix[0,1] += target_spans.shape[0]\n \n # conversely, if the target spans are empty, we add a bunch of FPs instead\n elif target_spans.size <= 0:\n micro_confusion_matrix[1,0] += pred_spans.shape[0]\n batch_confusion_matrix[1,0] += pred_spans.shape[0] \n \n else:\n for span_idx in range(pred_spans.shape[0]):\n \n if torch.all(torch.tensor(pred_spans[span_idx, :] == target_spans), dim=1).any():\n seq_idx = pred_spans[span_idx, 0]\n # we use the start span idx to get the entity embedding and compare it to the pretrained embeds\n\n cos_sim = sim_matrix(entity_preds[batch_idx, seq_idx, :].unsqueeze(0).unsqueeze(0), \n pretrained_entity_embedding)\n # this should return something of size [1, 1, sim_vals]\n pred_ent_embed_id = torch.argmax(torch.squeeze(cos_sim))\n target_ent_embed_id = entity_ids[batch_idx, seq_idx]\n # just to test: \n # TODO: REMOVE\n # if batch_idx == 1 and seq_idx == 0:\n # target_ent_embed_id = 2\n # TODO: ENDREMOVE\n if pred_ent_embed_id == target_ent_embed_id:\n # TP\n micro_confusion_matrix[0,0] += 1\n batch_confusion_matrix[0,0] += 1 \n tp_matches += 1\n else:\n # FP\n micro_confusion_matrix[1,0] += 1\n batch_confusion_matrix[1,0] += 1\n else:\n # FP \n micro_confusion_matrix[1,0] += 1\n batch_confusion_matrix[1,0] += 1\n \n # all FNs \n micro_confusion_matrix[0,1] += target_spans.shape[0] - tp_matches\n batch_confusion_matrix[0,1] += target_spans.shape[0] - tp_matches\n \n # we don't do TNs since we don't need them for 
P,R,F1\n \n # add on the batch confusion matrix to the batch\n macro_confusion_matrix.append(batch_confusion_matrix)\n\n return micro_confusion_matrix, macro_confusion_matrix\n\n\ndef compute_metrics(metrics, micro_conf, macro_confs):\n '''Given both the micro and macro confusion matrices, we compute a given set of metrics\n \n Matrix orientation is: \n | TP | FN |\n |-------------\n | FP | TN |\n '''\n micro_metrics = {}\n macro_metrics = {}\n \n if \"precision\" in metrics:\n micro_metrics['precision'] = micro_conf[0,0] / (micro_conf[0,0] + micro_conf[1,0])\n macro_metrics['precision'] = torch.mean(torch.tensor([batch_conf[0,0] / (batch_conf[0,0] + batch_conf[1,0]) for batch_conf in macro_confs]))\n if \"recall\" in metrics:\n micro_metrics['recall'] = micro_conf[0,0] / (micro_conf[0,0] + micro_conf[0,1])\n macro_metrics['recall'] = torch.mean(torch.tensor([batch_conf[0,0] / (batch_conf[0,0] + batch_conf[0,1]) for batch_conf in macro_confs]))\n if \"f1\" in metrics:\n micro_metrics['f1'] = micro_conf[0,0] / (micro_conf[0,0] + ((1/2) * (micro_conf[0,1] + micro_conf[1,0])))\n macro_metrics['f1'] = torch.mean(torch.tensor([batch_conf[0,0] / (batch_conf[0,0] + ((1/2) * (batch_conf[0,1] + batch_conf[1,0]))) for batch_conf in macro_confs]))\n \n return micro_metrics, macro_metrics\n\n\n# Train!!\ndef train(model, train_loader, optimizer, criterion, epoch, pretrained_entity_embeddings, device, scheduler):\n\n model.train()\n running_loss = 0.0\n for i, data in enumerate(train_loader, 0):\n # get the inputs; data is a list of [inputs, labels]\n tokenized_text, bio_tags, entity_ids = data\n bio_tags = bio_tags.to(device)\n entity_ids = entity_ids.to(device)\n # zero the parameter gradients\n optimizer.zero_grad()\n # with autograd.detect_anomaly():\n\n # forward + backward + optimize\n input_ids = tokenized_text['input_ids'].to(device)\n token_type_ids = tokenized_text['token_type_ids'].to(device)\n attention_mask = tokenized_text['attention_mask'].to(device)\n \n mention_pred, entity_pred = model(input_ids,token_type_ids,attention_mask)\n loss = criterion(mention_pred, entity_pred, bio_tags, entity_ids, attention_mask, \n pretrained_entity_embeddings, device)\n loss.backward()\n optimizer.step()\n scheduler.step()\n # print statistics\n running_loss += loss.item()\n if i % 5 == 4: # print every 2000 mini-batches\n print('[%d, %5d] train loss: %.3f' %\n (epoch + 1, i + 1, running_loss / 5))\n running_loss = 0.0\n print('Finished Training')\n\ndef evaluate(model, test_loader, criterion, epoch, pretrained_entity_embeddings, device):\n \n model.eval()\n total_loss = 0.0\n nb_classes = 2 \n total_micro_conf_mat = torch.zeros(nb_classes, nb_classes)\n # micro_confusion_matrix [[TP, FN], [FP, TN]]\n total_macro_conf_mat = []\n\n with torch.no_grad():\n running_loss = 0.0\n \n for i, data in enumerate(test_loader, 0):\n \n tokenized_text, bio_tags, entity_ids = data\n bio_tags = bio_tags.to(device)\n entity_ids = entity_ids.to(device)\n \n # with autograd.detect_anomaly():\n\n # forward + backward + optimize\n input_ids = tokenized_text['input_ids'].to(device)\n token_type_ids = tokenized_text['token_type_ids'].to(device)\n attention_mask = tokenized_text['attention_mask'].to(device)\n\n mention_pred, entity_pred = model(input_ids,token_type_ids,attention_mask)\n # print(\"ent pred\", entity_pred.shape)\n loss = criterion(mention_pred, entity_pred, bio_tags, entity_ids, \n attention_mask, pretrained_entity_embeddings, device)\n # print(loss)\n\n\n micro_conf_mat, macro_conf_mat = 
compute_conf_matrices(mention_pred, entity_pred, bio_tags, entity_ids, pretrained_entity_embeddings)\n total_micro_conf_mat += micro_conf_mat\n total_macro_conf_mat.extend(macro_conf_mat)\n\n running_loss += loss.item()\n total_loss += loss.item()\n if i % 5 == 4: # print every 2000 mini-batches\n print('[%d, %5d] test loss: %.3f' %\n (epoch + 1, i + 1, running_loss / 5))\n running_loss = 0.0\n\n\n micro_metrics, macro_metrics = compute_metrics([\"precision\", \"recall\",\"f1\"], total_micro_conf_mat, total_macro_conf_mat)\n print('-' * 89)\n print(f\"Micro-Precision: {micro_metrics['precision']}, Macro-Precision: {macro_metrics['precision']}\\n\")\n print(f\"Micro-Recall: {micro_metrics['recall']}, Macro-Recall: {macro_metrics['recall']}\\n\")\n print(f\"Micro-F1: {micro_metrics['f1']}, Macro-F1: {macro_metrics['f1']}\\n\") \n print('-' * 89)\n\n # whatever i is at the end is the len of test_loader\n return total_loss / i\n" ]
[ [ "torch.zeros", "torch.squeeze", "torch.sum", "torch.linalg.norm", "torch.tensor", "torch.no_grad", "numpy.diff", "torch.nn.functional.cosine_similarity", "numpy.array", "numpy.where", "torch.ones_like", "numpy.vstack", "torch.argmax" ] ]
stephen-hoover/portdash
[ "f358bcc53a17034dcf52c8c9d650a7fd88b4fec2" ]
[ "portdash/io/quotes.py" ]
[ "\"\"\"Retrieve information about assets from external sources\n\"\"\"\nfrom datetime import datetime\nimport logging\nimport os\nfrom typing import Tuple, Union\n\nimport pandas as pd\n\nfrom portdash.io._alphavantage import fetch_from_web, InvalidAPICall\n\nDEFAULT_QUOTE_SOURCE = \"alphavantage\"\nVALID_QUOTE_SOURCES = [\"alphavantage\", \"csv\", \"const\"]\n\nlog = logging.getLogger(__name__)\n\n\ndef _split_source_str(source: str) -> Tuple[str, Union[str, None]]:\n if not source:\n return DEFAULT_QUOTE_SOURCE, None\n tokens = source.split(\":\")\n if len(tokens) == 1:\n return tokens[0], None\n elif len(tokens) == 2:\n return tokens[0], tokens[1]\n else:\n raise ValueError(f\"Unrecognized source string: {source}\")\n\n\ndef fetch(\n source: str, symbol: str, start_time: datetime = None\n) -> Union[pd.DataFrame, None]:\n \"\"\"Return a table of historical security valuations\n\n Use the `source` string to dispatch the lookup to the appropriate source.\n\n Parameters\n ----------\n source : str\n The source of the quotes, formatted as \"[source]:[args\", e.g.\n \"csv:/path/to/file\".\n symbol : str\n The stock ticker symbol\n start_time : datetime, optional\n If supplied, start the output table at this date.\n The default will return all available historical quotes.\n\n Returns\n -------\n pd.DataFrame\n A table of historical quotes, indexed by the datetime of the quote.\n The index name will be \"date\", and the table will have at least\n columns named \"price\", \"dividend_amount\", and \"volume\".\n The return value will be `None` if there's an error while attempting\n to read quotes from the web.\n \"\"\"\n source_name, source_arg = _split_source_str(source)\n if source_name == \"alphavantage\":\n try:\n return fetch_from_web(symbol=symbol, start_time=start_time).rename(\n columns={\"close\": \"price\"}\n )\n except InvalidAPICall:\n log.exception(f\"Unable to fetch quotes for {symbol}\")\n return\n elif source_name == \"const\":\n log.debug(f\"Filling {symbol} with constant \" f\"quotes values of {source_arg}.\")\n return _fetch_from_const(float(source_arg), start_time=start_time)\n elif source_name == \"csv\":\n log.debug(f\"Reading {symbol} values from {source_arg}.\")\n return _fetch_from_csv(filename=source_arg, start_time=start_time)\n else:\n raise ValueError(\n f\"Unknown source: {source}. 
Source name must be \"\n f\"one of {VALID_QUOTE_SOURCES}.\"\n )\n\n\ndef _fetch_from_const(value: float, start_time: datetime) -> pd.DataFrame:\n \"\"\"Return a table of historical security valuations, all with\n the same constant quote value and no distributions.\n\n Parameters\n ----------\n value : float\n Assign this value to the security at every date.\n start_time : datetime, optional\n If supplied, start the output table at this date.\n The default will return all available historical quotes.\n\n Returns\n -------\n pd.DataFrame\n A table of historical quotes, indexed by the datetime of the quote\n \"\"\"\n index = pd.date_range(\n start_time or \"2015-01-01\", datetime.today(), freq=\"D\"\n ).rename(\"date\")\n return pd.DataFrame(\n {\"price\": value, \"volume\": 0.0, \"dividend_amount\": 0.0}, index=index\n )\n\n\ndef _fetch_from_csv(filename: str, start_time: datetime) -> pd.DataFrame:\n \"\"\"Return a table of historical security valuations read from a CSV.\n\n Parameters\n ----------\n filename : str\n Path to the csv with the historical quote data.\n start_time : datetime, optional\n If supplied, start the output table at this date.\n The default will return all available historical quotes.\n\n Returns\n -------\n pd.DataFrame\n A table of historical quotes, indexed by the datetime of the quote\n \"\"\"\n df = pd.read_csv(\n os.path.expanduser(filename),\n index_col=0,\n parse_dates=True,\n infer_datetime_format=True,\n )\n df = df.rename(columns={\"close\": \"price\"})\n df.index.name = \"date\"\n if start_time:\n df = df.loc[df.index >= start_time]\n return df\n" ]
[ [ "pandas.DataFrame" ] ]
shibing624/cvnet
[ "5c5c688b530f07688cda85705591634286f0b848" ]
[ "ocr/create_imdb_dataset.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\n@author:XuMing(xuming624@qq.com)\n@description: \n\"\"\"\n\n\"\"\" a modified version of CRNN torch repository https://github.com/bgshih/crnn/blob/master/tool/create_dataset.py \"\"\"\n\nimport os\n\nimport cv2\nimport fire\nimport lmdb\nimport numpy as np\n\n\ndef is_image_valid(imageBin):\n if imageBin is None:\n return False\n imageBuf = np.frombuffer(imageBin, dtype=np.uint8)\n img = cv2.imdecode(imageBuf, cv2.IMREAD_GRAYSCALE)\n imgH, imgW = img.shape[0], img.shape[1]\n if imgH * imgW == 0:\n return False\n return True\n\n\ndef write_cache(env, cache):\n with env.begin(write=True) as txn:\n for k, v in cache.items():\n txn.put(k, v)\n\n\ndef create_dataset(input_path, gtFile, output_path, check_valid=True):\n \"\"\"\n Create LMDB dataset for training and evaluation.\n ARGS:\n input_path : input folder path where starts imagePath\n output_path : LMDB output path\n gtFile : list of image path and label\n check_valid : if true, check the validity of every image\n \"\"\"\n os.makedirs(output_path, exist_ok=True)\n env = lmdb.open(output_path, map_size=1099511627776)\n cache = {}\n cnt = 1\n\n with open(gtFile, 'r', encoding='utf-8') as data:\n datalist = data.readlines()\n\n nSamples = len(datalist)\n for i in range(nSamples):\n imagePath, label = datalist[i].strip('\\n').split('\\t')\n imagePath = os.path.join(input_path, imagePath)\n\n # # only use alphanumeric data\n # if re.search('[^a-zA-Z0-9]', label):\n # continue\n\n if not os.path.exists(imagePath):\n print('%s does not exist' % imagePath)\n continue\n with open(imagePath, 'rb') as f:\n imageBin = f.read()\n if check_valid:\n try:\n if not is_image_valid(imageBin):\n print('%s is not a valid image' % imagePath)\n continue\n except:\n print('error occured', i)\n with open(output_path + '/error_image_log.txt', 'a') as log:\n log.write('%s-th image data occured error\\n' % str(i))\n continue\n\n imageKey = 'image-%09d'.encode() % cnt\n labelKey = 'label-%09d'.encode() % cnt\n cache[imageKey] = imageBin\n cache[labelKey] = label.encode()\n\n if cnt % 1000 == 0:\n write_cache(env, cache)\n cache = {}\n print('Written %d / %d' % (cnt, nSamples))\n cnt += 1\n nSamples = cnt - 1\n cache['num-samples'.encode()] = str(nSamples).encode()\n write_cache(env, cache)\n print('Created dataset with %d samples' % nSamples)\n\n\nif __name__ == '__main__':\n fire.Fire(create_dataset)\n" ]
[ [ "numpy.frombuffer" ] ]
JNMaree/solvdoku
[ "d7cbce8618b5a94db8781d88cf3db102e728f4f6" ]
[ "src/numerical/1D_FEM.py" ]
[ "import numpy\nimport matplotlib.pyplot as plot\n\nfrom base.nodespace_1D import NodeSpace1D\nfrom base.elementspace_1D import ElementSpace1D\nfrom base.polynomial import Polynomial\nfrom base.matrix import Matrix\nfrom gaussian_quadrature import GaussianQuad\n\nclass FiniteElementMethod:\n\n # Define the 1D mesh for the Finite Element Method\n mesh = [] # ElementSpace\n\n # Define the global stiffness matrix (K)\n material_matrix = [] # Matrix(square)\n \n # Define the global (RHS) force matrix (F)\n force_vector = [] # Matrix(vector)\n \n # Define the global (LHS) solution matrix (U)\n solution_space = [] # Matrix(vector)\n \n # Define the material properties for the material used by the \n material_function = [] # Polynomial\n\n # Define the Gaussian Quaqdrature positions & weights:\n gaussian = [] # GaussianQuad instance\n\n def __init__(self, element_space, mat_property_func, bc_type1, bc_type2, gauss_order = 2):\n self.mesh = element_space\n self.material_matrix = Matrix(numpy.zeros((element_space.n_nodes, element_space.n_nodes)))\n\n # Define linear material property function\n self.material_function = mat_property_func\n\n # Define the solution space to accomodate the initial type1 boundary conditions\n if isinstance(bc_type1, NodeSpace1D):\n self.solution_space = Matrix(bc_type1.nodes)\n elif isinstance(bc_type1, numpy.ndarray):\n self.solution_space = Matrix(bc_type1)\n elif isinstance(bc_type1, Matrix):\n self.solution_space = bc_type1\n else:\n raise TypeError(\"bc_type1: Unknown Type\")\n\n # Define the force vector to include the initial type2 boundary conditions\n if isinstance(bc_type2, NodeSpace1D):\n self.force_vector = Matrix(bc_type2.nodes)\n elif isinstance(bc_type2, numpy.ndarray):\n self.force_vector = Matrix(bc_type2)\n elif isinstance(bc_type2, Matrix):\n self.force_vector = bc_type2\n else:\n raise TypeError(\"bc_type2: Unknown Type\")\n\n self.gaussian = GaussianQuad(gauss_order)\n #print(f\"self_gaussian:{self.gaussian}\")\n \n # Setup the matrices for solving the equations\n def setup(self):\n #print(\"setup_mesh:\", self.mesh)\n #print(\"element_type:\", type(self.mesh.elements))\n\n # setup the material matrix, K\n for e in range(self.mesh.n_elements):\n\n # Set Temporary Nodes A,B for element [e]\n # - For Setting up Stiffness Matrix\n nodeA = int( self.mesh.elements[e, 0] )\n nodeB = int( self.mesh.elements[e, 1] )\n xA = self.mesh.nodes[nodeA]\n xB = self.mesh.nodes[nodeB]\n dx = xB - xA\n #print(f\"elem_{e}|A:{nodeA}, B:{nodeB}|xA:{xA}, xB:{xB}, dX:{dx}\")\n\n for q in range(self.gaussian.order):\n xDim = (dx/2) + self.gaussian.quadrature[q, 0] * (dx/2)\n xQ = xA + xDim\n wQ = self.gaussian.quadrature[q, 1] * dx\n\n # Generate and add local matrices to global matrix\n # - loop through i in stiffness_matrix[i,j]\n for i in range(2):\n # define function & gradient depending on side of Gauss point\n if i == 0:\n fi = (xQ - xB) / (-dx)\n fi_prime = 1.0 / (-dx)\n else:\n fi = (xQ - xA) / (dx)\n fi_prime = 1.0 / (dx)\n \n # Add RHS conditions to force_matrix\n self.force_vector[e + i] += wQ * fi * 16\n\n # loop through j in stiffness_matrix[i,j]\n for j in range(2):\n # Define gradient depending on side of Gauss point\n if j == 0:\n fj_prime = 1.0 / (-dx)\n else:\n fj_prime = 1.0 / (dx)\n\n # Add Result to existing stiffness values in stiffness matrix \n self.material_matrix[e + i, e + j] += wQ * fi_prime * fj_prime\n \n # Set constant values in matrix to enforce boundary conditions\n self.material_matrix[0,0] = 1.0\n self.material_matrix[self.mesh.n_nodes - 
1, self.mesh.n_nodes - 1] = 1.0\n\n # Linear interpolation provides an estimate for a Y-value between\n # two existing X,Y pairs based on a linear function between them.\n def linear_interpolationY(self, x_0, y_0, x_2, y_2, X1):\n return y_0 + (X1 - x_0)*(y_2 - y_0)/(x_2 - x_0)\n\n # The Partial Differential Equations are solved using ...\n def solve(self):\n self.solution_space = self.material_matrix.get_inverse() * self.force_vector\n #print(\"Material_matrix:\", self.material_matrix)\n #print(\"Inverse_material_matrix:\", self.material_matrix.get_inverse())\n #print(\"Force_vector:\", self.force_vector)\n print(\"Solution_space:\", self.solution_space)\n\n # Plot the solution_space values on the respective node coordinates\n def plot(self):\n plot.plot(self.mesh.nodes, self.solution_space.matrix)\n plot.xlabel(\"X Coordinates\")\n plot.ylabel(\"Degree-of-Freedom Value\")\n plot.show()\n\n# Test methods and classes\ndef main():\n # Heat transfer test method\n # Create mesh using parameters:\n x_dimension = 12 # Distance in meters\n n_elements = 8 # Number of finite elements in domain\n start_pos = 0 # First Node position\n nodes_per_element = 2 # Number of Nodes per element \n\n # Create mesh of discrete elements that consist of nodes_per_element\n fem_espace = ElementSpace1D(n_elements, x_dimension, start_pos, nodes_per_element)\n \n # Analysis Conditions:\n # - Material Properties:\n K = Polynomial([0, 20]) # Stiffness Coefficient Polynomial \n # - (Material Property Function)\n\n # - Type 1 (Dirichlet) boundary conditions:\n Type1_BC = 24 # Temperature specification\n Type1_Nodes = [0] # Node indices subject to Type 1 BC\n BC_Type1 = NodeSpace1D( numpy.zeros(fem_espace.n_nodes) )\n BC_Type1.assign_values(Type1_BC, Type1_Nodes)\n print(BC_Type1)\n\n # - Type 2 (Neumann) boundary condition:\n Type2_BC = 16 # Heat Flux Specification\n Type2_Nodes = [n_elements] # Node indices subject to Type 2 BC\n BC_Type2 = NodeSpace1D( numpy.zeros(fem_espace.n_nodes) )\n BC_Type2.assign_values(Type2_BC, Type2_Nodes)\n print(BC_Type2)\n\n # Numerical Conditions:\n Gaussian_order = 3\n\n # Calculate exact linear solution for verification:\n tLeft = Type1_BC\n q = Type2_BC\n k = K.evaluate(1)\n x = x_dimension\n tRight = (q*x) / (k) + tLeft\n n = n_elements + 1\n # Printout to verify with Solution_Space tR=33.6\n print(f\"LHS|t0:{tLeft} --- x:{x} --- q:{q} --- k:{k} --- RHS|t{n}:{tRight}\\n\")\n\n # Create Instance of FEM analysis\n FEM = FiniteElementMethod(fem_espace, K, BC_Type1, BC_Type2, Gaussian_order)\n #print(\"FEM_setup..........................................................\")\n FEM.setup()\n #print(\"FEM_solve..........................................................\")\n FEM.solve()\n #print(\"FEM_plot...........................................................\")\n FEM.plot()\n\n \n\nif __name__ == \"__main__\":\n main()" ]
[ [ "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "numpy.zeros", "matplotlib.pyplot.ylabel" ] ]
haocwang/Deep-Learning-Specialization
[ "e2122a8e6be71b412a616b56f19c9b492e97e82b" ]
[ "Improving Deep Neural Networks/Week 1 Practical aspects of Deep Learning/Assignments/Regularization/reg_utils.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\nimport h5py\nimport sklearn\nimport sklearn.datasets\nimport sklearn.linear_model\nimport scipy.io\n\ndef sigmoid(x):\n \"\"\"\n Compute the sigmoid of x\n\n Arguments:\n x -- A scalar or numpy array of any size.\n\n Return:\n s -- sigmoid(x)\n \"\"\"\n s = 1/(1+np.exp(-x))\n return s\n\ndef relu(x):\n \"\"\"\n Compute the relu of x\n\n Arguments:\n x -- A scalar or numpy array of any size.\n\n Return:\n s -- relu(x)\n \"\"\"\n s = np.maximum(0,x)\n \n return s\n\ndef load_planar_dataset(seed):\n \n np.random.seed(seed)\n \n m = 400 # number of examples\n N = int(m/2) # number of points per class\n D = 2 # dimensionality\n X = np.zeros((m,D)) # data matrix where each row is a single example\n Y = np.zeros((m,1), dtype='uint8') # labels vector (0 for red, 1 for blue)\n a = 4 # maximum ray of the flower\n\n for j in range(2):\n ix = range(N*j,N*(j+1))\n t = np.linspace(j*3.12,(j+1)*3.12,N) + np.random.randn(N)*0.2 # theta\n r = a*np.sin(4*t) + np.random.randn(N)*0.2 # radius\n X[ix] = np.c_[r*np.sin(t), r*np.cos(t)]\n Y[ix] = j\n \n X = X.T\n Y = Y.T\n\n return X, Y\n\ndef initialize_parameters(layer_dims):\n \"\"\"\n Arguments:\n layer_dims -- python array (list) containing the dimensions of each layer in our network\n \n Returns:\n parameters -- python dictionary containing your parameters \"W1\", \"b1\", ..., \"WL\", \"bL\":\n W1 -- weight matrix of shape (layer_dims[l], layer_dims[l-1])\n b1 -- bias vector of shape (layer_dims[l], 1)\n Wl -- weight matrix of shape (layer_dims[l-1], layer_dims[l])\n bl -- bias vector of shape (1, layer_dims[l])\n \n Tips:\n - For example: the layer_dims for the \"Planar Data classification model\" would have been [2,2,1]. \n This means W1's shape was (2,2), b1 was (1,2), W2 was (2,1) and b2 was (1,1). 
Now you have to generalize it!\n - In the for loop, use parameters['W' + str(l)] to access Wl, where l is the iterative integer.\n \"\"\"\n \n np.random.seed(3)\n parameters = {}\n L = len(layer_dims) # number of layers in the network\n\n for l in range(1, L):\n parameters['W' + str(l)] = np.random.randn(layer_dims[l], layer_dims[l-1]) / np.sqrt(layer_dims[l-1])\n parameters['b' + str(l)] = np.zeros((layer_dims[l], 1))\n \n assert(parameters['W' + str(l)].shape == (layer_dims[l], layer_dims[l-1]))\n assert(parameters['b' + str(l)].shape == (layer_dims[l], 1))\n\n \n return parameters\n\ndef forward_propagation(X, parameters):\n \"\"\"\n Implements the forward propagation (and computes the loss) presented in Figure 2.\n \n Arguments:\n X -- input dataset, of shape (input size, number of examples)\n parameters -- python dictionary containing your parameters \"W1\", \"b1\", \"W2\", \"b2\", \"W3\", \"b3\":\n W1 -- weight matrix of shape ()\n b1 -- bias vector of shape ()\n W2 -- weight matrix of shape ()\n b2 -- bias vector of shape ()\n W3 -- weight matrix of shape ()\n b3 -- bias vector of shape ()\n \n Returns:\n loss -- the loss function (vanilla logistic loss)\n \"\"\"\n \n # retrieve parameters\n W1 = parameters[\"W1\"]\n b1 = parameters[\"b1\"]\n W2 = parameters[\"W2\"]\n b2 = parameters[\"b2\"]\n W3 = parameters[\"W3\"]\n b3 = parameters[\"b3\"]\n \n # LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID\n Z1 = np.dot(W1, X) + b1\n A1 = relu(Z1)\n Z2 = np.dot(W2, A1) + b2\n A2 = relu(Z2)\n Z3 = np.dot(W3, A2) + b3\n A3 = sigmoid(Z3)\n \n cache = (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3)\n \n return A3, cache\n\ndef backward_propagation(X, Y, cache):\n \"\"\"\n Implement the backward propagation presented in figure 2.\n \n Arguments:\n X -- input dataset, of shape (input size, number of examples)\n Y -- true \"label\" vector (containing 0 if cat, 1 if non-cat)\n cache -- cache output from forward_propagation()\n \n Returns:\n gradients -- A dictionary with the gradients with respect to each parameter, activation and pre-activation variables\n \"\"\"\n m = X.shape[1]\n (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3) = cache\n \n dZ3 = A3 - Y\n dW3 = 1./m * np.dot(dZ3, A2.T)\n db3 = 1./m * np.sum(dZ3, axis=1, keepdims = True)\n \n dA2 = np.dot(W3.T, dZ3)\n dZ2 = np.multiply(dA2, np.int64(A2 > 0))\n dW2 = 1./m * np.dot(dZ2, A1.T)\n db2 = 1./m * np.sum(dZ2, axis=1, keepdims = True)\n \n dA1 = np.dot(W2.T, dZ2)\n dZ1 = np.multiply(dA1, np.int64(A1 > 0))\n dW1 = 1./m * np.dot(dZ1, X.T)\n db1 = 1./m * np.sum(dZ1, axis=1, keepdims = True)\n \n gradients = {\"dZ3\": dZ3, \"dW3\": dW3, \"db3\": db3,\n \"dA2\": dA2, \"dZ2\": dZ2, \"dW2\": dW2, \"db2\": db2,\n \"dA1\": dA1, \"dZ1\": dZ1, \"dW1\": dW1, \"db1\": db1}\n \n return gradients\n\ndef update_parameters(parameters, grads, learning_rate):\n \"\"\"\n Update parameters using gradient descent\n \n Arguments:\n parameters -- python dictionary containing your parameters:\n parameters['W' + str(i)] = Wi\n parameters['b' + str(i)] = bi\n grads -- python dictionary containing your gradients for each parameters:\n grads['dW' + str(i)] = dWi\n grads['db' + str(i)] = dbi\n learning_rate -- the learning rate, scalar.\n \n Returns:\n parameters -- python dictionary containing your updated parameters \n \"\"\"\n \n n = len(parameters) // 2 # number of layers in the neural networks\n\n # Update rule for each parameter\n for k in range(n):\n parameters[\"W\" + str(k+1)] = parameters[\"W\" + str(k+1)] - learning_rate * grads[\"dW\" + str(k+1)]\n 
parameters[\"b\" + str(k+1)] = parameters[\"b\" + str(k+1)] - learning_rate * grads[\"db\" + str(k+1)]\n \n return parameters\n\ndef predict(X, y, parameters):\n \"\"\"\n This function is used to predict the results of a n-layer neural network.\n \n Arguments:\n X -- data set of examples you would like to label\n parameters -- parameters of the trained model\n \n Returns:\n p -- predictions for the given dataset X\n \"\"\"\n \n m = X.shape[1]\n p = np.zeros((1,m), dtype = np.int)\n \n # Forward propagation\n a3, caches = forward_propagation(X, parameters)\n \n # convert probas to 0/1 predictions\n for i in range(0, a3.shape[1]):\n if a3[0,i] > 0.5:\n p[0,i] = 1\n else:\n p[0,i] = 0\n\n # print results\n\n #print (\"predictions: \" + str(p[0,:]))\n #print (\"true labels: \" + str(y[0,:]))\n print(\"Accuracy: \" + str(np.mean((p[0,:] == y[0,:]))))\n \n return p\n\ndef compute_cost(a3, Y):\n \"\"\"\n Implement the cost function\n \n Arguments:\n a3 -- post-activation, output of forward propagation\n Y -- \"true\" labels vector, same shape as a3\n \n Returns:\n cost - value of the cost function\n \"\"\"\n m = Y.shape[1]\n \n logprobs = np.multiply(-np.log(a3),Y) + np.multiply(-np.log(1 - a3), 1 - Y)\n cost = 1./m * np.nansum(logprobs)\n \n return cost\n\ndef load_dataset():\n train_dataset = h5py.File('datasets/train_catvnoncat.h5', \"r\")\n train_set_x_orig = np.array(train_dataset[\"train_set_x\"][:]) # your train set features\n train_set_y_orig = np.array(train_dataset[\"train_set_y\"][:]) # your train set labels\n\n test_dataset = h5py.File('datasets/test_catvnoncat.h5', \"r\")\n test_set_x_orig = np.array(test_dataset[\"test_set_x\"][:]) # your test set features\n test_set_y_orig = np.array(test_dataset[\"test_set_y\"][:]) # your test set labels\n\n classes = np.array(test_dataset[\"list_classes\"][:]) # the list of classes\n \n train_set_y = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))\n test_set_y = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))\n \n train_set_x_orig = train_set_x_orig.reshape(train_set_x_orig.shape[0], -1).T\n test_set_x_orig = test_set_x_orig.reshape(test_set_x_orig.shape[0], -1).T\n \n train_set_x = train_set_x_orig/255\n test_set_x = test_set_x_orig/255\n\n return train_set_x, train_set_y, test_set_x, test_set_y, classes\n\n\ndef predict_dec(parameters, X):\n \"\"\"\n Used for plotting decision boundary.\n \n Arguments:\n parameters -- python dictionary containing your parameters \n X -- input data of size (m, K)\n \n Returns\n predictions -- vector of predictions of our model (red: 0 / blue: 1)\n \"\"\"\n \n # Predict using forward propagation and a classification threshold of 0.5\n a3, cache = forward_propagation(X, parameters)\n predictions = (a3>0.5)\n return predictions\n\ndef load_planar_dataset(randomness, seed):\n \n np.random.seed(seed)\n \n m = 50\n N = int(m/2) # number of points per class\n D = 2 # dimensionality\n X = np.zeros((m,D)) # data matrix where each row is a single example\n Y = np.zeros((m,1), dtype='uint8') # labels vector (0 for red, 1 for blue)\n a = 2 # maximum ray of the flower\n\n for j in range(2):\n \n ix = range(N*j,N*(j+1))\n if j == 0:\n t = np.linspace(j, 4*3.1415*(j+1),N) #+ np.random.randn(N)*randomness # theta\n r = 0.3*np.square(t) + np.random.randn(N)*randomness # radius\n if j == 1:\n t = np.linspace(j, 2*3.1415*(j+1),N) #+ np.random.randn(N)*randomness # theta\n r = 0.2*np.square(t) + np.random.randn(N)*randomness # radius\n \n X[ix] = np.c_[r*np.cos(t), r*np.sin(t)]\n Y[ix] = j\n \n X = X.T\n Y = Y.T\n\n 
return X, Y\n\ndef plot_decision_boundary(model, X, y):\n # Set min and max values and give it some padding\n x_min, x_max = X[0, :].min() - 1, X[0, :].max() + 1\n y_min, y_max = X[1, :].min() - 1, X[1, :].max() + 1\n h = 0.01\n # Generate a grid of points with distance h between them\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n # Predict the function value for the whole grid\n Z = model(np.c_[xx.ravel(), yy.ravel()])\n Z = Z.reshape(xx.shape)\n # Plot the contour and training examples\n plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)\n plt.ylabel('x2')\n plt.xlabel('x1')\n plt.scatter(X[0, :], X[1, :], c=np.squeeze(y), cmap=plt.cm.Spectral)\n plt.show()\n \ndef load_2D_dataset():\n data = scipy.io.loadmat('datasets/data.mat')\n train_X = data['X'].T\n train_Y = data['y'].T\n test_X = data['Xval'].T\n test_Y = data['yval'].T\n\n plt.scatter(train_X[0, :], train_X[1, :], c=np.squeeze(train_Y), s=40, cmap=plt.cm.Spectral);\n \n return train_X, train_Y, test_X, test_Y" ]
[ [ "numpy.dot", "matplotlib.pyplot.contourf", "numpy.sqrt", "numpy.linspace", "numpy.squeeze", "numpy.random.randn", "numpy.mean", "numpy.exp", "numpy.square", "numpy.arange", "numpy.sin", "numpy.nansum", "numpy.zeros", "numpy.log", "numpy.int64", "numpy.array", "matplotlib.pyplot.show", "numpy.sum", "matplotlib.pyplot.ylabel", "numpy.maximum", "numpy.random.seed", "numpy.cos", "matplotlib.pyplot.xlabel" ] ]
JakubSakowski/CMI2020
[ "ebe3388544f690a20fab7d926364b67e7f441044" ]
[ "dziadekGnom/main.py" ]
[ "import sys\nimport pandas as pd \n\n# reading the data\ndf = pd.read_csv(sys.stdin, skiprows = 1, names = ['x', 'y'], sep = \" \")\n\n# printing the data\n# print(df)\n\n# finding min and max values\nxmin = df['x'].min()\nxmax = df['x'].max()\nymin = df['y'].min()\nymax = df['y'].max()\n# print (xmin, xmax, ymin, ymax)\n\n# calculating the perimeter of the fence\\\na = xmax - xmin + 4 \nb = ymax - ymin + 4\nfencePerimeter = (2 * a + 2 * b)\n\n# printing the perimeter of the fence\nprint(fencePerimeter)" ]
[ [ "pandas.read_csv" ] ]
RobRomijnders/bbvi
[ "613c4c9ba79f0b40488fe1d18a0b7f3c023b639f" ]
[ "bbvi/util.py" ]
[ "import numpy as np\nimport tensorflow as tf\nfrom sklearn.datasets import load_wine\nfrom sklearn.model_selection import train_test_split\nfrom scipy.stats import multivariate_normal\n\n\nclass DataLoader:\n \"\"\"\n Small wrapper to abstract all code relating to loading data\n \"\"\"\n def __init__(self, batch_size=16):\n # Wine data set\n dataset = load_wine()\n\n # For now, just create a binary classification problem\n selection = dataset.target < 2\n\n X, y = dataset.data[selection], dataset.target[selection]\n y = self.random_flip(y, 0.1)\n\n # Dummy data set\n # Uncomment these lines to create a dummy data sets of two highly separable point clouds\n # num_feat = 8\n # num_half = 500\n # X1 = multivariate_normal(5 * np.ones((8,)), np.eye(num_feat)).rvs(num_half)\n # X2 = multivariate_normal(-5 * np.ones((8,)), np.eye(num_feat)).rvs(num_half)\n #\n # X = np.concatenate((X1, X2), axis=0)\n # y = np.concatenate((np.zeros((num_half)), np.ones((num_half))), axis=0)\n\n self.data = dict()\n self.data['X_train'], self.data['X_test'], self.data['y_train'], self.data['y_test'] = train_test_split(X, y)\n\n self.mean, self.std = None, None\n # self._normalize_data()\n\n self.batch_size = batch_size\n\n @property\n def num_features(self):\n return self.data['X_train'].shape[1]\n\n @staticmethod\n def random_flip(data, portion):\n \"\"\"\n Randomly flip a portion of the binary labels. To spice up the problem a bit :)\n :param data:\n :param portion:\n :return:\n \"\"\"\n # Establish the sizes\n num_samples = len(data)\n num_flip = int(num_samples * portion)\n\n # Select random indices to flip\n idx = np.random.choice(num_samples, num_flip, replace=False)\n\n # Do the flipping\n data[idx] = (data[idx] - 1/2) * -1 + 1/2\n return data\n\n def _normalize_data(self):\n # Calculate the first and second moment from the train data\n self.mean = np.mean(self.data['X_train'], axis=0)\n self.std = np.std(self.data['X_train'], axis=0)\n\n # Standardize the training data\n self.data['X_train'] -= self.mean\n self.data['X_train'] /= self.std\n\n # Standardize the test data\n self.data['X_test'] -= self.mean\n self.data['X_test'] /= self.std\n\n def sample_batch(self, data_split='train'):\n # Sample from batch\n datasplit_size = len(self.data['y_' + data_split])\n idx = np.random.choice(datasplit_size, self.batch_size, replace=False)\n\n return self.data['X_' + data_split][idx], self.data['y_' + data_split][idx]\n\n\ndef get_random_normal_variable(name, shape, dtype=tf.float32, num_samples=13):\n \"\"\"\n Create weight tensors with factorized Gaussian approximation of each element.\n\n Define the standard deviation behind a softplus to enforce positivity\n\n\n Credits for code inspiration: https://github.com/DeNeutoy/bayesian-rnn/\n :param name: Name for the corresponding tf variables\n :param shape: shape for the variable. 
Note that weights are sampled and thus have +1 dimension\n :param dtype: dtype for the variables involved\n :param num_samples: number of samples from the variational distro over W\n :return:\n \"\"\"\n\n # Inverse of a softplus function, so that the value of the standard deviation\n # will be equal to what the user specifies, but we can still enforce positivity\n # by wrapping the standard deviation in the softplus function.\n # standard_dev = tf.log(tf.exp(standard_dev) - 1.0) * tf.ones(shape)\n\n # it's important to initialize variances with care, otherwise the model takes too long to converge\n sigma_min = 1-1/10\n sigma_max = 1+1/10\n\n rho_max_init = tf.log(tf.exp(sigma_max) - 1.0)\n rho_min_init = tf.log(tf.exp(sigma_min) - 1.0)\n std_init = tf.random_uniform_initializer(rho_min_init, rho_max_init)\n\n # Initialize the mean\n mean = tf.get_variable(name + \"_mean\", shape, dtype=dtype)\n\n # Initialize the standard deviation\n pre_sigma = tf.get_variable(name + \"_standard_deviation\",\n shape,\n initializer=std_init,\n dtype=dtype)\n\n standard_deviation = tf.nn.softplus(pre_sigma) + 1e-5\n\n # The famous reparametrization formula for the factorized Gaussian\n noise = tf.random_normal([num_samples] + shape, 0.0, 1.0, dtype)\n weights = mean + standard_deviation * noise\n\n return weights, mean, standard_deviation, pre_sigma, noise" ]
[ [ "tensorflow.get_variable", "tensorflow.random_uniform_initializer", "numpy.random.choice", "sklearn.datasets.load_wine", "sklearn.model_selection.train_test_split", "tensorflow.exp", "numpy.std", "numpy.mean", "tensorflow.nn.softplus", "tensorflow.random_normal" ] ]
coreyauger/py_deep_learning_udemy
[ "39c55d3c59a9bcb39f12bc255d76fd5b52a53e34" ]
[ "data.py" ]
[ "import matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\n\ndf = pd.read_csv('./data/titanic-train.csv')\nprint(type(df))\nprint(df.head())\nprint(df.info())\nprint(df.describe())\n\n#Indexing\nprint(df.iloc[3])\nprint(df.loc[0:4,'Ticket'])\nprint(df['Ticket'].head())\nprint(df[['Embarked', 'Ticket']].head())\n\n#Selections\nprint(df[df['Age'] > 70])\nprint(df['Age'] > 70)\nprint(df.query(\"Age > 70\"))\nprint(df[(df['Age'] == 11) & (df['SibSp'] == 5)])\nprint(df[(df.Age == 11) | (df.SibSp == 5)])\nprint(df.query('(Age == 11) | (SibSp == 5)'))" ]
[ [ "pandas.read_csv" ] ]
borgwang/tinynn-autograd
[ "daf914614236e03b0884a77a5d5df5f033f1d3c4" ]
[ "core/layers.py" ]
[ "\"\"\"Network layers and activation layers.\"\"\"\n\nimport numpy as np\n\nimport core.ops as ops\nfrom core.initializer import XavierUniformInit\nfrom core.initializer import ZerosInit\n\n\nclass Layer(object):\n\n def __init__(self, name):\n self.name = name\n\n self.params, self.grads = {}, {}\n self.is_training = True\n\n def forward(self, inputs):\n raise NotImplementedError\n\n def set_phase(self, phase):\n self.is_training = True if phase == \"TRAIN\" else False\n\n\nclass Dense(Layer):\n\n def __init__(self,\n num_out,\n num_in=None,\n w_init=XavierUniformInit(),\n b_init=ZerosInit()):\n super().__init__(\"Linear\")\n self.initializers = {\"w\": w_init, \"b\": b_init}\n self.shapes = {\"w\": [num_in, num_out], \"b\": [1, num_out]}\n self.params = {\"w\": None, \"b\": None}\n\n self.is_init = False\n if num_in is not None:\n self._init_parameters(num_in)\n\n self.inputs = None\n\n def forward(self, inputs):\n # lazy initialize\n if not self.is_init:\n self._init_parameters(inputs.shape[1])\n\n self.inputs = inputs\n return inputs @ self.params[\"w\"] + self.params[\"b\"]\n\n def _init_parameters(self, input_size):\n self.shapes[\"w\"][0] = input_size\n self.params[\"w\"] = self.initializers[\"w\"](shape=self.shapes[\"w\"])\n self.params[\"w\"].zero_grad()\n self.params[\"b\"] = self.initializers[\"b\"](shape=self.shapes[\"b\"])\n self.params[\"b\"].zero_grad()\n self.is_init = True\n\n\nclass Activation(Layer):\n\n def __init__(self, name):\n super().__init__(name)\n self.inputs = None\n\n def forward(self, inputs):\n self.inputs = inputs\n return self.func(inputs)\n\n def func(self, x):\n raise NotImplementedError\n\n\nclass Sigmoid(Activation):\n\n def __init__(self):\n super().__init__(\"Sigmoid\")\n\n def func(self, x):\n return 1.0 / (1.0 + np.exp(-x))\n\n\nclass Tanh(Activation):\n\n def __init__(self):\n super().__init__(\"Tanh\")\n\n def func(self, x):\n return (1.0 - ops.exp(-x)) / (1.0 + ops.exp(-x))\n\n\nclass ReLU(Activation):\n\n def __init__(self):\n super().__init__(\"ReLU\")\n\n def func(self, x):\n return ops.clip(x, 0.0)\n" ]
[ [ "numpy.exp" ] ]
cdeil/pyregion
[ "83e235f51b41d327cdedf585f255a00af9f46c6d" ]
[ "doc/figures/test_region_drawing2.py" ]
[ "import matplotlib.pyplot as plt\nimport matplotlib.cm as cm\n\nimport pyregion\nimport pyfits\n\n# read in the image\nxray_name=\"pspc_skyview.fits\"\nf_xray = pyfits.open(xray_name)\n\ntry:\n import pywcsgrid2\n ax=pywcsgrid2.subplot(111, header=f_xray[0].header)\nexcept ImportError:\n ax=plt.subplot(111)\n\nax.imshow(f_xray[0].data, cmap=cm.gray, vmin=0., vmax=0.00038, origin=\"lower\")\n\nreg_name = \"test.reg\"\nr = pyregion.open(reg_name).as_imagecoord(header=f_xray[0].header)\n\nfrom pyregion.mpl_helper import properties_func_default\n\n# Use custom function for patch attribute\ndef fixed_color(shape, saved_attrs):\n\n attr_list, attr_dict = saved_attrs\n attr_dict[\"color\"] = \"red\"\n kwargs = properties_func_default(shape, (attr_list, attr_dict))\n\n return kwargs\n\n# select region shape with tag==\"Group 1\"\nr1 = pyregion.ShapeList([rr for rr in r if rr.attr[1].get(\"tag\") == \"Group 1\"])\npatch_list1, artist_list1 = r1.get_mpl_patches_texts(fixed_color)\n\nr2 = pyregion.ShapeList([rr for rr in r if rr.attr[1].get(\"tag\") != \"Group 1\"])\npatch_list2, artist_list2 = r2.get_mpl_patches_texts()\n\nfor p in patch_list1 + patch_list2:\n ax.add_patch(p)\nfor t in artist_list1 + artist_list2:\n ax.add_artist(t)\n\nplt.show()\n\n" ]
[ [ "matplotlib.pyplot.show", "matplotlib.pyplot.subplot" ] ]
jason718/auto-weights
[ "f9f9b4ecf3609dbde759203fa653dee9f2368a6e" ]
[ "libml/gradients_util.py" ]
[ "# coding=utf-8\n# Copyright 2020 The Paper Authors.\n#\n# =======================================================================\n# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Implements the graph generation for computation of gradients.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport contextlib\nimport warnings\n\nimport numpy as np\nfrom six.moves import xrange # pylint: disable=redefined-builtin\n\nimport tensorflow as tf\nfrom tensorflow.python.ops import gen_nn_ops\nfrom tensorflow.python.ops import gen_math_ops\nfrom tensorflow.python.ops.parallel_for import control_flow_ops as cfp\n\nfrom tensorflow.core.framework import attr_value_pb2\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import function as framework_function\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.framework.func_graph import FuncGraph\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import control_flow_state\nfrom tensorflow.python.ops import control_flow_util\nfrom tensorflow.python.ops import functional_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import resource_variable_ops\nfrom tensorflow.python.ops.unconnected_gradients import UnconnectedGradients\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.util import compat\nfrom tensorflow.python.util.tf_export import tf_export\n\n\n# Warn the user if we convert a sparse representation to dense with at\n# least this number of elements.\n_LARGE_SPARSE_NUM_ELEMENTS = 100000000\n\n\ndef _IndexedSlicesToTensor(value, dtype=None, name=None, as_ref=False):\n \"\"\"Converts an IndexedSlices object `value` to a Tensor.\n\n NOTE(mrry): This function is potentially expensive.\n\n Args:\n value: An ops.IndexedSlices object.\n dtype: The dtype of the Tensor to be returned.\n name: Optional name to use for the returned Tensor.\n as_ref: True if a ref is requested.\n\n Returns:\n A dense Tensor representing the values in the given IndexedSlices.\n\n Raises:\n ValueError: If the IndexedSlices does not have the same dtype.\n \"\"\"\n _ = as_ref\n if dtype and not dtype.is_compatible_with(value.dtype):\n raise ValueError(\n \"Tensor conversion requested dtype %s for IndexedSlices with dtype %s\" %\n (dtype.name, value.dtype.name))\n if value.dense_shape is None:\n raise ValueError(\n \"Tensor conversion requested for IndexedSlices without dense_shape: %s\"\n % str(value))\n # TODO(mrry): Consider adding static shape information 
to\n # IndexedSlices, to avoid using numpy here.\n if not context.executing_eagerly():\n dense_shape_value = tensor_util.constant_value(value.dense_shape)\n if dense_shape_value is not None:\n num_elements = np.prod(dense_shape_value)\n if num_elements >= _LARGE_SPARSE_NUM_ELEMENTS:\n warnings.warn(\n \"Converting sparse IndexedSlices to a dense Tensor with %d \"\n \"elements. This may consume a large amount of memory.\" %\n num_elements)\n else:\n warnings.warn(\n \"Converting sparse IndexedSlices to a dense Tensor of unknown shape. \"\n \"This may consume a large amount of memory.\")\n return math_ops.unsorted_segment_sum(\n value.values, value.indices, value.dense_shape[0], name=name)\n\n\nops.register_tensor_conversion_function(ops.IndexedSlices,\n _IndexedSlicesToTensor)\n\n\ndef _MarkReachedOps(from_ops, reached_ops, func_graphs):\n \"\"\"Mark all ops reached from \"from_ops\".\n\n Args:\n from_ops: list of Operations.\n reached_ops: set of Operations.\n func_graphs: list of FuncGraphs. This method will traverse through\n these functions if they capture from_ops or any reachable ops.\n \"\"\"\n queue = collections.deque()\n queue.extend(from_ops)\n while queue:\n op = queue.popleft()\n if op not in reached_ops:\n reached_ops.add(op)\n for output in op.outputs:\n if _IsBackpropagatable(output):\n queue.extend(_Consumers(output, func_graphs))\n\n\ndef _PendingCount(to_ops, from_ops, colocate_gradients_with_ops, func_graphs,\n xs_set):\n \"\"\"Initialize the pending count for ops between two lists of Operations.\n 'pending_count[op]' indicates the number of backprop inputs\n to this operation.\n Args:\n to_ops: list of Operations.\n from_ops: list of Operations.\n colocate_gradients_with_ops: Python bool. See docstring of gradients().\n func_graphs: list of FuncGraphs. This method will traverse through\n these functions if they capture from_ops or any reachable ops. 
This is\n useful if to_ops occur in a function and from_ops are in an outer function\n or graph.\n xs_set: ObjectIdentitySet of Tensors.\n Returns:\n A tuple containing: (1) the subset of to_ops reachable from from_ops by a\n path of zero or more backpropagatable tensors, (2) a mapping from operation\n to the number of backprop inputs to that op, and (3) a ControlFlowState\n object which is not None if the ops between from_ops and to_ops contain\n control flow loops.\n \"\"\"\n # Mark reachable ops from from_ops.\n reached_ops = set()\n _MarkReachedOps(from_ops, reached_ops, func_graphs)\n # X in reached_ops iff X is reachable from from_ops by a path of zero or more\n # backpropagatable tensors.\n\n reachable_to_ops = set(op for op in to_ops if op in reached_ops)\n\n # Mark between ops.\n between_ops = set()\n between_op_list = []\n queue = collections.deque()\n queue.extend(to_ops)\n while queue:\n op = queue.popleft()\n # We are interested in this op.\n if op in reached_ops:\n between_ops.add(op)\n between_op_list.append(op)\n # Clear the boolean so we won't add the inputs again.\n reached_ops.remove(op)\n for inp in _NonEagerInputs(op, xs_set):\n queue.append(inp.op)\n # X in between_ops iff X is on a path of zero or more backpropagatable tensors\n # between from_ops and to_ops\n\n # 'loop_state' is None if there are no while loops.\n loop_state = control_flow_state.MaybeCreateControlFlowState(\n between_op_list, between_ops, colocate_gradients_with_ops)\n\n # Initialize pending count for between ops.\n pending_count = collections.defaultdict(int)\n for op in between_op_list:\n for x in _NonEagerInputs(op, xs_set):\n if x.op in between_ops:\n pending_count[x.op] += 1\n\n return reachable_to_ops, pending_count, loop_state\n\n\ndef _AsList(x):\n return x if isinstance(x, (list, tuple)) else [x]\n\n\ndef _DefaultGradYs(grad_ys,\n ys,\n colocate_gradients_with_ops,\n gradient_uid=\"__unsupported__\"):\n \"\"\"Fill in default values for grad_ys.\n\n Args:\n grad_ys: List of gradients, can contain None.\n ys: List of tensors.\n colocate_gradients_with_ops: If True, try colocating gradients with\n the corresponding op.\n gradient_uid: A unique identifier within the graph indicating\n which invocation of gradients is being executed. 
Used to cluster\n ops for compilation.\n\n Returns:\n A list of gradients to use, without None.\n\n Raises:\n ValueError: If sizes of gradients and inputs don't match\n TypeError: If type of any gradient is not valid for its input.\n \"\"\"\n if len(grad_ys) != len(ys):\n raise ValueError(\"Passed %d grad_ys for %d ys\" % (len(grad_ys), len(ys)))\n grad_ys = ops.convert_n_to_tensor_or_indexed_slices(grad_ys, name=\"grad_y\")\n new_grad_ys = []\n for i in xrange(len(grad_ys)):\n grad_y = grad_ys[i]\n y = ys[i]\n with _maybe_colocate_with(y.op, gradient_uid, colocate_gradients_with_ops):\n if grad_y is None:\n if y.dtype.is_complex:\n raise TypeError(\n \"Gradients of complex tensors must set grad_ys (y.dtype = %r)\" %\n y.dtype)\n new_grad_ys.append(\n array_ops.fill(\n array_ops.shape(y),\n constant_op.constant(1, dtype=y.dtype, name=\"grad_ys_%d\" % i)))\n continue\n if y.dtype.is_floating or y.dtype.is_integer:\n if not grad_y.dtype.is_floating and not grad_y.dtype.is_integer:\n raise TypeError(\n \"Gradient type %s generated for real or \"\n \"integer-valued tensor %s with type %s must be \"\n \"real or integer\" % (dtypes.as_dtype(grad_y.dtype).name, y,\n dtypes.as_dtype(y.dtype).name))\n elif y.dtype.is_complex:\n if not grad_y.dtype.is_complex:\n raise TypeError(\n \"Gradient type %s generated for complex-valued \"\n \"tensor %s with type %s must be real\" % (dtypes.as_dtype(\n grad_y.dtype).name, y, dtypes.as_dtype(y.dtype).name))\n elif y.dtype == dtypes.variant:\n if grad_y.dtype != dtypes.variant:\n raise TypeError(\n \"Gradient type %s generated for variant \"\n \"tensor %s with type %s must be variant\" % (dtypes.as_dtype(\n grad_y.dtype).name, y, dtypes.as_dtype(y.dtype).name))\n elif y.dtype == dtypes.resource:\n # We assume y is the handle of a ResourceVariable. 
The gradient of a\n # ResourceVariable should be a numeric value, not another resource.\n if grad_y.dtype == dtypes.resource:\n raise TypeError(\"Input gradient %s for resource tensor %s should not \"\n \"be a resource\" % (grad_y, y))\n else:\n raise TypeError(\n \"Tensor %s with type %s must be numeric \"\n \"to obtain a default gradient\" % (y, dtypes.as_dtype(y.dtype).name))\n # Create a grad_y tensor in the name scope of the gradient.\n # Required for TensorArrays to identify which gradient call a\n # grad_y value is coming from.\n if isinstance(grad_y, ops.IndexedSlices):\n new_grad_ys.append(\n ops.IndexedSlices(\n indices=(array_ops.identity(\n grad_y.indices, name=\"grad_ys_%d_indices\" % i)\n if isinstance(grad_y.indices, ops.Tensor) else\n grad_y.indices),\n values=(array_ops.identity(\n grad_y.values, name=\"grad_ys_%d_values\" % i) if isinstance(\n grad_y.values, ops.Tensor) else grad_y.values),\n dense_shape=(array_ops.identity(\n grad_y.dense_shape, name=\"grad_ys_%d_shape\" % i)\n if isinstance(grad_y.dense_shape, ops.Tensor) else\n grad_y.dense_shape)))\n else:\n new_grad_ys.append(array_ops.identity(grad_y, name=\"grad_ys_%d\" % i))\n\n return new_grad_ys\n\n\ndef IsTrainable(tensor_or_dtype):\n if isinstance(tensor_or_dtype, ops.Tensor):\n dtype = tensor_or_dtype.dtype\n else:\n dtype = tensor_or_dtype\n dtype = dtypes.as_dtype(dtype)\n return dtype.base_dtype in (dtypes.float16, dtypes.float32, dtypes.float64,\n dtypes.complex64, dtypes.complex128,\n dtypes.resource, dtypes.variant)\n\n\ndef _IsBackpropagatable(tensor):\n if IsTrainable(tensor):\n return True\n dtype = dtypes.as_dtype(tensor.dtype)\n return dtype.base_dtype == dtypes.bfloat16\n\n\ndef _VerifyGeneratedGradients(grads, op):\n \"\"\"Verify that gradients are valid in number and type.\n\n Args:\n grads: List of generated gradients.\n op: Operation for which the gradients where generated.\n\n Raises:\n ValueError: if sizes of gradients and inputs don't match.\n TypeError: if type of any gradient is not valid for its input.\n \"\"\"\n # While ops have inputs added to them during the gradient computation, so we\n # skip the below check. See while_v2 for details.\n if op.type == \"While\": return\n\n if len(grads) != len(op.inputs):\n raise ValueError(\"Num gradients %d generated for op %s do not match num \"\n \"inputs %d\" % (len(grads), op.node_def, len(op.inputs)))\n\n\ndef _StopOps(from_ops, stop_gradient_ops, pending_count, xs):\n \"\"\"The set of ops that terminate the gradient computation.\n\n This computes the frontier of the forward graph *before* which backprop\n should stop. Operations in the returned set will not be differentiated.\n This set is defined as the subset of `from_ops` containing ops that have\n no predecessor in `from_ops`. `pending_count` is the result of\n `_PendingCount(xs, from_ops)`. 
An 'op' has predecessors in `from_ops`\n iff pending_count[op] > 0.\n\n In addition, none of `stop_gradient_ops` will be differentiated.\n\n Args:\n from_ops: list of Operations.\n stop_gradient_ops: list of Operations never to backprop through.\n pending_count: mapping from operation to number of backprop inputs.\n xs: list of Tensors.\n\n Returns:\n The set of operations.\n \"\"\"\n stop_ops = set()\n for op in from_ops:\n is_stop_op = True\n for inp in _NonEagerInputs(op, xs):\n if pending_count[inp.op] > 0:\n is_stop_op = False\n break\n if is_stop_op:\n stop_ops.add(op)\n stop_ops.update(op for op in stop_gradient_ops)\n return stop_ops\n\n\n@contextlib.contextmanager\ndef _maybe_colocate_with(op, gradient_uid, colocate_gradients_with_ops): # pylint: disable=invalid-name\n \"\"\"Context to colocate with `op` if `colocate_gradients_with_ops`.\"\"\"\n if colocate_gradients_with_ops:\n with ops._colocate_with_for_gradient(op, gradient_uid): # pylint: disable=protected-access\n yield\n else:\n yield\n\n\ndef _IsPartitionedCall(op):\n return op.type == \"PartitionedCall\" or op.type == \"StatefulPartitionedCall\"\n\n\ndef _SymGrad(op, out_grads):\n \"\"\"Backprop through a function call node op given its outputs' gradients.\"\"\"\n f_in = [x for x in op.inputs] + out_grads\n f_types = [x.dtype for x in op.inputs]\n f = attr_value_pb2.NameAttrList()\n if _IsPartitionedCall(op):\n f.name = op.get_attr(\"f\").name\n else:\n f.name = op.type\n for k in op.node_def.attr:\n f.attr[k].CopyFrom(op.node_def.attr[k])\n # TODO(apassos) use a better dtype here\n in_grads = functional_ops.symbolic_gradient(\n input=f_in,\n Tout=[x if x != dtypes.resource else dtypes.float32 for x in f_types],\n f=f)\n return in_grads\n\n\ndef _MaybeCompile(scope, op, func, grad_fn):\n \"\"\"Compile the calculation in grad_fn if op was marked as compiled.\"\"\"\n scope = scope.rstrip(\"/\").replace(\"/\", \"_\")\n if func is not None:\n xla_compile = func.definition.attr[\"_XlaCompile\"].b\n xla_separate_compiled_gradients = func.definition.attr[\n \"_XlaSeparateCompiledGradients\"].b\n xla_scope = func.definition.attr[\"_XlaScope\"].s.decode()\n else:\n try:\n xla_compile = op.get_attr(\"_XlaCompile\")\n xla_separate_compiled_gradients = op.get_attr(\n \"_XlaSeparateCompiledGradients\")\n xla_scope = op.get_attr(\"_XlaScope\").decode()\n except ValueError:\n return grad_fn() # Exit early\n\n if not xla_compile:\n return grad_fn() # Exit early\n\n # If the gradients are supposed to be compiled separately, we give them a\n # _XlaScope name that is based on the name_scope of the gradients. 
Otherwise\n # they just inherit the existing _XlaScope name, which lets them be merged\n # together with the non-gradient computation.\n if xla_separate_compiled_gradients:\n xla_grad_scope = \"%s_grad_%s\" % (xla_scope, scope)\n else:\n xla_grad_scope = xla_scope\n\n attrs = {\n \"_XlaCompile\": attr_value_pb2.AttrValue(b=xla_compile),\n \"_XlaScope\": attr_value_pb2.AttrValue(s=xla_grad_scope.encode())\n }\n with ops.get_default_graph()._attr_scope(attrs): # pylint: disable=protected-access\n return grad_fn()\n\n\ndef _RaiseNoGradWrtInitialLoopValError(op, from_ops, xs):\n \"\"\"Raises an error if we backprop through a loop var.\"\"\"\n # Find the nearest 'to_op' reachable from 'op' to provide a more helpful error\n # message.\n target_op = None\n queue = collections.deque([op])\n visited = set()\n while queue:\n curr_op = queue.popleft()\n if curr_op in visited: continue\n visited.add(curr_op)\n if curr_op in from_ops:\n target_op = curr_op\n break\n queue.extend(t.op for t in _NonEagerInputs(curr_op, xs))\n assert target_op\n raise ValueError(\n \"Cannot compute gradient inside while loop with respect to op '%s'. \"\n \"We do not support taking the gradient wrt or through the initial value \"\n \"of a loop variable. Gradients can be computed through loop invariants \"\n \"or wrt the input parameters to the loop body.\"\n % target_op.name)\n\n\ndef _IsFunction(graph):\n return (isinstance(graph, FuncGraph) or\n isinstance(graph, framework_function._FuncGraph)) # pylint: disable=protected-access\n\n\ndef _Captures(func_graph):\n if isinstance(func_graph, FuncGraph):\n return func_graph.captures\n else:\n assert isinstance(func_graph, framework_function._FuncGraph) # pylint: disable=protected-access\n return func_graph._captured # pylint: disable=protected-access\n\n\ndef _MaybeCaptured(t):\n \"\"\"If t is a captured value placeholder, returns the original captured value.\n\n Args:\n t: Tensor\n\n Returns:\n A tensor, potentially from a different Graph/FuncGraph.\n \"\"\"\n # pylint: disable=protected-access\n if (not isinstance(t, ops.EagerTensor) and\n _IsFunction(t.op.graph) and t.op.type == \"Placeholder\"):\n for input_t, placeholder_t in _Captures(t.op.graph).items():\n if t == placeholder_t:\n return _MaybeCaptured(input_t)\n # pylint: enable=protected-access\n return t\n\n\ndef _NonEagerInputs(op, xs):\n \"\"\"Returns the inputs of op, crossing closure boundaries where necessary.\n\n Does not return any captured EagerTensors, i.e., the number of tensors\n returned may be less than than the actual number of inputs.\n\n Args:\n op: Operation\n xs: list of Tensors we are differentiating w.r.t.\n\n Returns:\n A list of tensors. The tensors may be from multiple Graph/FuncGraphs if op\n is in a FuncGraph and has captured inputs.\n \"\"\"\n return [t for t in _Inputs(op, xs) if not isinstance(t, ops.EagerTensor)]\n\n\n# TODO(skyewm): plumbing xs through everywhere is ugly, consider making\n# _GradientsHelper a class with xs as a member variable.\ndef _Inputs(op, xs):\n \"\"\"Returns the inputs of op, crossing closure boundaries where necessary.\n\n Args:\n op: Operation\n xs: list of Tensors we are differentiating w.r.t.\n\n Returns:\n A list of tensors. The tensors may be from multiple Graph/FuncGraphs if op\n is in a FuncGraph and has captured inputs.\n \"\"\"\n if _IsFunction(op.graph): # pylint: disable=protected-access\n inputs = []\n for t in op.inputs:\n # If we're differentiating w.r.t. `t`, do not attempt to traverse through\n # it to a captured value. 
The algorithm needs to \"see\" `t` in this case,\n # even if it's a function input for a captured value, whereas usually we'd\n # like to traverse through these closures as if the captured value was the\n # direct input to op.\n if t not in xs:\n t = _MaybeCaptured(t)\n inputs.append(t)\n return inputs\n else:\n return op.inputs\n\n\ndef _Consumers(t, func_graphs):\n \"\"\"Returns the consumers of t, crossing closure boundaries where necessary.\n\n Args:\n t: Tensor\n func_graphs: a list of FuncGraphs that may have captured t.\n\n Returns:\n A list of tensors. The tensors will be from the current graph and/or\n func_graphs.\n \"\"\"\n consumers = t.consumers()\n for func in func_graphs:\n for input_t, placeholder in _Captures(func).items():\n if input_t == t:\n consumers.extend(_Consumers(placeholder, func_graphs))\n return consumers\n\n\ndef _GradientsHelper(ys,\n xs,\n grad_ys=None,\n name=\"gradients\",\n colocate_gradients_with_ops=False,\n gate_gradients=False,\n aggregation_method=None,\n stop_gradients=None,\n unconnected_gradients=UnconnectedGradients.NONE,\n src_graph=None):\n \"\"\"Implementation of gradients().\"\"\"\n if context.executing_eagerly():\n raise RuntimeError(\"tf.gradients is not supported when eager execution \"\n \"is enabled. Use tf.GradientTape instead.\")\n if src_graph is None:\n src_graph = ops.get_default_graph()\n try:\n unconnected_gradients = UnconnectedGradients(unconnected_gradients)\n except ValueError:\n raise ValueError(\n \"Unknown value for unconnected_gradients: %r\" % unconnected_gradients)\n\n # If src_graph is a _FuncGraph (i.e. a function body), gather it and all\n # ancestor graphs. This is necessary for correctly handling captured values.\n func_graphs = []\n curr_graph = src_graph\n while _IsFunction(curr_graph):\n func_graphs.append(curr_graph)\n if isinstance(curr_graph, FuncGraph):\n curr_graph = curr_graph.outer_graph\n else:\n assert isinstance(curr_graph, framework_function._FuncGraph) # pylint: disable=protected-access\n curr_graph = curr_graph._outer_graph # pylint: disable=protected-access\n\n ys = _AsList(ys)\n xs = _AsList(xs)\n stop_gradients = [] if stop_gradients is None else _AsList(stop_gradients)\n if grad_ys is None:\n grad_ys = [None] * len(ys)\n else:\n grad_ys = _AsList(grad_ys)\n\n with ops.name_scope(\n name, \"gradients\",\n list(ys) + list(xs) + list(stop_gradients) + list(grad_ys)) as grad_scope:\n # Get a uid for this call to gradients that can be used to help\n # cluster ops for compilation.\n gradient_uid = ops.get_default_graph().unique_name(\"uid\")\n ys = ops.convert_n_to_tensor_or_indexed_slices(ys, name=\"y\")\n xs = [\n x.handle if resource_variable_ops.is_resource_variable(x) else x\n for x in xs\n ]\n xs = ops.internal_convert_n_to_tensor_or_indexed_slices(\n xs, name=\"x\", as_ref=True)\n grad_ys = _DefaultGradYs(grad_ys, ys, colocate_gradients_with_ops,\n gradient_uid)\n\n # The approach we take here is as follows: Create a list of all ops in the\n # subgraph between the ys and xs. Visit these ops in reverse order of ids\n # to ensure that when we visit an op the gradients w.r.t its outputs have\n # been collected. 
Then aggregate these gradients if needed, call the op's\n # gradient function, and add the generated gradients to the gradients for\n # its input.\n\n # Initialize the pending count for ops in the connected subgraph from ys\n # to the xs.\n to_ops = [t.op for t in ys]\n from_ops = [t.op for t in xs]\n stop_gradient_ops = [t.op for t in stop_gradients]\n reachable_to_ops, pending_count, loop_state = _PendingCount(\n to_ops, from_ops, colocate_gradients_with_ops, func_graphs, xs)\n\n # Iterate over the collected ops.\n #\n # grads: op => list of gradients received on each output endpoint of the\n # op. The gradients for each endpoint are initially collected as a list.\n # When it is time to call the op's gradient function, for each endpoint we\n # aggregate the list of received gradients into a Add() Operation if there\n # is more than one.\n grads = {}\n out_dict = {}\n # Add the initial gradients for the ys.\n for y, grad_y in zip(ys, grad_ys):\n _SetGrad(grads, y, grad_y)\n\n # Initialize queue with to_ops.\n queue = collections.deque()\n # Add the ops in 'to_ops' into the queue.\n to_ops_set = set()\n for op in to_ops:\n # 'ready' handles the case where one output gradient relies on\n # another output's gradient.\n ready = (pending_count[op] == 0)\n if ready and op not in to_ops_set and op in reachable_to_ops:\n to_ops_set.add(op)\n queue.append(op)\n\n if loop_state:\n loop_exits = loop_state.ProcessUnusedLoopExits(pending_count, to_ops_set)\n for y in loop_exits:\n if IsTrainable(y):\n _SetGrad(grads, y, loop_state.ZerosLikeForExit(y))\n queue.append(y.op)\n stop_ops = _StopOps(from_ops, stop_gradient_ops, pending_count, xs)\n xs_ops = [x.op for x in xs]\n while queue:\n # generate gradient subgraph for op.\n op = queue.popleft()\n # if op in xs_ops: \n # return {op: op}, _GetGrads(grads, op)\n with _maybe_colocate_with(op, gradient_uid, colocate_gradients_with_ops):\n if loop_state:\n loop_state.EnterGradWhileContext(op, before=True)\n out_grads = _AggregatedGrads(grads, op, gradient_uid, loop_state,\n aggregation_method)\n if loop_state:\n loop_state.ExitGradWhileContext(op, before=True)\n grad_fn = None\n func_call = None\n is_partitioned_call = _IsPartitionedCall(op)\n # pylint: disable=protected-access\n is_func_call = (\n src_graph._is_function(op.type) or is_partitioned_call)\n # pylint: enable=protected-access\n has_out_grads = any(isinstance(g, ops.Tensor) or g for g in out_grads)\n if has_out_grads and (op not in stop_ops):\n try:\n grad_fn = ops.get_gradient_function(op)\n except LookupError:\n if is_func_call:\n if is_partitioned_call:\n func_call = src_graph._get_function( # pylint: disable=protected-access\n compat.as_bytes(op.get_attr(\"f\").name))\n else:\n func_call = src_graph._get_function(op.type) # pylint: disable=protected-access\n # Note that __defun is not set if the graph is\n # imported. If it's set, we prefer to access the original\n # defun.\n func_call = getattr(op, \"__defun\", func_call)\n grad_fn = func_call.python_grad_func\n else:\n raise LookupError(\n \"No gradient defined for operation '%s' (op type: %s)\" %\n (op.name, op.type))\n if loop_state:\n loop_state.EnterGradWhileContext(op, before=False)\n\n # NOTE(skyewm): We don't support computing gradients wrt a loop variable\n # unless it's within the context of a single iteration (i.e. the\n # gradient is wrt to the loop parameter in the body function, not wrt or\n # through the initial value). 
This means if we're in a while loop\n # context, we should never see a switch node from this context.\n # pylint: disable=protected-access\n if (control_flow_util.IsSwitch(op) and\n op._control_flow_context is not None and\n op._control_flow_context.IsWhileContext() and\n op._control_flow_context ==\n ops.get_default_graph()._get_control_flow_context()):\n _RaiseNoGradWrtInitialLoopValError(op, from_ops, xs)\n # pylint: enable=protected-access\n\n if (grad_fn or is_func_call) and has_out_grads:\n # NOTE: If _AggregatedGrads didn't compute a value for the i'th\n # output, it means that the cost does not depend on output[i],\n # therefore dC/doutput[i] is 0.\n for i, out_grad in enumerate(out_grads):\n if (not isinstance(out_grad, ops.Tensor) and not out_grad) and (\n (not grad_fn and is_func_call) or IsTrainable(op.outputs[i])):\n # Only trainable outputs or outputs for a function call that\n # will use SymbolicGradient get a zero gradient. Gradient\n # functions should ignore the gradient for other outputs.\n # TODO(apassos) gradients of resource handles might be an\n # issue here because of zeros.\n if loop_state:\n out_grads[i] = loop_state.ZerosLike(op, i)\n else:\n out_grads[i] = control_flow_state.ZerosLikeOutsideLoop(op, i)\n with ops.name_scope(op.name + \"_grad\"):\n # pylint: disable=protected-access\n with src_graph._original_op(op):\n # pylint: enable=protected-access\n if grad_fn:\n # If grad_fn was found, do not use SymbolicGradient even for\n # functions.\n in_grads = _MaybeCompile(grad_scope, op, func_call,\n lambda: grad_fn(op, *out_grads))\n out_dict = PerExampleGradsForParameters(op, out_grads, out_dict)\n else:\n # For function call ops, we add a 'SymbolicGradient'\n # node to the graph to compute gradients.\n in_grads = _MaybeCompile(grad_scope, op, func_call,\n lambda: _SymGrad(op, out_grads))\n in_grads = _AsList(in_grads)\n _VerifyGeneratedGradients(in_grads, op)\n if gate_gradients and len([x for x in in_grads\n if x is not None]) > 1:\n with ops.device(None):\n with ops._colocate_with_for_gradient( # pylint: disable=protected-access\n None,\n gradient_uid,\n ignore_existing=True):\n in_grads = control_flow_ops.tuple(in_grads)\n _LogOpGradients(op, out_grads, in_grads)\n else:\n # If no grad_fn is defined or none of out_grads is available,\n # just propagate a list of None backwards.\n in_grads = [None] * len(_Inputs(op, xs))\n # Note: we don't filter out eager inputs here because the inputs need to\n # line up with in_grads.\n for i, (t_in, in_grad) in enumerate(zip(_Inputs(op, xs), in_grads)):\n if in_grad is not None:\n if (isinstance(in_grad, ops.Tensor) and\n t_in.dtype != dtypes.resource):\n try:\n in_grad.set_shape(t_in.get_shape())\n except ValueError:\n raise ValueError(\n \"Incompatible shapes between op input and calculated \"\n \"input gradient. Forward operation: %s. Input index: %d. \"\n \"Original input shape: %s. 
\"\n \"Calculated input gradient shape: %s\" %\n (op.name, i, t_in.shape, in_grad.shape))\n if not isinstance(t_in, ops.EagerTensor):\n _SetGrad(grads, t_in, in_grad)\n if loop_state:\n loop_state.ExitGradWhileContext(op, before=False)\n\n # Update pending count for the inputs of op and enqueue ready ops.\n _UpdatePendingAndEnqueueReady(grads, op, queue, pending_count, loop_state,\n xs)\n if loop_state:\n loop_state.PostProcessing() \n\n # return [_GetGrad(grads, x, unconnected_gradients) for x in xs], out_dict\n return out_dict\n\n\ndef PerExampleGradsForParameters(op, out_grads, out_dict):\n if \"dense/BiasAdd\" in op.name:\n try:\n data_format = op.get_attr(\"data_format\")\n except ValueError:\n data_format = None\n def loop_fn(grad_y):\n return gen_nn_ops.bias_add_grad(\n tf.expand_dims(grad_y, 0),\n data_format=data_format)\n out_g = tf.dtypes.cast(out_grads[0], dtype=tf.float64)\n out_dict[op.inputs[1].name[:-6]+'grad'] = tf.vectorized_map(loop_fn, out_g)\n elif \"dense/MatMul\" in op.name:\n t_a = op.get_attr(\"transpose_a\")\n t_b = op.get_attr(\"transpose_b\")\n a = tf.expand_dims(math_ops.conj(op.inputs[0]), 1)\n b = tf.expand_dims(out_grads[0], 1)\n if not t_a and not t_b:\n out_dict[op.inputs[1].name[:-6]+'grad'] = tf.linalg.matmul(a, b, transpose_a=True)\n elif not t_a and t_b:\n out_dict[op.inputs[1].name[:-6]+'grad'] = tf.linalg.matmul(b, a, transpose_a=True)\n elif t_a and not t_b:\n out_dict[op.inputs[1].name[:-6]+'grad'] = tf.linalg.matmul(a, b)\n elif t_a and t_b:\n out_dict[op.inputs[1].name[:-6]+'grad'] = tf.linalg.matmul(b, a, transpose_a=True, transpose_b=True)\n\n return out_dict\n\n\ndef _HasAnyNotNoneGrads(grads, op):\n \"\"\"Return true iff op has real gradient.\"\"\"\n out_grads = _GetGrads(grads, op)\n for out_grad in out_grads:\n if isinstance(out_grad, (ops.Tensor, ops.IndexedSlices)):\n return True\n if out_grad and isinstance(out_grad, collections.Sequence):\n if any(g is not None for g in out_grad):\n return True\n return False\n\n\ndef _UpdatePendingAndEnqueueReady(grads, op, queue, pending_count, loop_state,\n xs):\n \"\"\"Update pending count for the inputs of op and enqueue ready ops.\"\"\"\n for x in _NonEagerInputs(op, xs):\n pending_count[x.op] -= 1\n ready = (pending_count[x.op] == 0)\n if loop_state and not ready:\n ready = pending_count[x.op] > 0 and control_flow_util.IsLoopSwitch(x.op)\n if ready:\n if control_flow_util.IsLoopExit(x.op):\n # if x is an exit without real gradient, defer processing them.\n grad_state = loop_state.GetGradState(x.op, before=False)\n grad_state.deferred_exits.append(x)\n grad_state.pending_exits_count -= 1\n if grad_state.pending_exits_count == 0:\n # We now have all the exits so process them.\n has_not_none_grad = False\n for y in grad_state.deferred_exits:\n if _HasAnyNotNoneGrads(grads, y.op):\n has_not_none_grad = True\n queue.append(y.op)\n else:\n grad_state.unused_exits.append(y)\n if has_not_none_grad:\n # For an unused exit, if it has trainable outputs, backprop\n # a zero gradient. 
Otherwise, just ignore it.\n for y in grad_state.unused_exits:\n if IsTrainable(y):\n _SetGrad(grads, y, loop_state.ZerosLikeForExit(y))\n queue.append(y.op)\n else:\n # All exits are \"unused\" so use None as gradient.\n for y in grad_state.unused_exits:\n queue.append(y.op)\n else:\n queue.append(x.op)\n\n\ndef _SetGrad(grads, t, grad):\n \"\"\"Sets gradient \"grad\" in \"grads\" for tensor \"t\".\"\"\"\n op = t.op\n op_grads = grads.get(op)\n if not op_grads:\n op_grads = [[] for _ in xrange(len(op.outputs))]\n grads[op] = op_grads\n t_grads = op_grads[t.value_index]\n if isinstance(t_grads, list):\n t_grads.append(grad)\n else:\n assert control_flow_util.IsLoopSwitch(op)\n op_grads[t.value_index] = grad\n\n\ndef _GetGrad(grads, t, unconnected_gradients):\n \"\"\"Gets gradient for tensor \"t\".\"\"\"\n op = t.op\n op_grads = grads.get(op)\n if not op_grads:\n if unconnected_gradients == UnconnectedGradients.ZERO:\n t_dtype = t.dtype if t.dtype != dtypes.resource else dtypes.float32\n return array_ops.zeros_like(t, dtype=t_dtype)\n elif unconnected_gradients == UnconnectedGradients.NONE:\n return None\n else:\n raise ValueError(\n \"Unknown value for unconnected_gradients: %r\" % unconnected_gradients)\n\n t_grad = op_grads[t.value_index]\n assert not isinstance(\n t_grad, list), (\"gradients list should have been aggregated by now.\")\n return t_grad\n\n\ndef _GetGrads(grads, op):\n \"\"\"Gets all gradients for op.\"\"\"\n if op in grads:\n return grads[op]\n else:\n return [[] for _ in xrange(len(op.outputs))]\n\n\ndef _HandleNestedIndexedSlices(grad):\n assert isinstance(grad, ops.IndexedSlices)\n if isinstance(grad.values, ops.Tensor):\n return grad\n else:\n assert isinstance(grad.values, ops.IndexedSlices)\n g = _HandleNestedIndexedSlices(grad.values)\n return ops.IndexedSlices(g.values, array_ops.gather(\n grad.indices, g.indices), g.dense_shape)\n\n\ndef _AccumulatorShape(inputs):\n shape = tensor_shape.unknown_shape()\n for i in inputs:\n if isinstance(i, ops.Tensor):\n shape = shape.merge_with(i.get_shape())\n return shape\n\n\ndef _LogOpGradients(op, out_grads, in_grads):\n \"\"\"Log the in and out grads of an op.\"\"\"\n logging.vlog(1, \"Gradient for '\" + op.name + \"'\")\n\n def _FilterGrad(x):\n if x is None:\n return False\n if isinstance(x, (list, tuple)):\n return bool(x)\n else:\n return True\n\n logging.vlog(1, \" in --> %s\",\n \", \".join([x.name for x in out_grads if _FilterGrad(x)]))\n logging.vlog(1, \" out --> %s\",\n \", \".join([x.name for x in in_grads if _FilterGrad(x)]))\n\n\ndef _MultiDeviceAddN(tensor_list, gradient_uid):\n \"\"\"Adds tensors from potentially multiple devices.\"\"\"\n # Basic function structure comes from control_flow_ops.group().\n # Sort tensors according to their devices.\n tensors_on_device = collections.defaultdict(lambda: [])\n for tensor in tensor_list:\n tensors_on_device[tensor.device].append(tensor)\n\n # For each device, add the tensors on that device first.\n # Then gather the partial sums from multiple devices.\n # TODO(sjhwang): Create hierarchical aggregation tree as pbar's suggestion.\n # E.g., aggregate per GPU, then per task, and so on.\n summands = []\n\n def DeviceKey(dev):\n return \"\" if dev is None else dev\n\n for dev in sorted(tensors_on_device, key=DeviceKey):\n tensors = tensors_on_device[dev]\n with ops._colocate_with_for_gradient( # pylint: disable=protected-access\n tensors[0].op,\n gradient_uid,\n ignore_existing=True):\n summands.append(math_ops.add_n(tensors))\n\n return 
math_ops.add_n(summands)\n\n\n@tf_export(\"AggregationMethod\")\nclass AggregationMethod(object):\n \"\"\"A class listing aggregation methods used to combine gradients.\n\n Computing partial derivatives can require aggregating gradient\n contributions. This class lists the various methods that can\n be used to combine gradients in the graph.\n\n The following aggregation methods are part of the stable API for\n aggregating gradients:\n\n * `ADD_N`: All of the gradient terms are summed as part of one\n operation using the \"AddN\" op (see `tf.add_n`). This \n method has the property that all gradients must be ready and \n buffered separately in memory before any aggregation is performed.\n * `DEFAULT`: The system-chosen default aggregation method.\n\n The following aggregation methods are experimental and may not \n be supported in future releases:\n\n * `EXPERIMENTAL_TREE`: Gradient terms are summed in pairs using\n using the \"AddN\" op. This method of summing gradients may reduce \n performance, but it can improve memory utilization because the \n gradients can be released earlier.\n\n * `EXPERIMENTAL_ACCUMULATE_N`: Gradient terms are summed using the\n \"AccumulateN\" op (see `tf.accumulate_n`), which accumulates the \n overall sum in a single buffer that is shared across threads.\n This method of summing gradients can result in a lower memory footprint \n and lower latency at the expense of higher CPU/GPU utilization.\n For gradients of types that \"AccumulateN\" does not support, this\n summation method falls back on the behavior of `EXPERIMENTAL_TREE`\n \"\"\"\n ADD_N = 0\n DEFAULT = ADD_N\n # The following are experimental and may not be supported in future releases.\n EXPERIMENTAL_TREE = 1\n EXPERIMENTAL_ACCUMULATE_N = 2\n\n\ndef _AggregatedGrads(grads,\n op,\n gradient_uid,\n loop_state,\n aggregation_method=None):\n \"\"\"Get the aggregated gradients for op.\n\n Args:\n grads: The map of memoized gradients.\n op: The op to get gradients for.\n gradient_uid: A unique identifier within the graph indicating\n which invocation of gradients is being executed. Used to cluster\n ops for compilation.\n loop_state: An object for maintaining the state of the while loops in the\n graph. It is of type ControlFlowState. None if the graph\n contains no while loops.\n aggregation_method: Specifies the method used to combine gradient terms.\n Accepted values are constants defined in the class `AggregationMethod`.\n\n Returns:\n A list of gradients, one per each output of `op`. 
If the gradients\n for a particular output is a list, this function aggregates it\n before returning.\n\n Raises:\n TypeError: if the incoming grads are not Tensors or IndexedSlices.\n ValueError: if the arguments are invalid.\n\n \"\"\"\n if aggregation_method is None:\n aggregation_method = AggregationMethod.DEFAULT\n if aggregation_method not in [\n AggregationMethod.ADD_N, AggregationMethod.EXPERIMENTAL_TREE,\n AggregationMethod.EXPERIMENTAL_ACCUMULATE_N\n ]:\n raise ValueError(\n \"Invalid aggregation_method specified %s.\" % aggregation_method)\n out_grads = _GetGrads(grads, op)\n for i, out_grad in enumerate(out_grads):\n if loop_state:\n if isinstance(out_grad, (ops.Tensor, ops.IndexedSlices)):\n assert control_flow_util.IsLoopSwitch(op)\n continue\n # Grads have to be Tensors or IndexedSlices\n if (isinstance(out_grad, collections.Sequence) and not all(\n isinstance(g, (ops.Tensor, ops.IndexedSlices))\n for g in out_grad\n if g is not None\n )):\n raise TypeError(\"gradients have to be either all Tensors \"\n \"or all IndexedSlices\")\n # Aggregate multiple gradients, and convert [] to None.\n # import pdb; pdb.set_trace()\n if out_grad:\n if len(out_grad) < 2:\n used = \"nop\"\n out_grads[i] = out_grad[0]\n elif all(isinstance(g, ops.Tensor) for g in out_grad if g is not None):\n tensor_shape = _AccumulatorShape(out_grad)\n if (aggregation_method == AggregationMethod.EXPERIMENTAL_ACCUMULATE_N\n and len(out_grad) > 2 and tensor_shape.is_fully_defined()):\n # The benefit of using AccumulateN is that its inputs can be combined\n # in any order and this can allow the expression to be evaluated with\n # a smaller memory footprint. When used with gpu_allocator_retry,\n # it is possible to compute a sum of terms which are much larger than\n # total GPU memory.\n # AccumulateN can currently only be used if we know the shape for\n # an accumulator variable. 
If this is not known, or if we only have\n # 2 grads then we fall through to the \"tree\" case below.\n used = \"accumulate_n\"\n out_grads[i] = math_ops.accumulate_n(out_grad)\n elif aggregation_method in [\n AggregationMethod.EXPERIMENTAL_TREE,\n AggregationMethod.EXPERIMENTAL_ACCUMULATE_N\n ]:\n # Aggregate all gradients by doing pairwise sums: this may\n # reduce performance, but it can improve memory because the\n # gradients can be released earlier.\n #\n # TODO(vrv): Consider replacing this with a version of\n # tf.AddN() that eagerly frees its inputs as soon as they are\n # ready, so the order of this tree does not become a problem.\n used = \"tree\"\n with ops.name_scope(op.name + \"_gradient_sum\"):\n running_sum = out_grad[0]\n for grad in out_grad[1:]:\n running_sum = math_ops.add_n([running_sum, grad])\n out_grads[i] = running_sum\n else:\n used = \"add_n\"\n out_grads[i] = _MultiDeviceAddN(out_grad, gradient_uid)\n # logging.set_verbosity(logging.INFO) \n logging.vlog(2, \" _AggregatedGrads %d x %s using %s\", len(out_grad),\n tensor_shape, used)\n else:\n out_grads[i] = _AggregateIndexedSlicesGradients(out_grad)\n else: # not out_grad\n # out_grads[i] is [], thus its aggregation is simply None.\n out_grads[i] = None\n return out_grads\n\n\ndef _AggregateIndexedSlicesGradients(grads):\n \"\"\"Aggregates gradients of type `IndexedSlices` by concatenation.\"\"\"\n if len(grads) < 1:\n return None\n elif len(grads) == 1:\n return grads[0]\n else:\n grads = math_ops._as_indexed_slices_list( # pylint: disable=protected-access\n [g for g in grads if g is not None])\n grads = [_HandleNestedIndexedSlices(x) for x in grads] # pylint: disable=protected-access\n # Form IndexedSlices out of the concatenated values and indices.\n concat_grad = ops.IndexedSlices(\n array_ops.concat([x.values for x in grads], axis=0),\n array_ops.concat([x.indices for x in grads], axis=0),\n grads[0].dense_shape)\n\n return concat_grad\n" ]
[ [ "tensorflow.python.ops.array_ops.shape", "tensorflow.python.ops.control_flow_util.IsSwitch", "tensorflow.linalg.matmul", "tensorflow.python.framework.ops.convert_n_to_tensor_or_indexed_slices", "tensorflow.python.framework.ops.device", "tensorflow.python.framework.ops.get_gradient_function", "tensorflow.python.eager.context.executing_eagerly", "tensorflow.python.ops.array_ops.identity", "tensorflow.dtypes.cast", "tensorflow.python.framework.ops.internal_convert_n_to_tensor_or_indexed_slices", "tensorflow.python.ops.control_flow_util.IsLoopSwitch", "tensorflow.python.framework.ops._colocate_with_for_gradient", "tensorflow.python.util.tf_export.tf_export", "tensorflow.python.ops.control_flow_state.MaybeCreateControlFlowState", "tensorflow.python.ops.array_ops.gather", "tensorflow.python.framework.tensor_util.constant_value", "tensorflow.python.platform.tf_logging.vlog", "tensorflow.python.framework.ops.register_tensor_conversion_function", "tensorflow.vectorized_map", "tensorflow.python.ops.array_ops.zeros_like", "tensorflow.python.framework.dtypes.as_dtype", "tensorflow.python.ops.math_ops.unsorted_segment_sum", "tensorflow.core.framework.attr_value_pb2.AttrValue", "tensorflow.python.ops.control_flow_ops.tuple", "tensorflow.core.framework.attr_value_pb2.NameAttrList", "tensorflow.python.ops.functional_ops.symbolic_gradient", "tensorflow.python.ops.math_ops.conj", "tensorflow.python.ops.control_flow_util.IsLoopExit", "tensorflow.python.ops.unconnected_gradients.UnconnectedGradients", "tensorflow.python.ops.math_ops._as_indexed_slices_list", "tensorflow.python.ops.array_ops.concat", "tensorflow.python.ops.math_ops.add_n", "tensorflow.expand_dims", "tensorflow.python.framework.ops.get_default_graph", "tensorflow.python.framework.ops.name_scope", "tensorflow.python.ops.math_ops.accumulate_n", "numpy.prod", "tensorflow.python.ops.control_flow_state.ZerosLikeOutsideLoop", "tensorflow.python.ops.resource_variable_ops.is_resource_variable", "tensorflow.python.framework.constant_op.constant" ] ]
sailoridy/MAESTRO
[ "f957d148d2028324a2a1076be244f73dad63fd67" ]
[ "Util/postprocessing/central_angle_average/AngleAveragedProfile.py" ]
[ "\"\"\"\nClass for working with angle-averaged profiles.\n\nDonald E. Willcox\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nclass AngleAveragedProfile(object):\n def __init__(self, filename=None):\n self.init_vars()\n if filename:\n self.read_from_file(filename)\n \n def init_vars(self):\n self.header = {}\n self.data = {}\n self.data_keys = []\n self.filename = ''\n \n def read_from_file(self, filename):\n # Clear my variables\n self.init_vars()\n self.filename = filename\n # Given a profile filename, read the profile\n f = open(filename, 'r')\n # Get the header global values\n num_sep = 0 # Number of '-----' separator lines encountered\n readColumnLabels = False # True if next line has column labels\n for line in f:\n ls = line.strip()\n if ls[0] == '#' or readColumnLabels:\n if readColumnLabels:\n cl = []\n if ls[0] == '#':\n ls = ls[1:]\n for ci in ls.split(' '):\n ci = ci.strip()\n # Need to check because splitting by ' '\n # can yield ' ' which strip to ''.\n if ci:\n # If field is enclosed in [ ] brackets, remove them\n if ci[0] == '[' and ci[-1] == ']':\n ci = ci[1:-1].strip()\n cl.append(ci) \n self.data_keys = cl\n print(self.data_keys)\n for ci in cl:\n self.data[ci] = []\n readColumnLabels = False\n num_sep = 0\n else:\n # See if there's an equals sign and get value\n if '=' in ls:\n k, v = ls[2:].split('=', 1)\n k = k.strip()\n vv = []\n for vf in v.strip().split():\n try:\n vv.append(float(vf))\n except:\n vv.append(0.0)\n v = vv\n if len(v) > 1:\n v = np.array(v)\n else:\n v = v[0]\n self.header[k] = v\n elif '----------------------------' in ls:\n num_sep += 1\n if num_sep == 3 and not readColumnLabels:\n readColumnLabels = True\n else:\n # Read data line\n ld = [float(di) for di in ls.split()]\n for k, v in zip(self.data_keys, ld):\n self.data[k].append(v)\n f.close()\n # Turn data into numpy arrays\n for k in self.data.keys():\n self.data[k] = np.array(self.data[k])\n\n def gen_tick_spacing(self):\n # Generate possible tick spacings\n initvals = [0.25, 0.5, 1.0, 5.0]\n n = 0\n if initvals:\n for v in initvals:\n yield v\n else:\n while(True):\n n += 1\n yield float(10*n)\n\n def plot_var(self, var, fmt='png', rup=None, fmin=None, fmax=None, sep_log=None, show=False):\n # Plot the variable corresponding to the data key var\n # Independent axis is radius r\n # Plots are log scale on the dependent axis\n if var not in self.data.keys():\n return\n fig = plt.figure()\n ax = fig.add_subplot(111)\n idxup = -1\n if rup:\n ax.set_xlim([0, rup])\n # Get the lowest index where radius > rup\n idxup = np.where(self.data['r'] > rup)[0][0]\n neg_idx = np.where(self.data[var][:idxup] < 0.0)\n pos_idx = np.where(self.data[var][:idxup] > 0.0)\n ax.set_xlabel('r')\n # find smallest non-zero log10 magnitude in quantity to plot\n try:\n neg_min = np.log10(np.amin(np.absolute(self.data[var][neg_idx])))\n except:\n neg_min = None\n try:\n pos_min = np.log10(np.amin(np.absolute(self.data[var][pos_idx])))\n except:\n pos_min = None\n if pos_min and neg_min:\n lwb = min(neg_min, pos_min)\n else:\n if pos_min:\n lwb = pos_min\n elif neg_min:\n lwb = neg_min\n else:\n lwb = None\n upb = np.log10(np.amax(np.absolute(self.data[var][:idxup])))\n if ((not lwb) or upb-lwb <= 1.0) and not sep_log:\n # plot quantity on linear axis\n # plot linear scale magnitudes\n ax.plot(self.data['r'][:idxup], self.data[var][:idxup], color='green')\n # plot positive points in blue\n ax.plot(self.data['r'][:idxup][pos_idx], self.data[var][:idxup][pos_idx],\n linestyle='None', marker='^', 
color='blue', markersize=8, alpha=0.5)\n # plot negative points in red\n ax.plot(self.data['r'][:idxup][neg_idx], self.data[var][:idxup][neg_idx],\n linestyle='None', marker='v', color='red', markersize=8, alpha=0.5)\n ax.set_ylabel('$\\mathrm{' + var.replace('_','\\_') + '}$')\n if fmin and fmax:\n ax.set_ylim((fmin, fmax))\n else:\n # plot quantity on log10 axis\n # plot log scale magnitudes\n ax.plot(self.data['r'][:idxup], np.log10(np.absolute(self.data[var][:idxup])), color='green')\n # plot positive points in blue\n ax.plot(self.data['r'][:idxup][pos_idx], np.log10(self.data[var][:idxup][pos_idx]),\n linestyle='None', marker='^', color='blue', markersize=8, alpha=0.5)\n # plot negative points in red\n ax.plot(self.data['r'][:idxup][neg_idx], np.log10(np.absolute(self.data[var][:idxup][neg_idx])),\n linestyle='None', marker='v', color='red', markersize=8, alpha=0.5)\n ax.set_ylabel('$\\mathrm{Log_{10} \\ | ' + var.replace('_','\\_') + ' |}$')\n if fmin:\n lwb = np.log10(fmin)\n if fmax:\n upb = np.log10(fmax)\n lwb = max(lwb, upb-10) # Span at most 10 decades\n upb = np.ceil(upb*10.0)/10.0\n lwb = np.floor(lwb*10.0)/10.0\n # yticks = None\n # for tspac in self.gen_tick_spacing():\n # nticks = int(np.floor((upb-lwb)/tspac) + 1)\n # eps = upb - (lwb + tspac*(nticks-2))\n # if nticks <= 10 and eps > 0.5*tspac:\n # yticks = np.array([lwb + tspac*(j) for j in range(nticks-1)] + [upb])\n # break\n # ax.set_yticks(yticks)\n ax.set_ylim((lwb, upb))\n # List the time above the plot\n tart = ax.text(1.0, 1.01, 'time = {}'.format(self.header['time']),\n transform=ax.transAxes,\n verticalalignment='bottom',\n horizontalalignment='right')\n outname = '.'.join([self.filename, var.replace(' ','-'), fmt])\n if fmt=='png':\n plt.savefig(outname, bbox_extra_artists=(tart,), dpi=300)\n else:\n plt.savefig(outname, bbox_extra_artists=(tart,))\n if show:\n plt.show()\n plt.close(fig)\n \n def plot_all_vars(self, fmt='png', rup=None,\n field_mins={}, field_maxs={}, field_seps_type={}):\n # Plot all variables in the profile and save\n # fmt is the suffix passed to savefig\n for var in self.data.keys():\n if var != 'r':\n if var in field_mins:\n fmin = field_mins[var]\n else:\n fmin = None\n if var in field_maxs:\n fmax = field_maxs[var]\n else:\n fmax = None\n if var in field_seps_type:\n sep_log = field_seps_type[var]\n else:\n sep_log = None\n self.plot_var(var, fmt, rup, fmin, fmax, sep_log)\n" ]
[ [ "numpy.absolute", "matplotlib.pyplot.savefig", "numpy.ceil", "numpy.log10", "matplotlib.pyplot.close", "numpy.floor", "numpy.array", "numpy.where", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
TBC-TJU/MetaBCI-brainda
[ "d2dc655163b771ca22e43432d886ece3d98235c8", "d2dc655163b771ca22e43432d886ece3d98235c8" ]
[ "brainda/datasets/tsinghua.py", "brainda/algorithms/deep_learning/guney_net.py" ]
[ "# -*- coding: utf-8 -*-\n#\n# Authors: Swolf <swolfforever@gmail.com>\n# Date: 2021/01/07\n# License: MIT License\n\"\"\"\nTsinghua BCI Lab.\n\"\"\"\nimport os, zipfile\nfrom typing import Union, Optional, Dict, List, Tuple\nfrom pathlib import Path\n\nimport numpy as np\nimport py7zr\nfrom mne import create_info\nfrom mne.io import RawArray, Raw\nfrom mne.channels import make_standard_montage\nfrom .base import BaseDataset\nfrom ..utils.download import mne_data_path\nfrom ..utils.io import loadmat\n\n# TSINGHUA_URL = 'http://bci.med.tsinghua.edu.cn/download.html'\n\nWang2016_URL = 'http://bci.med.tsinghua.edu.cn/upload/yijun/' #403 error, though it still works\n# Wang2016_URL = \"ftp://sccn.ucsd.edu/pub/ssvep_benchmark_dataset/\"\n# Wang2016_URL = 'http://www.thubci.com/uploads/down/' # This may work\nBETA_URL = 'http://bci.med.tsinghua.edu.cn/upload/liubingchuan/' #403 error\n# BETA_URL = 'https://figshare.com/articles/The_BETA_database/12264401'\n\n\nclass Wang2016(BaseDataset):\n \"\"\"SSVEP dataset from Yijun Wang.\n\n This dataset gathered SSVEP-BCI recordings of 35 healthy subjects (17 females, aged 17-34 years, mean age: 22 years) focusing on 40 characters flickering at different frequencies (8-15.8 Hz with an interval of 0.2 Hz). For each subject, the experiment consisted of 6 blocks. Each block contained 40 trials corresponding to all 40 characters indicated in a random order. Each trial started with a visual cue (a red square) indicating a target stimulus. The cue appeared for 0.5 s on the screen. Subjects were asked to shift their gaze to the target as soon as possible within the cue duration. Following the cue offset, all stimuli started to flicker on the screen concurrently and lasted 5 s. After stimulus offset, the screen was blank for 0.5 s before the next trial began, which allowed the subjects to have short breaks between consecutive trials. Each trial lasted a total of 6 s. To facilitate visual fixation, a red triangle appeared below the flickering target during the stimulation period. In each block, subjects were asked to avoid eye blinks during the stimulation period. To avoid visual fatigue, there was a rest for several minutes between two consecutive blocks.\n\n EEG data were acquired using a Synamps2 system (Neuroscan, Inc.) with a sampling rate of 1000 Hz. The amplifier frequency passband ranged from 0.15 Hz to 200 Hz. Sixty-four channels covered the whole scalp of the subject and were aligned according to the international 10-20 system. The ground was placed on midway between Fz and FPz. The reference was located on the vertex. Electrode impedances were kept below 10 KΩ. To remove the common power-line noise, a notch filter at 50 Hz was applied in data recording. Event triggers generated by the computer to the amplifier and recorded on an event channel synchronized to the EEG data. \n\n The continuous EEG data was segmented into 6 s epochs (500 ms pre-stimulus, 5.5 s post-stimulus onset). The epochs were subsequently downsampled to 250 Hz. Thus each trial consisted of 1500 time points. Finally, these data were stored as double-precision floating-point values in MATLAB and were named as subject indices (i.e., S01.mat, …, S35.mat). For each file, the data loaded in MATLAB generate a 4-D matrix named ‘data’ with dimensions of [64, 1500, 40, 6]. The four dimensions indicate ‘Electrode index’, ‘Time points’, ‘Target index’, and ‘Block index’. The electrode positions were saved in a ‘64-channels.loc’ file. Six trials were available for each SSVEP frequency. 
Frequency and phase values for the 40 target indices were saved in a ‘Freq_Phase.mat’ file.\n\n Information for all subjects was listed in a ‘Sub_info.txt’ file. For each subject, there are five factors including ‘Subject Index’, ‘Gender’, ‘Age’, ‘Handedness’, and ‘Group’. Subjects were divided into an ‘experienced’ group (eight subjects, S01-S08) and a ‘naive’ group (27 subjects, S09-S35) according to their experience in SSVEP-based BCIs.\n\n Frequency Table\n 8 9 10 11 12 13 14 15\n 8.2 9.2 10.2 11.2 12.2 13.2 14.2 15.2\n 8.4 9.4 10.4 11.4 12.4 13.4 14.4 15.4\n 8.6 9.6 10.6 11.6 12.6 13.6 14.6 15.6\n 8.8 9.8 10.8 11.8 12.8 13.8 14.8 15.8\n\n Notes\n -----\n 1. sub5 is not available from the download url.\n \"\"\"\n\n _CHANNELS = [\n 'FP1', 'FPZ', 'FP2', 'AF3', 'AF4', 'F7', 'F5', 'F3', 'F1',\n 'FZ', 'F2', 'F4', 'F6', 'F8', 'FT7', 'FC5', 'FC3', 'FC1',\n 'FCZ', 'FC2', 'FC4', 'FC6', 'FT8', 'T7', 'C5', 'C3', 'C1',\n 'CZ', 'C2', 'C4', 'C6', 'T8', 'TP7', 'CP5', 'CP3', 'CP1',\n 'CPZ', 'CP2', 'CP4', 'CP6', 'TP8', 'P7', 'P5', 'P3', 'P1',\n 'PZ', 'P2', 'P4', 'P6', 'P8', 'PO7', 'PO5', 'PO3', 'POZ',\n 'PO4', 'PO6', 'PO8', 'O1', 'OZ', 'O2']\n\n _FREQS = [\n 8, 9, 10, 11, 12, 13, 14, 15, \n 8.2, 9.2, 10.2, 11.2, 12.2, 13.2, 14.2, 15.2, \n 8.4, 9.4, 10.4, 11.4, 12.4, 13.4, 14.4, 15.4,\n 8.6, 9.6, 10.6, 11.6, 12.6, 13.6, 14.6, 15.6,\n 8.8, 9.8, 10.8, 11.8, 12.8, 13.8, 14.8, 15.8\n ]\n\n _PHASES = [\n 0, 0.5, 1, 1.5, 0, 0.5, 1, 1.5,\n 0.5, 1, 1.5, 0, 0.5, 1, 1.5, 0,\n 1, 1.5, 0, 0.5, 1, 1.5, 0, 0.5,\n 1.5, 0, 0.5, 1, 1.5, 0, 0.5, 1,\n 0, 0.5, 1, 1.5, 0, 0.5, 1, 1.5\n ]\n\n _EVENTS = {str(freq): (i+1, (0, 5)) for i, freq in enumerate(_FREQS)}\n \n def __init__(self):\n super().__init__(\n dataset_code='wang2016', \n subjects=list(range(1, 36)),\n events=self._EVENTS, \n channels=self._CHANNELS, \n srate=250,\n paradigm='ssvep'\n )\n\n def data_path(self, \n subject: Union[str, int], \n path: Optional[Union[str, Path]] = None, \n force_update: bool = False,\n update_path: Optional[bool] = None,\n proxies: Optional[Dict[str, str]] = None,\n verbose: Optional[Union[bool, str, int]] = None) -> List[List[Union[str, Path]]]:\n if subject not in self.subjects:\n raise(ValueError(\"Invalid subject id\"))\n\n url = '{:s}S{:d}.mat.7z'.format(Wang2016_URL, subject)\n file_dest = mne_data_path(url, 'tsinghua', \n path=path, proxies=proxies, force_update=force_update, update_path=update_path)\n \n if not os.path.exists(file_dest[:-3]):\n # decompression the data\n with py7zr.SevenZipFile(file_dest, 'r') as archive:\n archive.extractall(path=Path(file_dest).parent)\n dests = [\n [\n file_dest[:-3]\n ]\n ]\n return dests\n\n def _get_single_subject_data(self, subject: Union[str, int], \n verbose: Optional[Union[bool, str, int]] = None) -> Dict[str, Dict[str, Raw]]:\n dests = self.data_path(subject)\n raw_mat = loadmat(dests[0][0])\n epoch_data = raw_mat['data'] * 1e-6\n stim = np.zeros((1, *epoch_data.shape[1:]))\n # insert event label at stimulus-onset\n # 0.5s latency\n stim[0, 125] = np.tile(np.arange(1, 41)[:, np.newaxis], (1, epoch_data.shape[-1]))\n epoch_data = np.concatenate((epoch_data, stim), axis=0)\n data = np.transpose(epoch_data, (0, 3, 2, 1))\n\n montage = make_standard_montage('standard_1005')\n montage.rename_channels({ch_name: ch_name.upper() for ch_name in montage.ch_names})\n # montage.ch_names = [ch_name.upper() for ch_name in montage.ch_names]\n ch_names = [ch_name.upper() for ch_name in self._CHANNELS]\n ch_names.insert(32, 'M1')\n ch_names.insert(42, 'M2')\n ch_names.insert(59, 'CB1')\n ch_names = 
ch_names + ['CB2', 'STI 014']\n ch_types = ['eeg']*65\n ch_types[59] = 'misc'\n ch_types[63] = 'misc'\n ch_types[-1] = 'stim'\n\n info = create_info(\n ch_names=ch_names, ch_types=ch_types, sfreq=self.srate\n )\n\n runs = dict()\n for i in range(data.shape[1]):\n raw = RawArray(data=np.reshape(data[:, i, ...], (data.shape[0], -1)), info=info)\n raw.set_montage(montage)\n runs['run_{:d}'.format(i)] = raw\n\n sess = {\n 'session_0': runs\n }\n return sess\n\n def get_freq(self, event: str):\n return self._FREQS[self._EVENTS[event][0]-1]\n\n def get_phase(self, event: str):\n return self._PHASES[self._EVENTS[event][0]-1]\n\n\nclass BETA(BaseDataset):\n \"\"\"BETA SSVEP dataset [1]_.\n\n EEG data after preprocessing are store as a 4-way tensor, with a dimension of channel x time point x block x condition. Each trial comprises 0.5-s data before the event onset and 0.5-s data after the time window of 2 s or 3 s. For S1-S15, the time window is 2 s and the trial length is 3 s, whereas for S16-S70 the time window is 3 s and the trial length is 4 s. Additional details about the channel and condition information can be found in the following supplementary information.\n\n Eight supplementary information is comprised of personal information, channel information, frequency and initial phase associated to each condition, SNR and sampling rate. The personal information contains age and gender of the subject. For the channel information, a location matrix (64 x 4) is provided, with the first column indicating channel index, the second column and third column indicating the degree and radius in polar coordinates, and the last column indicating channel name. The SNR information contains the mean narrow-band SNR and wide-band SNR matrix for each subject, calculated in (3) and (4), respectively. The initial phase is in radius.\n\n 3-100Hz bandpass filtering (eegfilt), downsampled to 250 Hz\n\n References\n ----------\n .. [1] Liu B, Huang X, Wang Y, et al. BETA: A Large Benchmark Database Toward SSVEP-BCI Application[J]. 
Frontiers in neuroscience, 2020, 14: 627.\n \"\"\"\n\n _CHANNELS = [\n 'FP1', 'FPZ', 'FP2', 'AF3', 'AF4', 'F7', 'F5', 'F3', 'F1',\n 'FZ', 'F2', 'F4', 'F6', 'F8', 'FT7', 'FC5', 'FC3', 'FC1',\n 'FCZ', 'FC2', 'FC4', 'FC6', 'FT8', 'T7', 'C5', 'C3', 'C1',\n 'CZ', 'C2', 'C4', 'C6', 'T8', 'TP7', 'CP5', 'CP3', 'CP1',\n 'CPZ', 'CP2', 'CP4', 'CP6', 'TP8', 'P7', 'P5', 'P3', 'P1',\n 'PZ', 'P2', 'P4', 'P6', 'P8', 'PO7', 'PO5', 'PO3', 'POZ',\n 'PO4', 'PO6', 'PO8', 'O1', 'OZ', 'O2']\n\n _FREQS = [\n 8.6, 8.8, \n 9, 9.2, 9.4, 9.6, 9.8,\n 10, 10.2, 10.4, 10.6, 10.8, \n 11, 11.2, 11.4, 11.6, 11.8,\n 12, 12.2, 12.4, 12.6, 12.8,\n 13, 13.2, 13.4, 13.6, 13.8,\n 14, 14.2, 14.4, 14.6, 14.8, \n 15, 15.2, 15.4, 15.6, 15.8, \n 8, 8.2, 8.4\n ]\n _PHASES = [\n 1.5, 0,\n 0.5, 1, 1.5, 0, 0.5,\n 1, 1.5, 0, 0.5, 1,\n 1.5, 0, 0.5, 1, 1.5,\n 0, 0.5, 1, 1.5, 0,\n 0.5, 1, 1.5, 0, 0.5,\n 1, 1.5, 0, 0.5, 1,\n 1.5, 0, 0.5, 1, 1.5,\n 0, 0.5, 1\n ]\n\n _EVENTS = {str(freq): (i+1, (0, 2)) for i, freq in enumerate(_FREQS)}\n \n def __init__(self):\n super().__init__(\n dataset_code='beta', \n subjects=list(range(1, 71)),\n events=self._EVENTS, \n channels=self._CHANNELS, \n srate=250,\n paradigm='ssvep'\n )\n\n def data_path(self, \n subject: Union[str, int], \n path: Optional[Union[str, Path]] = None, \n force_update: bool = False,\n update_path: Optional[bool] = None,\n proxies: Optional[Dict[str, str]] = None,\n verbose: Optional[Union[bool, str, int]] = None) -> List[List[Union[str, Path]]]:\n if subject not in self.subjects:\n raise(ValueError(\"Invalid subject id\"))\n\n if subject < 11:\n url = '{:s}S1-S10.mat.zip'.format(BETA_URL)\n elif subject < 21:\n url = '{:s}S11-S20.mat.zip'.format(BETA_URL)\n elif subject < 31:\n url = '{:s}S21-S30.mat.zip'.format(BETA_URL)\n elif subject < 41:\n url = '{:s}S31-S40.mat.zip'.format(BETA_URL)\n elif subject < 51:\n url = '{:s}S41-S50.mat.zip'.format(BETA_URL)\n elif subject < 61:\n url = '{:s}S51-S60.mat.zip'.format(BETA_URL)\n else:\n url = '{:s}S61-S70.mat.zip'.format(BETA_URL)\n\n file_dest = mne_data_path(url, 'tsinghua', \n path=path, proxies=proxies, force_update=force_update, update_path=update_path)\n\n parent_dir = Path(file_dest).parent\n \n if not os.path.exists(os.path.join(parent_dir, 'S{:d}.mat'.format(subject))):\n # decompression the data\n with zipfile.ZipFile(file_dest, 'r') as archive:\n archive.extractall(path=parent_dir)\n dests = [\n [\n os.path.join(parent_dir, 'S{:d}.mat'.format(subject))\n ]\n ]\n return dests\n\n def _get_single_subject_data(self, subject: Union[str, int], \n verbose: Optional[Union[bool, str, int]] = None) -> Dict[str, Dict[str, Raw]]:\n dests = self.data_path(subject)\n raw_mat = loadmat(dests[0][0])\n epoch_data = raw_mat['data']['EEG'] * 1e-6\n stim = np.zeros((1, *epoch_data.shape[1:]))\n # 0.5s latency\n stim[0, 125] = np.tile(np.arange(1, 41), (epoch_data.shape[-2], 1))\n epoch_data = np.concatenate((epoch_data, stim), axis=0)\n data = np.transpose(epoch_data, (0, 3, 2, 1))\n\n montage = make_standard_montage('standard_1005')\n montage.rename_channels({ch_name: ch_name.upper() for ch_name in montage.ch_names})\n # montage.ch_names = [ch_name.upper() for ch_name in montage.ch_names]\n ch_names = [ch_name.upper() for ch_name in self._CHANNELS]\n ch_names.insert(32, 'M1')\n ch_names.insert(42, 'M2')\n ch_names.insert(59, 'CB1')\n ch_names = ch_names + ['CB2', 'STI 014']\n ch_types = ['eeg']*65\n ch_types[59] = 'misc'\n ch_types[63] = 'misc'\n ch_types[-1] = 'stim'\n\n info = create_info(\n ch_names=ch_names, ch_types=ch_types, 
sfreq=self.srate\n )\n\n runs = dict()\n for i in range(data.shape[-2]):\n raw = RawArray(data=np.reshape(data[..., i, :], (data.shape[0], -1)), info=info)\n raw.set_montage(montage)\n runs['run_{:d}'.format(i)] = raw\n\n sess = {\n 'session_0': runs\n }\n return sess\n\n def get_freq(self, event: str):\n return self._FREQS[self._EVENTS[event][0]-1]\n\n def get_phase(self, event: str):\n return self._PHASES[self._EVENTS[event][0]-1]\n\n\n", "# -*- coding: utf-8 -*-\n\"\"\"\nGuney's network proposed in A Deep Neural Network for SSVEP-based Brain-Computer Interfaces.\n\nModified from https://github.com/osmanberke/Deep-SSVEP-BCI.git\n\"\"\"\nfrom collections import OrderedDict\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom .base import compute_same_pad2d, _narrow_normal_weight_zero_bias, compute_out_size, SkorchNet\n\n\n@SkorchNet\nclass GuneyNet(nn.Module):\n \"\"\"\n Guney's network for decoding SSVEP.\n They used two stages to train the network. \n \n The first stage is with all training data in the dataset. \n lr: 1e-4, batch_size: 100, l2_regularization: 1e-3, epochs: 1000\n \n The second stage is a fine-tuning process with each subject's training data.\n lr: 1e-4, batch_size: full size, l2_regularization: 1e-3, epochs: 1000\n spatial_dropout=time1_dropout=0.6\n \"\"\"\n def __init__(self, n_channels, n_samples, n_classes, n_bands,\n n_spatial_filters=120, spatial_dropout=0.1,\n time1_kernel=2, time1_stride=2, n_time1_filters=120,\n time1_dropout=0.1,\n time2_kernel=10, n_time2_filters=120,\n time2_dropout=0.95):\n # super(GuneyNet, self).__init__()\n super().__init__()\n self.n_channels = n_channels\n self.n_samples = n_samples\n self.n_classes = n_classes\n self.n_bands = n_bands\n\n self.model = nn.Sequential(OrderedDict([\n ('band_layer', nn.Conv2d(n_bands, 1, (1, 1), bias=False)),\n ('spatial_layer', nn.Conv2d(1, n_spatial_filters, (n_channels, 1))),\n ('spatial_dropout', nn.Dropout(spatial_dropout)),\n ('time1_layer', \n nn.Conv2d(n_spatial_filters, n_time1_filters, (1, time1_kernel), \n stride=(1, time1_stride))),\n ('time1_dropout', nn.Dropout(time1_dropout)),\n ('relu', nn.ReLU()),\n ('same_padding',\n nn.ConstantPad2d(\n compute_same_pad2d(\n (1, compute_out_size(n_samples, time1_kernel, stride=time1_stride)), \n (1, time2_kernel), \n stride=(1, 1)), \n 0)),\n ('time2_layer', \n nn.Conv2d(n_time1_filters, n_time2_filters, (1, time2_kernel), \n stride=(1, 1))),\n ('time2_dropout', nn.Dropout(time2_dropout)),\n ('flatten', nn.Flatten()),\n ('fc_layer', nn.Linear(\n n_time2_filters*compute_out_size(n_samples, time1_kernel, stride=time1_stride),\n n_classes))\n ]))\n self.reset_parameters()\n\n @torch.no_grad()\n def reset_parameters(self):\n _narrow_normal_weight_zero_bias(self)\n nn.init.ones_(self.model[0].weight)\n # MATLAB uses xavier_uniform_ with varaiance 2/(input+output)\n # perhaps this is a mistake in Help document\n nn.init.xavier_normal_(self.model[-1].weight, gain=1)\n\n def forward(self, X):\n # X: (n_batch, n_bands, n_channels, n_samples)\n out = self.model(X)\n return out\n" ]
[ [ "numpy.reshape", "numpy.arange", "numpy.concatenate", "numpy.transpose", "numpy.zeros" ], [ "torch.nn.Dropout", "torch.nn.Conv2d", "torch.nn.init.xavier_normal_", "torch.nn.Flatten", "torch.nn.init.ones_", "torch.no_grad", "torch.nn.ReLU" ] ]
shaojinding/Multilingual_Text_to_Speech
[ "5bcbe1154c759357a28a42f131dc59e77fa49e14" ]
[ "utils/logging.py" ]
[ "import random\nimport time\nimport logging\nimport os\n\nimport librosa.display\nimport matplotlib.pyplot as plt\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom utils import audio, text\nfrom params.params import Params as hp\n\n\nclass Logger:\n \"\"\"Static class wrapping methods for Tensorboard logging and plotting of spectrograms, alignments, etc.\"\"\"\n\n @staticmethod\n def initialize(logdir, flush_seconds):\n \"\"\"Initialize Tensorboard logger.\n\n Arguments:\n logdir -- location of Tensorboard log files\n flush_seconds -- see Tensorboard documentation\n \"\"\"\n Logger._sw = SummaryWriter(log_dir=logdir, flush_secs=flush_seconds)\n\n @staticmethod\n def progress(progress, prefix='', length=70):\n \"\"\"Prints a pretty console progress bar.\n\n Arguments:\n progress -- percentage (from 0 to 1.0)\n Keyword argumnets:\n prefix (default: '') -- string which is prepended to the progress bar\n length (default: 70) -- size of the full-size bar\n \"\"\"\n progress *= 100\n step = 100/length\n filled, reminder = int(progress // step), progress % step\n loading_bar = filled * '█'\n loading_bar += '░' if reminder < step / 3 else '▒' if reminder < step * 2/3 else '▓'\n loading_bar += max(0, length - filled) * '░' if progress < 100 else ''\n print(f'\\r{prefix} {loading_bar} {progress:.1f}%', end=('' if progress < 100 else '\\n'), flush=True)\n\n @staticmethod\n def training(train_step, losses, gradient, learning_rate, duration, classifier):\n \"\"\"Log batch training.\n \n Arguments:\n train_step -- number of the current training step\n losses (dictionary of {loss name, value})-- dictionary with values of batch losses\n gradient (float) -- gradient norm\n learning_rate (float) -- current learning rate\n duration (float) -- duration of the current step\n classifier (float) -- accuracy of the reversal classifier\n \"\"\" \n\n # log losses\n total_loss = sum(losses.values())\n Logger._sw.add_scalar(f'Train/loss_total', total_loss, train_step)\n for n, l in losses.items():\n Logger._sw.add_scalar(f'Train/loss_{n}', l, train_step) \n\n # log gradient norm\n Logger._sw.add_scalar(\"Train/gradient_norm\", gradient, train_step)\n \n # log learning rate\n Logger._sw.add_scalar(\"Train/learning_rate\", learning_rate, train_step)\n \n # log duration\n Logger._sw.add_scalar(\"Train/duration\", duration, train_step)\n\n # log classifier accuracy\n if hp.reversal_classifier:\n Logger._sw.add_scalar(f'Train/classifier', classifier, train_step)\n\n @staticmethod\n def evaluation(eval_step, losses, mcd, source_len, target_len, source, target, prediction_forced, prediction, stop_prediction, stop_target, alignment, classifier):\n \"\"\"Log evaluation results.\n \n Arguments:\n eval_step -- number of the current evaluation step (i.e. 
epoch)\n losses (dictionary of {loss name, value})-- dictionary with values of batch losses\n mcd (float) -- evaluation Mel Cepstral Distorsion\n source_len (tensor) -- number of characters of input utterances\n target_len (tensor) -- number of frames of ground-truth spectrograms\n source (tensor) -- input utterances\n target (tensor) -- ground-truth spectrograms\n prediction_forced (tensor) -- ground-truth-aligned spectrograms\n prediction (tensor) -- predicted spectrograms\n stop_prediction (tensor) -- predicted stop token probabilities\n stop_target (tensor) -- true stop token probabilities\n alignment (tensor) -- alignments (attention weights for each frame) of the last evaluation batch\n classifier (float) -- accuracy of the reversal classifier\n \"\"\" \n\n # log losses\n total_loss = sum(losses.values())\n Logger._sw.add_scalar(f'Eval/loss_total', total_loss, eval_step)\n for n, l in losses.items():\n Logger._sw.add_scalar(f'Eval/loss_{n}', l, eval_step) \n\n # show random sample: spectrogram, stop token probability, alignment and audio\n idx = random.randint(0, alignment.size(0) - 1)\n predicted_spec = prediction[idx, :, :target_len[idx]].data.cpu().numpy()\n f_predicted_spec = prediction_forced[idx, :, :target_len[idx]].data.cpu().numpy()\n target_spec = target[idx, :, :target_len[idx]].data.cpu().numpy() \n\n # log spectrograms\n if hp.normalize_spectrogram:\n predicted_spec = audio.denormalize_spectrogram(predicted_spec, not hp.predict_linear)\n f_predicted_spec = audio.denormalize_spectrogram(f_predicted_spec, not hp.predict_linear)\n target_spec = audio.denormalize_spectrogram(target_spec, not hp.predict_linear)\n Logger._sw.add_figure(f\"Predicted/generated\", Logger._plot_spectrogram(predicted_spec), eval_step)\n Logger._sw.add_figure(f\"Predicted/forced\", Logger._plot_spectrogram(f_predicted_spec), eval_step)\n Logger._sw.add_figure(f\"Target/eval\", Logger._plot_spectrogram(target_spec), eval_step) \n \n # log audio\n waveform = audio.inverse_spectrogram(predicted_spec, not hp.predict_linear)\n Logger._sw.add_audio(f\"Audio/generated\", waveform, eval_step, sample_rate=hp.sample_rate) \n waveform = audio.inverse_spectrogram(f_predicted_spec, not hp.predict_linear)\n Logger._sw.add_audio(f\"Audio/forced\", waveform, eval_step, sample_rate=hp.sample_rate) \n \n # log alignment\n alignment = alignment[idx, :target_len[idx], :source_len[idx]].data.cpu().numpy().T\n Logger._sw.add_figure(f\"Alignment/eval\", Logger._plot_alignment(alignment), eval_step) \n \n # log source text\n utterance = text.to_text(source[idx].data.cpu().numpy()[:source_len[idx]], hp.use_phonemes)\n Logger._sw.add_text(f\"Text/eval\", utterance, eval_step) \n \n # log stop tokens\n Logger._sw.add_figure(f\"Stop/eval\", Logger._plot_stop_tokens(stop_target[idx].data.cpu().numpy(), stop_prediction[idx].data.cpu().numpy()), eval_step) \n \n # log mel cepstral distorsion\n Logger._sw.add_scalar(f'Eval/mcd', mcd, eval_step)\n \n # log reversal language classifier accuracy\n if hp.reversal_classifier:\n Logger._sw.add_scalar(f'Eval/classifier', classifier, eval_step)\n\n\n @staticmethod\n def _plot_spectrogram(s):\n fig = plt.figure(figsize=(16, 4))\n hf = int(hp.sample_rate * hp.stft_shift_ms / 1000)\n librosa.display.specshow(s, sr=hp.sample_rate, hop_length=hf, x_axis='time', y_axis='mel', cmap='magma')\n plt.colorbar(format='%+2.0f dB')\n return fig\n\n @staticmethod\n def _plot_alignment(alignment):\n fig = plt.figure(figsize=(6, 4))\n ax = fig.add_subplot(111)\n cax = ax.imshow(alignment, origin='lower', 
aspect='auto', interpolation='nearest')\n fig.colorbar(cax, ax=ax)\n plt.ylabel('Input index')\n plt.xlabel('Decoder step')\n plt.tight_layout() \n return fig\n\n @staticmethod\n def _plot_stop_tokens(target, prediciton):\n fig = plt.figure(figsize=(14, 4))\n ax = fig.add_subplot(111)\n ax.scatter(range(len(target)), target, alpha=0.5, color='blue', marker='+', s=1, label='target')\n ax.scatter(range(len(prediciton)), prediciton, alpha=0.5, color='red', marker='.', s=1, label='predicted')\n plt.xlabel(\"Frames (Blue target, Red predicted)\")\n plt.ylabel(\"Stop token probability\")\n plt.tight_layout()\n return fig\n\n @staticmethod\n def _plot_mfcc(mfcc):\n fig = plt.figure(figsize=(16, 4))\n librosa.display.specshow(mfcc, x_axis='time', cmap='magma')\n plt.colorbar()\n plt.title('MFCC')\n plt.tight_layout()\n plt.show()\n return fig\n\n\nclass ProgressMeter(object):\n def __init__(self, num_batches, *meters, prefix=\"\", logger=None):\n self.batch_fmtstr = self._get_batch_fmtstr(num_batches)\n self.meters = meters\n self.prefix = prefix\n self.logger = logger\n\n def print(self, batch):\n entries = [self.prefix + self.batch_fmtstr.format(batch)]\n entries += [str(meter) for meter in self.meters]\n if self.logger:\n self.logger.info('\\t'.join(entries))\n else:\n print('\\t'.join(entries))\n\n def _get_batch_fmtstr(self, num_batches):\n num_digits = len(str(num_batches // 1))\n fmt = '{:' + str(num_digits) + 'd}'\n return '[' + fmt + '/' + fmt.format(num_batches) + ']'\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n\n def __init__(self, name, fmt=':f'):\n self.name = name\n self.fmt = fmt\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n def __str__(self):\n fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'\n return fmtstr.format(**self.__dict__)\n\n\ndef create_logger(log_dir, phase='train'):\n time_str = time.strftime('%Y-%m-%d-%H-%M')\n log_file = '{}_{}.log'.format(time_str, phase)\n final_log_file = os.path.join(log_dir, log_file)\n head = '%(asctime)-15s %(message)s'\n logging.basicConfig(filename=str(final_log_file),\n format=head)\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n console = logging.StreamHandler()\n logging.getLogger('').addHandler(console)\n\n return logger\n" ]
[ [ "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.title", "matplotlib.pyplot.figure", "matplotlib.pyplot.colorbar", "torch.utils.tensorboard.SummaryWriter", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
mm1860/Densenet_FCN
[ "a5360dce226257a8c826addec22bcf1cdbc66f69" ]
[ "test.py" ]
[ "import argparse\nimport os\nimport os.path as osp\nimport sys\nfrom pprint import pprint\n\nimport tensorflow as tf\n\nfrom config import cfg, update_cfg\nfrom fcn import FC_DenseNet\nfrom unet import UNet\nimport udn\nfrom solver import test_model_2D, test_model_3D\nfrom utils.logger import create_logger\n\ndef parse_args():\n parser = argparse.ArgumentParser(description=\"Test a FCN-DenseNet network\")\n parser.add_argument(\"--mode\", dest=\"mode\", default=\"2D\", type=str, \n choices=[\"2D\", \"3D\"],\n help=\"test mode (2D/3D image, default is 2D)\")\n parser.add_argument(\"--best\", dest=\"best\", default=\"True\", type=str,\n help=\"use the best model or not, if not, cfg.TEST.ITER must be provided\")\n parser.add_argument(\"--cfg\", dest=\"cfg_file\", default=None, type=str,\n help=\"extra configuration (it will cover default config in config.py)\")\n\n if len(sys.argv) == 1:\n parser.print_help()\n sys.exit(1)\n\n args = parser.parse_args()\n return args\n\nif __name__ == '__main__':\n args = parse_args()\n \n if args.cfg_file:\n update_cfg(args.cfg_file)\n \n logdir = osp.join(cfg.SRC_DIR, cfg.LOG_DIR)\n if not osp.exists(logdir):\n os.makedirs(logdir)\n logfile = osp.join(logdir, \"test_%s_%s_iter_%d\" % (cfg.TAG, cfg.PREFIX, cfg.TEST.ITER))\n logger = create_logger(log_file=logfile, withtime=True, propagate=False, name=cfg.LOGGER)\n\n if cfg.TEST.SAVE_MODEL:\n cfg.TRAIN.BS = cfg.TEST.BS_2D\n\n logger.info(\"Configuration: \")\n for handler in logger.handlers:\n pprint(cfg, handler.stream)\n\n model_path = osp.join(cfg.SRC_DIR, cfg.OUTPUT_DIR, cfg.TAG)\n if args.best.lower() not in [\"false\", \"0\"]:\n model_file = osp.join(model_path, \"{:s}_best.ckpt\".format(cfg.PREFIX))\n else:\n model_file = osp.join(model_path, \"{:s}_iter_{:d}.ckpt\".format(cfg.PREFIX, cfg.TEST.ITER))\n\n tfconfig = tf.ConfigProto(allow_soft_placement=True)\n tfconfig.gpu_options.allow_growth = True\n\n # define computation graph\n main_graph = tf.Graph()\n\n sess = tf.Session(config=tfconfig, graph=main_graph)\n\n with main_graph.as_default():\n if cfg.BACKBONE == \"FC-Densenet\":\n net = FC_DenseNet(cfg.MODEL.INIT_CHANNELS, \n cfg.MODEL.BLOCKS, \n cfg.MODEL.NUM_LAYERS_PER_BLOCK,\n cfg.MODEL.GROWTH_RATE, \n bc_mode=True, \n name=\"FCN-DenseNet\")\n elif cfg.BACKBONE == \"UNet\":\n net = UNet(cfg.UNET.INIT_CHANNELS,\n cfg.UNET.NUM_DOWN_SAMPLE,\n cfg.UNET.NUM_CONV_PER_LAYER,\n name=\"UNet\")\n elif cfg.BACKBONE == \"UDN\":\n net = udn.Tiramisu(cfg.UDN.INIT_CHANNELS,\n cfg.UDN.NUM_BLOCKS,\n cfg.UDN.NUM_LAYERS_PER_BLOCK,\n cfg.UDN.GROWTH_RATE,\n bc_mode=True,\n name=\"UDNet\")\n else:\n raise ValueError(\"Un supported backbone: {:s}\".format(cfg.BACKBONE))\n\n net.create_architecture(\"TEST\")\n\n if osp.exists(model_file + \".index\"):\n logger.info(\"Loading checkpoint from \" + model_file)\n saver = tf.train.Saver()\n saver.restore(sess, model_file)\n logger.info(\"Model loaded\")\n if cfg.TEST.SAVE_MODEL:\n saver.save(sess, osp.join(osp.dirname(model_file), \"deploy_\" + osp.basename(model_file)))\n else:\n raise FileNotFoundError(\"Invalid model tag or iters! 
Model file: {:s}\".format(model_file))\n \n if not cfg.TEST.SAVE_MODEL:\n if cfg.PRED_TAG != \"\":\n test_path = osp.join(cfg.SRC_DIR, cfg.PRED_DIR, cfg.PRED_TAG)\n if not osp.exists(test_path):\n os.makedirs(test_path)\n else:\n test_path = None\n \n if args.mode == \"2D\":\n test_model_2D(sess, net, cfg.DATA.TESTSET, test_path)\n elif args.mode == \"3D\":\n test_model_3D(sess, net, cfg.DATA.TESTSET_3D, test_path)\n else:\n raise ValueError(\"Only support 2D and 3D test routine.\")\n\n sess.close()\n" ]
[ [ "tensorflow.ConfigProto", "tensorflow.Graph", "tensorflow.train.Saver", "tensorflow.Session" ] ]
gmichalo/UmlsBERT
[ "ba4b084c44107bb509aee3ebc8772daeac7cf87d" ]
[ "text-classification/dataset/mednli/mednli.py" ]
[ "import pandas as pd\n\n\nclass mednil:\n def __init__(self, path1, path_med):\n self.path1 = path1\n self.path_med = path_med\n self.label = {}\n self.label[1] = 'entailment'\n self.label[0] = 'contradiction'\n self.label[2] = 'neutral'\n\n\n def read(self, path2, path3):\n list_dataset = []\n dataset = self.path_med + path2\n f = open(dataset, \"r\")\n for x in f:\n sentence1 = x.split('sentence1\":')[1].split(\"pairID\")[0][2:-4].strip()\n sentence2 = x.split('sentence2\":')[1].split(\"sentence2_parse\")[0][2:-4].strip()\n label = x.split(\"gold_label\")[1][4:-3].strip()\n list_dataset.append([sentence1, sentence2, label])\n df = pd.DataFrame(list_dataset, columns=[\"sentence1\", \"sentence2\", \"label\"])\n\n\n df.to_csv(self.path1 + path3, index=False, sep=\"\\t\")\n return\n\n\nreader = mednil(\"mednli/\", 'mednli-a-natural-language-inference-dataset-for-the-clinical-domain-1.0.0/')\nreader.read(\"mli_train_v1.jsonl\", \"train.tsv\")\nreader.read(\"mli_dev_v1.jsonl\", \"dev_matched.tsv\")\nreader.read(\"mli_test_v1.jsonl\", \"test_matched.tsv\")\n" ]
[ [ "pandas.DataFrame" ] ]
AntoniosBarotsis/midi2img
[ "848f54c0f3a5175ee636c693b04b6363d00ee9c8" ]
[ "midi2img.py" ]
[ "from music21 import converter, instrument, note, chord\nimport json\nimport sys\nimport numpy as np\nfrom imageio import imwrite\n\ndef extractNote(element):\n return int(element.pitch.ps)\n\ndef extractDuration(element):\n return element.duration.quarterLength\n\ndef get_notes(notes_to_parse):\n\n \"\"\" Get all the notes and chords from the midi files in the ./midi_songs directory \"\"\"\n durations = []\n notes = []\n start = []\n\n for element in notes_to_parse:\n if isinstance(element, note.Note):\n if element.isRest:\n continue\n\n start.append(element.offset)\n notes.append(extractNote(element))\n durations.append(extractDuration(element))\n \n elif isinstance(element, chord.Chord):\n if element.isRest:\n continue\n for chord_note in element.notes:\n start.append(element.offset)\n durations.append(extractDuration(element))\n notes.append(extractNote(chord_note))\n\n return {\"start\":start, \"pitch\":notes, \"dur\":durations}\n\ndef midi2image(midi_path, reps):\n try:\n mid = converter.parse(midi_path)\n except Exception:\n f = open(\"out.log\", \"a\")\n f.write(f\"FAILING PATH: {midi_path}\")\n f.close()\n return\n \n instruments = instrument.partitionByInstrument(mid)\n\n data = {}\n\n try:\n i=0\n for instrument_i in instruments.parts:\n notes_to_parse = instrument_i.recurse()\n\n if instrument_i.partName is None:\n data[\"instrument_{}\".format(i)] = get_notes(notes_to_parse)\n i+=1\n else:\n data[instrument_i.partName] = get_notes(notes_to_parse)\n except Exception:\n notes_to_parse = mid.flat.notes\n data[\"instrument_0\".format(i)] = get_notes(notes_to_parse)\n\n resolution = 0.25\n\n for instrument_name, values in data.items():\n # https://en.wikipedia.org/wiki/Scientific_pitch_notation#Similar_systems\n upperBoundNote = 127\n lowerBoundNote = 21\n maxSongLength = 100\n\n index = 0\n prev_index = 0\n repetitions = 0\n while repetitions < int(reps):\n if prev_index >= len(values[\"pitch\"]):\n break\n\n # Filter out songs that do not include piano here to save time\n if \"piano\" not in instrument_name.lower():\n index += 1\n repetitions+=1\n continue\n\n matrix = np.zeros((upperBoundNote-lowerBoundNote,maxSongLength))\n\n pitchs = values[\"pitch\"]\n durs = values[\"dur\"]\n starts = values[\"start\"]\n\n for i in range(prev_index,len(pitchs)):\n pitch = pitchs[i]\n\n dur = int(durs[i]/resolution)\n start = int(starts[i]/resolution)\n\n if dur+start - index*maxSongLength < maxSongLength:\n for j in range(start,start+dur):\n if j - index*maxSongLength >= 0:\n matrix[pitch-lowerBoundNote,j - index*maxSongLength] = 255\n else:\n prev_index = i\n break\n\n # Remove empty and nearly empty images\n if (np.all(matrix == 0) or is_almost_empty(matrix)):\n index += 1\n repetitions+=1\n continue\n\n try:\n imwrite(\"imgOut/\" + midi_path.split(\"/\")[-1].replace(\".mid\",f\"_{instrument_name}_{index}.png\"),matrix)\n except Exception:\n f = open(\"out.log\", \"a\")\n f.write(midi_path.split(\"/\")[-1].replace(\".mid\",f\"_{instrument_name}_{index}.png\") + \"\\n\")\n f.close()\n\n index += 1\n repetitions+=1\n\ndef main_midi(midi_path, reps):\n import sys\n midi2image(midi_path, reps)\n\ndef is_almost_empty(matrix):\n count = 0\n maxCount = 0\n for i in range(0, matrix[0].size):\n if np.all(matrix[:,i] == 0):\n count = count + 1\n \n if maxCount < count:\n maxCount = count\n else:\n count = 0\n\n return maxCount > 18;\n\n# import sys\n# midi_path = sys.argv[1]\n# midi2image(midi_path)" ]
[ [ "numpy.all", "numpy.zeros" ] ]
omriarad/mcas
[ "f47aab12754c91ebd75b0e1881c8a7cc7aa81278" ]
[ "src/python/pymm/testing/tensors.py" ]
[ "#!/usr/bin/python3 -m unittest\n#\n# testing transient memory (needs modified Numpy)\n#\nimport unittest\nimport pymm\nimport numpy as np\nimport torch\n\ndef colored(r, g, b, text):\n return \"\\033[38;2;{};{};{}m{} \\033[38;2;255;255;255m\".format(r, g, b, text)\n\ndef log(*args):\n print(colored(0,255,255,*args))\n\nshelf = pymm.shelf('myShelf',size_mb=128,pmem_path='/mnt/pmem0',force_new=True)\n\nclass TestTensors(unittest.TestCase):\n\n def test_torch_tensor_shadow_list(self):\n log(\"Testing: tensor ctor\")\n shelf.x = pymm.torch_tensor([1,1,1,1,1])\n print(shelf.x)\n\n def test_torch_tensor_shadow_ndarray_A(self):\n log(\"Testing: tensor ctor\")\n shelf.y = pymm.torch_tensor(np.arange(0,10))\n shelf.y.fill(-1.2)\n print(shelf.y)\n\n def test_torch_tensor_shadow_ndarray_B(self):\n log(\"Testing: tensor ctor\")\n print(shelf.y)\n shelf.y = pymm.torch_tensor(np.arange(0,10))\n shelf.y.fill(-1.3)\n print(shelf.y)\n\n def test_torch_tensor_copy(self):\n log(\"Testing: tensor copy\")\n T = torch.tensor([[1,1,1,1,1],[2,2,2,2,2],[3,3,3,3,3]])\n U = torch.tensor([[1,1,1,1,1],[2,2,2,2,2],[3,3,3,3,3]])\n print(T.shape)\n shelf.x = T\n print(shelf.x)\n self.assertTrue(shelf.x.equal(T))\n Q = torch.tensor([[1,2,3],[4,5,6]])\n\n def test_torch_ones(self):\n log(\"Testing: torch ones\")\n shelf.x = torch.ones([3,5],dtype=torch.float64)\n print(shelf.x)\n shelf.x += 0.5\n print(shelf.x)\n\n def test_torch_leaf(self):\n log(\"Testing: torch tensor leaf\")\n shelf.x = torch.randn(1, 1)\n self.assertTrue(shelf.x.is_leaf)\n\n def test_torch_zerodim_shadow(self):\n log(\"Testing: zero dim shadow\")\n shelf.x = pymm.torch_tensor(1.0)\n self.assertTrue(shelf.x.dim() == 0)\n print(type(shelf.x))\n self.assertTrue(str(type(shelf.x)) == \"<class 'pymm.torch_tensor.shelved_torch_tensor'>\")\n\n def test_torch_zerodim(self):\n log(\"Testing: zero dim copy\")\n shelf.y = torch.tensor(2.0)\n self.assertTrue(shelf.y.dim() == 0)\n self.assertTrue(str(type(shelf.y)) == \"<class 'pymm.torch_tensor.shelved_torch_tensor'>\")\n\n # NOT SUPPORTED\n def NORUN_test_torch_require_grad(self):\n log(\"Testing: requires_grad= param\")\n shelf.x = torch.tensor(1.0, requires_grad = True)\n shelf.z = shelf.x ** 3\n shelf.z.backward() #Computes the gradient \n print(shelf.x.grad.data) #Prints '3' which is dz/dx \n \n def test_torch_tensor(self):\n log(\"Testing: torch_tensor\")\n n = torch.Tensor(np.arange(0,1000))\n shelf.t = torch.Tensor(np.arange(0,1000)) #pymm.torch_tensor(n)\n\n # shelf type S\n self.assertTrue(str(type(shelf.t)) == \"<class 'pymm.torch_tensor.shelved_torch_tensor'>\")\n\n log(\"Testing: torch_tensor sum={}\".format(sum(shelf.t))) \n self.assertTrue(shelf.t.sum() == 499500)\n\n slice_sum = sum(shelf.t[10:20])\n log(\"Testing: torch_tensor slice sum={}\".format(slice_sum))\n self.assertTrue(slice_sum == 145)\n\n # shelf type S after in-place operation\n self.assertTrue(str(type(shelf.t)) == \"<class 'pymm.torch_tensor.shelved_torch_tensor'>\")\n \n # shelf type S * NS (non-shelf type)\n self.assertTrue(str(type(shelf.t * n)) == \"<class 'torch.Tensor'>\")\n \n # shelf type NS * S\n self.assertTrue(str(type(n * shelf.t)) == \"<class 'torch.Tensor'>\")\n\n # shelf type S * shelf type S\n self.assertTrue(str(type(shelf.t * shelf.t)) == \"<class 'torch.Tensor'>\")\n \n shelf.t += 1\n shelf.t *= 2\n shelf.t -= 0.4\n shelf.t /= 2\n\n shelf.erase('t')\n\n def test_torch_reassign(self):\n log(\"Testing: torch_tensor reassign\")\n shelf.c = torch.tensor([[1, 2, 3], [4, 5, 6], [9,10,11]])\n print(shelf.c)\n shelf.c 
= shelf.c.clone().view(9,-1) \n print(shelf.c)\n with self.assertRaises(RuntimeError):\n shelf.c = shelf.c.view(9,-1)\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "torch.randn", "numpy.arange", "torch.ones", "torch.tensor" ] ]
cherepas/circles_public
[ "7c8a429cb196fcca9d77d7e992af7a4c70882521" ]
[ "plot_output/e074/drafts/e074w004/main.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\nfrom __future__ import print_function\nfrom __future__ import division\nimport argparse\nimport open3d\nimport torch as t\n# here was hvd import\nimport torch.nn as nn\nimport numpy as np\nimport torchvision\nfrom torchvision import transforms\nfrom skimage import io, transform\nfrom torch.utils.data import Dataset\nimport pandas as pd\nimport sys\nfrom numpy import linalg as LA\nimport datetime\nimport torch.multiprocessing as mp\nimport shutil\nfrom inspect import currentframe, getframeinfo\nimport h5py\nfrom os.path import join as jn\nfrom seed_everything import seed_everything\nimport random\nfrom torch.optim.lr_scheduler import StepLR\nfrom helpers import *\nfrom switcher import *\nfrom preprocess import *\nfrom transformdef import *\n#from cnet import *\n# import csv\n# from functools import wraps\n# from pathlib import Path\n# import imageio\n# from scipy.special import sph_harm\n# from skimage.measure import regionprops\n# from skimage import filters\n# import matplotlib.pyplot as plt\n# import time\n# import os\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n\n def setarg(parser, argname, dfl):\n parser.add_argument('-'+argname, dest=argname,\n action='store_true')\n parser.add_argument('-no_'+argname, dest=argname,\n action='store_false')\n exec('parser.set_defaults('+argname+'=dfl)')\n parser.add_argument('-bs', type=int, default=4)\n parser.add_argument('-epoch', type=int, default=8)\n parser.add_argument('-lr', type=float, default=5e-5)\n # normalize output on the tmean matrix, to have min = 0 and max = 1\n setarg(parser, 'minmax',False)\n # normalize input point cloud to have every coordinate between 0 and 1\n setarg(parser, 'minmax3dimage',False)\n # normalize input point cloud, that it is in canonical view\n setarg(parser, 'normalize',False)\n # centerize input point cloud, to have it's center of masses in the origin\n setarg(parser, 'center',False)\n # linearly downsample input point cloud\n parser.add_argument('-downsample', type=int, default=1)\n # use f_n or f, that was gotten with normalization on canonical view before\n # processing\n setarg(parser, 'classicnorm',False)\n # cut the number of maximum SH amplitude to regress\n parser.add_argument('-ampl', type=int, default=441)\n # centerize seed on the input image and crop to this width\n parser.add_argument('-cmscrop', type=int, default=0)\n parser.add_argument('-cencrop', type=int, default=700)\n # rescale input image\n parser.add_argument('-rescale', type=int, default=500)\n setarg(parser, 'use_adasum',False)\n parser.add_argument(\n '-gradient_predivide_factor', type=float, default=1.0,\n help='apply gradient predivide factor in optimizer (default: 1.0)')\n # name of experiment directory\n parser.add_argument('-expnum', type=str, default='111')\n # hidden_dim - size of appendix FC layers\n parser.add_argument(\n '-hidden_dim', nargs='+', type=int, default=[5000,2500,1000,441])\n parser.add_argument(\n '-chidden_dim', nargs='+', type=int, default=[96, 128, 256, 256, 256])\n parser.add_argument('-kernel_sizes', nargs='+', default=[7, 3, 3, 3, 3, 3])\n # number of input images that will be loaded\n parser.add_argument('-num_input_images', type=int, default=1)\n # name of standard model\n parser.add_argument('-model_name', type=str, default='')\n parser.add_argument('-netname', nargs='+', default=['cnet'])\n setarg(parser, 'use_pretrained',False)\n parser.add_argument('-weight_decay', type=float, default=0)\n # used to load images all in parallel, or merge 
them after output\n # \"separate\" merging order means to get from Dataloader tensor like as for\n # color channel, that [15, 3, 1000, 1800], but then reshape this tensor to\n # the [45, 1, 1000, 1800] and work with it like with separate data points\n parser.add_argument('-merging', type=str,\n choices=['color', 'latent', 'batch'], default='batch')\n # take input image of random angle, if not, then image will\n # be taken relative to the horizontal pose\n setarg(parser, 'rand_angle',False)\n # number of experiment from phenoseeder\n parser.add_argument('-specie', type=str, default='598')\n # number of sampled directions to make subsampling after f_n\n parser.add_argument('-num_sam_points', type=int, default=500)\n # loss calculating between 'pc','f' or 'f_n'\n parser.add_argument('-lb', type=str, default='f')\n # short description what exactly this job is up for\n parser.add_argument('-expdescr', type=str, default='')\n # use csv file with pathes to all input files together with\n # horizontal image index\n setarg(parser, 'use_existing_csv',True)\n setarg(parser, 'use_sep_csv',True)\n # instead of input files noise is generating with random numbers\n setarg(parser, 'noise_input',False)\n # use convolutional part of the network or not\n setarg(parser, 'haf',True)\n # type of input data. can be 'img', 'f' or 'pc'\n parser.add_argument('-inputt', type=str, default='img')\n # normalize to make min = 0 and max = 1 for input f\n setarg(parser, 'minmax_f',True)\n # criterion to calculate loss\n parser.add_argument('-criterion', type=str, default='L1')\n # number of GPUs is used in the job\n parser.add_argument('-ngpu', type=int, default=4)\n # type of parallelization. 'hvd' means horovod, or 't'\n parser.add_argument('-parallel', type=str, choices=['horovod', 'torch'],\n default='hvd')\n # in case loading standard model, it can be use as feature extracting\n # (when freezeing all layers except the last one)\n setarg(parser, 'feature_extract',False)\n # if load only one image as input, this will be always image with index\n # 000_rotation\n # if load more than 1 image, then number of images will be spread evenly in\n # the range (0,36)\n # if false, images will be taking that first image in views will be with\n # horizontal pose\n setarg(parser, 'zero_angle',True)\n # is used for testing computing time,\n # where all needed files including data in one folder\n parser.add_argument('-single_folder',\n dest='single_folder', action='store_true')\n parser.set_defaults(single_folder=False)\n parser.add_argument('-noise_output', dest='noise_output',\n action='store_true')\n parser.set_defaults(noise_output=False)\n # only log will be in the output\n setarg(parser, 'save_output',True)\n # type of data that is loaded for gt. 
for example, single_f_n\n # means that only *f_n files will be used for GT in dataloader\n # and maybe it will be singular loading of y_n\n # it is used separate transform_f_n.py to not load more than is\n # needed\n # In case if gt is loaded not from dataloader, but from csv or from h5 file,\n # there is option \"single_file\"\n parser.add_argument('-gttype', type=str,\n choices=['single_file'],\n default='single_file')\n # name of csv that will be used for loading GT\n # it can be 598csv9 for original pose and 598csv11 for normalized pose\n parser.add_argument('-csvname', type=str, default='598csv9')\n # name of the csv which will be used for loading data\n # choices are : 598frame for full or 598frame_dummy\n parser.add_argument('-dfname', type=str, default='598frame')\n # factor on which all output point cloud data will be normalized\n parser.add_argument('-pscale', type=int, default=100)\n # if view_sep = True, and more than one image is loaded,\n # all input images will be treated as separate data elements\n # new dataframe will be created\n setarg(parser, 'view_sep',False)\n # rotate directions together with angle from which\n # current image were taken\n setarg(parser, 'rot_dirs',False)\n # for dataloader\n parser.add_argument('-num_workers', type=int, default=0)\n setarg(parser, 'pin_memory',False)\n # manually calculate distance vector F out of point cloud output\n setarg(parser, 'man_dist',False)\n setarg(parser, 'use_cuda',True)\n parser.add_argument('-machine', type=str,\n choices=['jureca', 'workstation', 'lenovo', 'huawei'],\n default='jureca')\n setarg(parser, 'maintain',False)\n setarg(parser, 'maintain_line',False)\n parser.add_argument('-wandb', type=str, default=\"\")\n setarg(parser, 'measure_time',False)\n setarg(parser, 'rotate_output',False)\n parser.add_argument('-transappendix', type=str, default=\"_image\")\n # how often to save batch output intermediate in epoch\n parser.add_argument('-batch_output', type=int, default=2)\n # minmax fun for current ground truth preparation before training\n parser.add_argument('-minmax_fn', type=str,\n choices=['min,max','mean,std', ''], default='')\n parser.add_argument('-updateFraction', type=float, default=3)\n parser.add_argument('-standardize', nargs='+', default=255)\n # parser.add_argument('-standardize', default=(18.31589541, 39.63290785))\n # if rmdirname is True, delete dirname content and use this directory again\n # for saving output\n setarg(parser, 'rmdirname', False)\n parser.add_argument('-steplr', nargs='+', type=float, default=(30,1))\n parser.add_argument('-outputt', type=str,\n choices=['points','pose6', 'eul', 'orient'],\n default='points')\n parser.add_argument('-ufmodel', type=int, default=100000)\n parser.add_argument('-framelim', type=int, default=int(1e20))\n parser.add_argument('-conTrain', type=str, default='')\n # how often to print loss in the log output\n parser.add_argument('-print_minibatch', type=int, default=10)\n # for orientation there are two right GT, because it is a ray. That is why\n # augementation of ground truth is needed for evaluation\n parser.add_argument('-aug_gt', nargs='+', type=str, default=(''))\n parser.add_argument('-datapath', type=str,\n default='C:/cherepashkin1/phenoseed')\n # job name is used to create corresponding subdirectory\n parser.add_argument('-jobname', type=str, default='')\n # real job of the executed sh file. 
it is needed to copy sh file to the new\n # directory\n parser.add_argument('-realjobname', type=str, default='')\n parser.add_argument('-jobdir', type=str, default='')\n setarg(parser, 'loadh5', False)\n opt = parser.parse_args()\n # print(215, opt.jobname)\n # sys.exit()\n if opt.parallel == 'horovod':\n import horovod.torch as hvd\n hvd.init()\n t.cuda.set_device(hvd.local_rank())\n t.set_num_threads(1)\n rank = hvd.rank()\n else:\n rank = 0\n homepath = __file__.replace(__file__.split('/')[-1], '')\n dir1 = jn(__file__.replace(__file__.split('/')[-1], ''), 'plot_output',\n opt.expnum)\n dirname = jn(dir1, opt.jobname.replace('.sh', ''))\n\n if opt.save_output and rank == 0 and os.path.isdir(dirname) and opt.rmdirname:\n saferm(dirname)\n os.rmdir(dirname)\n if opt.save_output and rank == 0 and not os.path.isdir(dirname):\n Path(dirname).mkdir(parents=True, exist_ok=True)\n elif opt.save_output and rank==0 and os.path.isdir(dirname) and not opt.rmdirname:\n print('folder is not empty')\n sys.exit()\n # savedirname(opt,rank,dirname)\n if opt.save_output and rank == 0:\n shutil.copy(jn(opt.jobdir, opt.realjobname), jn(dirname, opt.realjobname))\n # dirname = jn(dir1, opt.localexp)\n # if opt.save_output and rank == 0 and not opt.localexp:\n # with open(jn(dir1,'counter.txt'), 'r') as f:\n # secnt = int(f.readlines()[0])\n # secnt += 1\n # dirname = None\n # while dirname is None:\n # try:\n # dirname = jn(dir1,str(secnt).zfill(3))+opt.machine[0]\n # Path(dirname).mkdir(parents=True, exist_ok=True)\n # except:\n # time.sleep(0.1)\n # pass\n # with open(jn(dir1, 'counter.txt'), 'w') as f:\n # f.write(str(secnt))\n # elif all([opt.localexp, opt.save_output, rank == 0,\n # os.path.isdir(dirname)]):\n # for filename in os.listdir(dirname):\n # file_path = jn(dirname, filename)\n # try:\n # if os.path.isfile(file_path) or os.path.islink(file_path):\n # os.unlink(file_path)\n # elif os.path.isdir(file_path):\n # shutil.rmtree(file_path)\n # except Exception as e:\n # print('Failed to delete %s. 
Reason: %s' % (file_path, e))\n # elif all([opt.localexp, opt.save_output, rank == 0,\n # not os.path.isdir(dirname)]):\n # Path(dirname).mkdir(parents=True, exist_ok=True)\n # elif rank == 0:\n # dirname = jn(dir1, 'misc')\n\n conTrain = opt.conTrain\n if opt.conTrain and rank==0:\n print(jn(dir1,conTrain,'opt.csv'))\n opt = csv2dic(jn(dir1,conTrain,'opt.csv'))\n if rank == 0:\n dic2csv(jn(dirname,'opt.csv'), opt.__dict__)\n nim = opt.num_input_images\n enim = nim if not opt.view_sep else 1\n nsp = opt.num_sam_points\n tstart = time.time()\n iscuda = opt.use_cuda and t.cuda.is_available()\n if rank == 0:\n print('iscuda=',iscuda)\n device = t.device(\"cuda:0\" if t.cuda.is_available() else \"cpu\")\n seed = 0\n if iscuda:\n t.cuda.manual_seed_all(seed)\n t.backends.cudnn.deterministic = True\n t.backends.cudnn.benchmark = False\n seed_everything(seed)\n os.environ[\"CUBLAS_WORKSPACE_CONFIG\"] = \":16:8\"\n\n def worker_init_fn(worker_id):\n worker_seed = t.initial_seed() % 2 ** 32\n np.random.seed(worker_seed)\n random.seed(worker_seed)\n g = t.Generator()\n g.manual_seed(seed)\n\n def generator(seed):\n g = t.Generator()\n g.manual_seed(seed)\n return g\n vox2mm = 0.05\n mt = opt.maintain and rank == 0\n mt1 = opt.maintain_line and rank == 0\n if rank == 0:\n print(\"PyTorch Version: \", t.__version__)\n print(\"Torchvision Version: \", torchvision.__version__)\n print('opt:\\n',opt)\n print('sys.argv:\\n',sys.argv)\n print('seed = ', seed)\n classicnorm = '_prenormalized' if opt.classicnorm else ''\n if '619' in opt.specie:\n original_h, original_w = 2048, 2448\n else:\n original_h, original_w = 1000, 1800\n dataPath = opt.datapath\n csvPath = jn(homepath, 'csv')\n if rank == 0:\n print('path were main.py is located=',homepath)\n if opt.single_folder:\n exec('from'+opt.netname+'import *')\n else:\n for name in opt.netname:\n exec('from experiments.'+opt.expnum+'.'+name+' import *')\n\n\n\n def my_loss(output, target):\n myloss = t.mean(t.multiply(weightv, (output - target))**2)\n return myloss\n # if mt1:\n # frameinfo = getframeinfo(currentframe())\n # print(frameinfo.filename, frameinfo.lineno)\n if opt.save_output and rank == 0 and not os.path.exists(dir1):\n os.mkdir(dir1)\n # print(333, dir1, homepath, opt.jobdir)\n sys.path.insert(1, homepath)\n sys.path.insert(2, dir1)\n sys.path.insert(3, opt.jobdir)\n # from cnet import *\n if opt.model_name:\n from standard_models import *\n print('opt.wandb = ',opt.wandb)\n if opt.wandb:\n import wandb\n if opt.wandb != 'disabled' and opt.wandb and opt.machine != 'jureca':\n wandb.init(project=opt.wandb, config=vars(opt))\n elif opt.wandb != 'disabled' and opt.wandb and opt.machine == 'jureca':\n wandb.init(mode=\"offline\", config=vars(opt))\n elif opt.wandb == 'disabled':\n wandb.init(mode=\"disabled\")\n\n lb = opt.lb\n epoch0 = -1\n\n #@simple_time_tracker(_log)\n class Prepare_train(object):\n def __init__(self, **argd):\n self.__dict__.update(argd)\n\n def trainit(lframe, train_part, bs, nsp, classicnorm, csvname, ampl,\n rank, homepath, rot_dirs, merging_order, iscuda):\n\n # print(time.ctime()) # 'Mon Oct 18 13:35:29 2010'\n lbs = []\n ebn = []\n batchsum = []\n trFrac = [0.8, 1-0.8]\n lfl = [train_part, len(lframe)-train_part]\n for pcnt, phase in enumerate(['train', 'val']):\n lbs.append(lfl[pcnt]%bs if lfl[pcnt]%bs else bs)\n ebn.append(int(len(lframe)*trFrac[pcnt]/bs))\n if rank == 0:\n print('{} consists of {} full batches '\n 'with {} tensors with {}' \\\n ' views'.format(phase, ebn[pcnt], opt.bs, nim))\n if lfl[pcnt]%opt.bs and 
rank==0:\n print('the last batch has size of {}' \\\n ' tensors with {} views'.format(lbs[pcnt], nim))\n batchsum.append(ebn[pcnt] + lbs[pcnt])\n # TODO change name F_Nw on GTw, as to have single variable name for all\n if opt.loadh5:\n y_n, bX, F_Nw, prmatw = \\\n [np.array(h5py.File(jn(csvPath,nm), 'r').get('dataset')) for nm in\n ['y_n_fibo_' + str(nsp) + classicnorm+'_whole.h5',\n 'bX_fibo_' + str(nsp) + classicnorm+'_whole.h5',\n csvname+'_F_N.h5',\n opt.specie+'prmat.h5']]\n bigm = np.copy(prmatw)\n bigm[:, :, 3, :] = np.repeat(np.repeat(np.expand_dims([0, 0, 0, 1],\n axis=(0, 1)), 36, axis=1), prmatw.shape[0], axis=0)\n matw = np.einsum('ij, njk->nik', np.linalg.inv(bigm[0, 0, :, :]),\n bigm[:, 0, :, :])\n F_Nw = F_Nw[:, :ampl]\n tmean = np.zeros([2, F_Nw.shape[0], ampl])\n for x in ['train', 'val']:\n df_dict[x].to_csv(jn(dirname, 'pathes_df_' + x + '.csv'),\n index=False)\n if opt.minmax_fn == 'min,max':\n tmean[0] = np.repeat(np.expand_dims(np.nanmin(F_Nw, 0),\n axis=0), F_Nw.shape[0],\n axis=0)\n tmean[1] = np.repeat(np.expand_dims(np.nanmax(F_Nw, 0),\n axis=0), F_Nw.shape[0],\n axis=0)\n F_Nw = (F_Nw - tmean[0]) / (tmean[1] - tmean[0])\n elif opt.minmax_fn == 'mean,std':\n tmean[0] = np.repeat(np.expand_dims(np.nanmean(F_Nw, 0),\n axis=0), F_Nw.shape[0],\n axis=0)\n tmean[1] = np.repeat(np.expand_dims(np.nanstd(F_Nw, 0),\n axis=0), F_Nw.shape[0],\n axis=0)\n F_Nw = (F_Nw - tmean[0]) / tmean[1]\n bX = t.transpose(t.Tensor(bX), 0, 1)\n y_n, F_Nw, prmatw, bigm, matw = [t.Tensor(i) for i in \\\n [y_n, F_Nw, prmatw, bigm, matw]]\n else:\n y_n, bX, F_Nw, prmatw, bigm, matw = (t.zeros(1),)*6\n prmat = np.genfromtxt(jn(\n homepath, 'csv', 'prmat.csv').replace('\\\\', '/'),\n delimiter=',')\n C = np.zeros([36,3,3])\n E = [[1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0]]\n # TODO exchange loop with vectorization with einsum\n for i in range(36):\n C[i,:,:] = \\\n np.matmul(np.matmul(E,prmat[4*i:4*(i+1),:]),\n np.linalg.pinv(np.matmul(E,prmat[0:4,:])))\n # TODO rewrite this part in order to reuse the same lines of code for\n # different options\n if opt.outputt == 'eul' \\\n and opt.merging == 'color':\n GTw = lframewh.loc[:,['eul' + str(i) for i in range(3)]].values\n elif opt.outputt == 'eul' and opt.merging == 'batch':\n orients = lframewh.loc[:,['orient' + str(i)\\\n for i in range(9)]].values\n GTw0 = np.einsum('ijk,nkm->nijm', LA.inv(C),\n orients.reshape([-1,3,3]))\n GTw = np.zeros([GTw0.shape[0],GTw0.shape[1],3])\n # print(610, GTw.shape)\n for i in range(GTw0.shape[0]):\n for j in range(GTw0.shape[1]):\n GTw[i,j,:] = rot2eul(GTw0[i,j])\n elif opt.outputt == 'orient' and opt.merging == 'color':\n GTw = lframewh.loc[:,['orient' + str(i) for i in range(9)]].values\n elif opt.outputt == 'orient' and opt.merging == 'batch':\n orients = lframewh.loc[:, ['orient' +\\\n str(i) for i in range(9)]].values\n GTw0 = np.einsum('ijk,nkm->nijm', LA.inv(C),\n orients.reshape([-1, 3, 3]))\n GTw = GTw0.reshape([-1,36,9])\n # TODO normalize GTws for merging order separate\n if opt.outputt == 'eul' and \\\n opt.merging == 'batch' and opt.minmax_fn:\n gstat = np.array([np.min(GTw,axis=(0,1)), np.max(GTw,axis=(0,1)),\n np.mean(GTw,axis=(0,1)), np.std(GTw,axis=(0,1))])\n if opt.outputt == 'eul' and \\\n opt.merging == 'color' and opt.minmax_fn:\n gstat = np.array(getstat(GTw))\n if opt.outputt == 'eul' and opt.minmax_fn == 'min,max':\n GTw = (GTw-gstat[0])/(gstat[1]-gstat[0])\n elif opt.outputt == 'eul' and opt.minmax_fn == 'mean,std':\n GTw = (GTw-gstat[2])/gstat[3]\n C, GTw = [t.Tensor(i) for i in [C, 
GTw]]\n if iscuda:\n bX, C, y_n, F_Nw, prmatw, bigm, matw, GTw =\\\n [i.cuda() for i in \\\n [bX, C, y_n, F_Nw, prmatw, bigm, matw, GTw]]\n if lb in ('eul', 'orient'):\n y_n2, y_n, dirs = (None,)*3\n if lb in ('f', 'pc', 'pc+f') and not rot_dirs:\n y_n2_one = t.unsqueeze(y_n[:nsp,:], axis=0)\n y_n2 = y_n2_one.repeat(opt.bs*enim, 1, 1)\n dirs = bX[:, :2].repeat(opt.bs*enim, 1, 1)\n if all([not lb in ('eul', 'orient'), not rot_dirs,\n merging_order == 'color']):\n y_n2_one = t.unsqueeze(y_n[:nsp,:], axis=0)\n y_n2 = y_n2_one.expand(bs, nsp, ampl)\n dirs = bX[:, :2].expand(bs, nsp, 2)\n for dirc in ['netOutputs', 'latent', 'loss_out']:\n Path(jn(dirname, dirc)).mkdir(parents=True, exist_ok=True)\n lossoutdir = jn(dirname, 'loss_out')\n return y_n, bX, F_Nw, bX, C, y_n2, dirs, \\\n prmatw, bigm, matw, batchsum, lossoutdir, GTw\n\n def train_model(model, optimizer):\n since = time.time()\n lossar = np.zeros([4, opt.epoch])\n # lossarb = np.zeros([2, opt.epoch])\n curloss = np.zeros([2, opt.epoch])\n # lt = np.zeros([4, (len(lframe)*trFrac[0]//bs+1)])\n cnt = 0\n abs_batch_cnt = [0, 0]\n y_n, bX, F_Nw, bX, C, y_n2, dirs, prmatw, \\\n bigm, matw, batchsum, lossoutdir, GTw = \\\n trainit(lframe, train_part, opt.bs,\n nsp, classicnorm, opt.csvname, opt.ampl,\n rank, homepath, opt.rot_dirs,\n opt.merging, iscuda)\n lossmb = [np.zeros([opt.epoch*batchsum[0]]),\n np.zeros([opt.epoch*batchsum[1]])]\n for epoch in range(opt.epoch):\n if opt.parallel == 'horovod':\n samplers = {x: t.utils.data.distributed.DistributedSampler(\n image_datasets[x], num_replicas=hvd.size(),\n rank=hvd.rank(), shuffle=False) for x in ['train', 'val']}\n # Create training and validation dataloaders\n dataloaders = {x: t.utils.data.DataLoader(\n image_datasets[x],\n batch_size=opt.bs, shuffle=False, sampler=samplers[x],\n worker_init_fn=None,\n generator=g, **kwargs) for x in ['train', 'val']}\n elif opt.parallel == 'torch':\n samplers = None\n dataloaders = {'train': t.utils.data.DataLoader(\n image_datasets['train'],\n batch_size=opt.bs, shuffle=True,\n num_workers=opt.num_workers),\n 'val': t.utils.data.DataLoader(\n image_datasets['val'],\n batch_size=opt.bs, shuffle=False,\n num_workers=opt.num_workers)\n }\n if mt:\n print('start training', time.time())\n if rank == 0:\n print('Epoch {}/{}'.format(epoch, opt.epoch - 1))\n print('-' * 10)\n print(time.ctime())\n ste = time.time()\n # Each epoch has a training and validation phase\n for pcnt, phase in enumerate(['train', 'val']):\n if phase == 'train':\n model.train() # Set model to training mode\n else:\n model.eval() # Set model to evaluate mode\n rloss, rloss0, rloss1 = [0.0]*3\n if opt.parallel == 'horovod':\n train_sampler = samplers[phase]\n train_sampler.set_epoch(epoch)\n # Iterate over data.\n for i_batch, sample_batched in enumerate(dataloaders[phase]):\n # print(idx)\n bst = time.time()\n if mt:\n print('start %d batch loading at %f' %(i_batch,\n time.time()))\n if opt.measure_time:\n ts = time.time()\n inputs = sample_batched[0]['image']\n fl2int = lambda x : [int(x[i]) for i in range(len(x))]\n # absolute index\n index = fl2int(sample_batched[2].tolist())\n angles_list = sample_batched[1].tolist()\n pathes = lframe.loc[index, 'file_name'].tolist()\n sz = inputs.shape\n # cnum = sz[1]\n if all([lb in ('pc', 'f', 'eul', 'orient'),\n opt.gttype == 'single_file',\n opt.merging != 'color']):\n # TODO consolidate three senteces into one\n # TODO save index, angles list to one array, not two\n # separate lists\n pathes = [j for j in pathes for i in range(enim)]\n 
index = [j for j in index for i in range(enim)]\n angles_list = [item for sublist in angles_list\\\n for item in sublist]\n # TODO use unified angles everywhere,\n # in degrees or in //10\n angles_list = [j//10 for j in angles_list]\n if all([lb in ('pc', 'f'), opt.gttype == 'single_file']):\n f_n = F_Nw[index,:]\n if all([lb in ('eul', 'orient'), opt.gttype == 'single_file',\n opt.merging == 'batch']):\n GT = GTw[index, angles_list,:]\n if opt.outputt in ('eul', 'orient') and lb in\\\n ('eul', 'orient') and \\\n opt.merging == 'color':\n GT = GTw[index]\n if (opt.inputt in ('img', 'pc', 'eul', 'orient')) and \\\n opt.merging == 'batch':\n inputs = t.Tensor(inputs).cuda() if iscuda\\\n else t.Tensor(inputs)\n inputs = t.unsqueeze(t.reshape(inputs,\n (sz[0]*sz[1],\n sz[2], sz[3])),\n axis = 1)\n elif opt.inputt in ('img', 'pc') and \\\n opt.merging == 'color':\n inputs = inputs.cuda() if iscuda else inputs\n cbs = inputs.shape[0]\n # zero the parameter gradients\n optimizer.zero_grad()\n if opt.measure_time:\n tf = time.time()\n if opt.wandb and opt.measure_time:\n wandb.log({'loading time '+phase: tf-ts})\n if phase == 'train' and opt.measure_time:\n lt[0, i_batch] = tf-ts\n # forward\n # track history if only in train\n if lb != 'f_n' and not opt.rand_angle and \\\n opt.rot_dirs:\n y_n2 = t.zeros(cbs,nsp,opt.ampl)\n y_n2 = y_n2.cuda() if iscuda else y_n2\n for i, angle in enumerate(angles_list):\n y_n2[i] = \\\n y_n[int(angle/10)*nsp:(int(angle/10) +\n 1)*nsp,:]\n if lb != 'f_n' and opt.rot_dirs:\n dirs = t.zeros(cbs, nsp, 2)\n dirs = dirs.cuda() if iscuda else dirs\n for i, angle in enumerate(angles_list):\n dirs[i] = \\\n bX[:, int(angle/10)*2:\n (int(angle/10) +\n 1)*2]\n if lb in ('pc', 'pc+f'):\n # making pc GT\n GT = fn2p(y_n2[:cbs], f_n, dirs[:cbs], nsp,\n vox2mm, iscuda)\n # TODO check whether it makes sense to shift\n # cms and then subtract it again, if matrix only\n # translate\n GT = t.cat((GT, t.ones(nsp).repeat(GT.shape[0],\n 1, 1).cuda()),\n axis=1)\n GT = t.einsum('nji,nkj->nik', GT, matw[index, :, :])\n GT = (GT / t.unsqueeze(GT[:, :, 3], axis=2))[:, :, :3]\n GT = t.transpose(GT, 1, 2)\n if lb == 'pc' and opt.merging != 'color':\n for i in range(cbs):\n p[i, :, :] = t.matmul(\n t.transpose(t.squeeze(\n C[int(angles_list[i]/10), :, :]), 0, 1),\n p[i, :, :])\n if lb == 'f':\n GT = t.einsum('bh,bph->bp', f_n, y_n2[:cbs])\n with t.set_grad_enabled(phase == 'train'):\n # Get model outputs and calculate loss\n if opt.measure_time:\n ts = time.time()\n loss, outputs, outputs_2, latent = out2loss(opt, model,\n inputs, iscuda, nsp, cbs, y_n2, C, angles_list, lb,\n vox2mm, GT, loss_fn, moments, phase, dirname,\n i_batch, epoch)\n if phase == 'train' and i_batch% \\\n (int(1/opt.updateFraction))==0:\n if opt.measure_time:\n ts = time.time()\n loss.backward()\n optimizer.step()\n if opt.measure_time:\n tf = time.time()\n lt[3, i_batch] = tf-ts\n # np.savetxt(jn(dirname,'outputs_2_'+phase+'_'+\\\n # str(epoch).zfill(3)+'_'+\\\n # str(i_batch).zfill(3)),\n # outputs_2[0].cpu().detach().numpy(),delimiter=',')\n if lb == 'pc+f':\n rloss0 += loss0.item()\n rloss1 += loss1.item()\n else:\n rloss += loss.item()\n cond = all([rank == 0, opt.save_output,\n i_batch % opt.batch_output == 0, i_batch > 0,\n lb == 'pc' or lb == 'f', epoch > epoch0])\n if rank == 0 and i_batch % opt.print_minibatch == 0:\n print('batch {}, {} loss = {:.2f}, '\n 'mean loss = {:.2f}'.format(i_batch, phase,\n loss.item(),\n rloss/(i_batch+1)))\n lossmb[pcnt][abs_batch_cnt[pcnt]] = loss.item()\n 
abs_batch_cnt[pcnt]+=1\n print(time.ctime())\n\n if cond and not opt.minmax:\n gtb = GT.detach().cpu().numpy()\n gtb0 = gtb[0]\n if cond and not opt.minmax and opt.outputt != 'f_n':\n ob = outputs_2.detach().cpu().numpy()*opt.pscale*vox2mm\n elif cond and not opt.minmax and opt.outputt == 'f_n':\n ob = fn2p(y_n2[:cbs], outputs, dirs[:cbs],\n nsp, vox2mm, iscuda)\n if cond and epoch == epoch0+1:\n Path(jn(dirname,'perbatch_showpoint')). \\\n mkdir(parents=True, exist_ok=True)\n np.savetxt(jn(dirname,'perbatch_showpoint', \\\n 'gtb_'+phase+ \\\n '_' + str(i_batch).zfill(3)),\n gtb.reshape(cbs, -1))\n if cond and opt.lb != 'f':\n oob = ob[0]\n oob = oob.reshape((3, nsp))\n if cond and opt.lb == 'f' and opt.outputt == 'f_n':\n oob = ob[0]\n showmanypoints(cbs,nim,ob,gtb,pathes,angles_list,phase,\n i_batch,cnt,jn(dirname,'showPoints',\n 'perbatch_showpoint'),\n opt.merging, vox2mm)\n np.savetxt(jn(dirname,'perbatch_showpoint', \\\n 'o_'+phase+'_' + str(cnt).zfill(3) + \\\n '_' + str(i_batch).zfill(3)),\n ob.reshape(cbs, -1))\n print('curloss for %s phase for %d epoch for %d '\n 'batch = %f' \\\n %(phase,epoch,i_batch,np.mean(\n np.abs(LA.norm(oob,axis=0)-LA.norm(gtb0,axis=0)))))\n print(time.ctime())\n log1 = all([rank == 0, epoch > epoch0, opt.save_output,\n lb != 'f_n'])\n if epoch == epoch0 + 1 and opt.save_output and rank == 0 and \\\n phase == 'train':\n original_stdout = sys.stdout\n with open(jn(dirname, \"opt.txt\"), 'a') as f:\n sys.stdout = f # Change the standard output to\n # the file we created.\n print(opt)\n with open(jn(dirname, \"sys_argv.txt\"), 'a') as f:\n sys.stdout = f\n print(sys.argv)\n sys.stdout = original_stdout\n if opt.wandb and opt.measure_time:\n wandb.log({'backward time '+phase: tf-ts})\n if lb == 'pc+f':\n lossar[pcnt][epoch] = rloss0/(i_batch+1)\n lossar[pcnt+2][epoch] = rloss1/(i_batch+1)\n else:\n lossar[pcnt][epoch] = rloss/(i_batch+1)\n if opt.wandb:\n wandb.log({phase+' loss': lossar[pcnt][epoch]})\n st1 = time.time()\n if mt:\n print('save preliminary output', time.time())\n #TODO insert plotitout\n # plotitout(lossar)\n\n # if opt.merging == 'color':\n # i0 = np.squeeze(inputs[0,0].detach().cpu().numpy())\n # else:\n # i0 = np.squeeze(inputs[0].detach().cpu().numpy())\n # if all([log1, lb == 'eul', epoch == epoch0+1]):\n # Path(jn(dirname,'netOutputs')).mkdir(parents=True, exist_ok=True)\n # # np.savetxt(jn(dirname,'netOutputs','gt_'+phase),\n # # np.reshape(gt[pcnt],(cbs,-1)), delimiter=',')\n # np.savetxt(jn(dirname,'input_image_'+phase),i0,delimiter=',')\n if all([rank == 0, epoch == epoch0+1, opt.save_output,\n phase == 'val']):\n for n in opt.netname:\n # shutil.copy(jn(homepath, 'experiments',\n # opt.expnum, n+'.py'),\n # jn(dirname,n+'.py'))\n shutil.copy(n+'.py', jn(dirname,n+'.py'))\n shutil.copy(jn(homepath, 'main.py'),\n jn(dirname, 'main.py'))\n shutil.copy(jn(homepath, \"transform\"+\n opt.transappendix+\".py\"),\n jn(dirname, \"transform\"+\n opt.transappendix+\".py\"))\n if log1 and phase == 'val':\n lossout('Average_loss_', 'Epoch', lossar, epoch,\n lossoutdir , lb)\n lossout('!Single_seed_loss_', 'Epoch', curloss, epoch,\n lossoutdir, lb)\n # lossmb_eff = [np.array([lossmb[i], np.zeros(lossmb[i].shape)])\\\n # for i in range(2)]\n np.savetxt(jn(lossoutdir,'lossar'), lossar, delimiter=',')\n np.savetxt(jn(lossoutdir, 'curloss'), curloss, delimiter=',')\n np.savetxt(jn(lossoutdir, 'lossmb_train'), lossmb[0], delimiter=',')\n np.savetxt(jn(lossoutdir, 'lossmb_val'), lossmb[1], delimiter=',')\n lossmb_eff = [np.array([lossmb[0],\n 
np.zeros(lossmb[0].shape)]),\n np.array([np.zeros(lossmb[1].shape),\n lossmb[1]])]\n for pcnt_c, phase_c in enumerate(['train', 'val']):\n lossout('Average_loss_minibatch_'+phase_c+'_',\n 'Iteration',\n lossmb_eff[pcnt_c],\n abs_batch_cnt[pcnt_c], lossoutdir, lb)\n if all([rank == 0, epoch > 0, opt.save_output,\n lb in ('eul', 'orient')]):\n t.save(model.state_dict(), jn(dirname,\"model_state\"))\n if all([isinstance(latent, t.Tensor), rank == 0, epoch > epoch0, opt.save_output,\n lb in ('eul', 'orient')]):\n print(765, latent)\n np.savetxt(jn(dirname,'latent','latent_'+phase+'_' + \\\n str(cnt).zfill(3)),\n latent.detach().cpu().numpy(), delimiter=',')\n if rank == 0:\n print('{} Loss: {:.2f}'.format(phase, lossar[pcnt][epoch]))\n print()\n cnt += 1\n scheduler.step()\n if rank == 0:\n print('epoch %d was done for %f seconds' %(epoch,\n time.time()-ste))\n time_elapsed = time.time() - since\n if rank == 0:\n print('Training complete in {:.0f}m {:.0f}s'.format(\n time_elapsed // 60, time_elapsed % 60))\n return model, lossar, time_elapsed\n\n #@simple_time_tracker(_log)\n class Seed3D_Dataset(Dataset):\n \"\"\"seed point cloud dataset.\"\"\"\n\n def __init__(self, path, lframe, transform=None):\n \"\"\"\n Args:\n csv_file (string): Path to the csv file with annotations.\n root_dir (string): Directory with all the images.\n transform (callable, optional): Optional transform\n to be applied\n on a sample.\n \"\"\"\n self.df = lframe\n self.root_dir = path\n self.transform = transform\n\n def __len__(self):\n return len(self.df)\n\n # @simple_time_tracker(_log)\n def __getitem__(self, idx):\n fln = self.df.iloc[idx]['file_name']\n # ts = time.time()\n if t.is_tensor(idx):\n pass\n img = 0\n if opt.inputt == 'pc':\n img_name = jn(self.root_dir, self.root_dir.replace(\n 'phenoseed_csv', 'phenoseed'),\n fln.replace('csv', '')+\n '_Surface.ply').replace('\\\\', '/')\n pcd = np.asarray(open3d.io.read_point_cloud(img_name).points)\n img = np.concatenate(\n (img, np.zeros([58014 - img.shape[0], 3])), axis=0)\n elif opt.inputt == 'img':\n t.manual_seed(idx)\n if opt.inputt == 'img' and opt.noise_input:\n img = t.rand(enim, original_h, original_w)\n angles_list = np.array([0])\n if inn and not(opt.rand_angle or opt.view_sep) and opt.zero_angle:\n angles_list = np.array(alf)\n elif inn and not(opt.rand_angle or opt.zero_angle or opt.view_sep):\n angles_list = np.array([10*self.df.loc[idx,'zero_angle']])\n angles_list = angles_list.astype(int)\n if inn:\n # st = time.time()\n img_name = []\n img = np.zeros([enim, original_h,\n original_w]).astype(np.single)\n for i in range(enim):\n img_name = jn(\n self.root_dir.replace('phenoseed_csv', 'phenoseed'),\n fln.replace(opt.csvname, opt.specie),\n 'rotation_' + str(angles_list[i]).zfill(3) +\n '.tif').replace('\\\\', '/')\n curim = np.asarray(io.imread(img_name), dtype=np.single)\n h1, w1 = curim.shape\n if (h1, w1) == (original_h, original_w):\n img[i] = curim\n else:\n h2, w2 = original_h, original_w\n th, tw = int((h2-h1) / 2), int((w2-w1) / 2)\n img[i] = np.pad(curim, ((th, th), (tw, tw)))\n if not opt.noise_output and not opt.gttype == 'single_file':\n fara = np.genfromtxt(jn(self.root_dir, (\n fln +\n '_Far_' + str(nsp) + classicnorm +\n '.csv')).replace('\\\\', '/'), delimiter=',')\n f_na = np.genfromtxt(jn(self.root_dir, (\n fln +\n '_F_N_' + str(nsp) + classicnorm +\n '.csv')).replace('\\\\', '/'), delimiter=',')\n elif opt.noise_output:\n far = t.rand(nsp)\n f_n = t.rand(opt.ampl)\n if not opt.noise_output and not opt.rand_angle and \\\n enim == 1 
and not opt.gttype == 'single_file':\n far = fara[int(angles_list[0]/10)]\n f_n = f_na[int(angles_list[0]/10)]\n if opt.gttype == 'single_file':\n sample = {'image': img}\n if self.transform:\n st = time.time()\n sample = self.transform(sample)\n return sample, angles_list, self.df.iloc[idx]['index'], idx\n data_transforms = transformdef(opt, homepath)\n # kwargs = {}\n # kwargs = {'num_workers': 1, 'pin_memory': True} if opt.parallel == 'hvd' else {}\n kwargs = {'num_workers': opt.num_workers, 'pin_memory': opt.pin_memory} \\\n if opt.parallel == 'horovod' else {}\n if (opt.parallel == 'horovod' and\n kwargs.get('num_workers', 0) > 0 and\n hasattr(mp, '_supports_context') and mp._supports_context and\n 'forkserver' in mp.get_all_start_methods()):\n kwargs['multiprocessing_context'] = 'forkserver'\n if mt:\n print('loading of dataset csv with pathes to F_N and angles')\n # TODO insert preprocess\n lframe, lframewh, alf, inn = preprodf(homepath, opt, mt1)\n train_part = int(0.8 * len(lframe))\n print('len train = ', len(lframe[:train_part]))\n df_dict = {'train': lframe[:train_part], 'val': lframe[train_part:]}\n # Create training and validation datasets\n image_datasets = {x: Seed3D_Dataset(\n dataPath, df_dict[x],\n transform=data_transforms[x]) for x in ['train', 'val']}\n if mt:\n print('define dataloaders and samplers')\n lossar = np.zeros([2, opt.epoch])\n hidden_dim = np.array(opt.hidden_dim)\n chidden_dim = np.array(opt.chidden_dim)\n kernel_sizes = np.array(opt.kernel_sizes)\n cropf = opt.cencrop if opt.cencrop else original_h\n rescalef = opt.rescale if cropf > opt.rescale else cropf\n if opt.num_input_images > 1 and opt.merging == 'batch':\n cnum = 1\n else:\n cnum = opt.num_input_images\n def dpwrap(model):\n return(nn.DataParallel(model) if opt.parallel == 'torch' else model)\n if mt:\n print('load network architecture')\n if bool(opt.model_name):\n smodel, input_size = initialize_model(\n opt.model_name, opt.ampl, opt.feature_extract,\n use_pretrained=opt.use_pretrained)\n elif not bool(opt.model_name) and opt.inputt == 'img' and \\\n opt.merging != 'latent':\n smodel = CNet(\n hidden_dim, chidden_dim, kernel_sizes,\n cnum, rescalef,\n int(rescalef*original_w/original_h), opt.haf)\n elif not bool(opt.model_name) and opt.inputt == 'f' and \\\n opt.merging != 'latent':\n smodel = CNet(\n hidden_dim, chidden_dim, kernel_sizes,\n opt.num_input_images, 1, nsp, opt.haf)\n elif not bool(opt.model_name) and opt.inputt == 'pc' and \\\n opt.merging != 'latent':\n smodel = CNet(\n hidden_dim, chidden_dim, kernel_sizes,\n opt.num_input_images, np.floor(58014/opt.downsample).astype(int),\n 1, opt.haf)\n elif opt.inputt == 'img' and opt.merging == 'latent':\n smodel0 = Encoder(hidden_dim, chidden_dim, kernel_sizes,\n opt.num_input_images, rescalef,\n int(rescalef*original_w/original_h), opt.haf)\n smodel1 = Decoder(opt.num_input_images)\n smodel0 = dpwrap(smodel0)\n smodel1 = dpwrap(smodel1)\n if opt.conTrain:\n smodel.load_state_dict(t.load(jn(dir1,opt.conTrain,'model')))\n if opt.merging != 'latent':\n smodel = dpwrap(smodel)\n lr = opt.lr*hvd.local_size() if opt.parallel == 'horovod' else opt.lr\n if iscuda and opt.merging != 'latent':\n smodel.cuda()\n elif iscuda and opt.merging == 'latent':\n smodel0.cuda()\n smodel1.cuda()\n optimizer = t.optim.Adam(\n smodel.parameters(), lr, betas=(0.9, 0.999), eps=1e-08,\n weight_decay=opt.weight_decay, amsgrad=False)\n scheduler = StepLR(optimizer, step_size=opt.steplr[0], gamma=opt.steplr[1])\n if opt.criterion == 'L2':\n loss_fn = 
nn.MSELoss(reduction='none')\n elif opt.criterion == 'L1':\n loss_fn = nn.L1Loss(reduction='none')\n if opt.parallel == 'horovod':\n # Horovod: broadcast parameters & optimizer state.\n hvd.broadcast_parameters(smodel.state_dict(), root_rank=0)\n hvd.broadcast_optimizer_state(optimizer, root_rank=0)\n # Horovod: (optional) compression algorithm.\n compression = hvd.Compression.none\n # args.fp16_allreduce else hvd.Compression.none\n # Horovod: wrap optimizer with DistributedOptimizer.\n optimizer = hvd.DistributedOptimizer(\n optimizer, named_parameters=smodel.named_parameters(),\n compression=compression,\n op=hvd.Adasum if opt.use_adasum else hvd.Average,\n gradient_predivide_factor=opt.gradient_predivide_factor)\n if mt:\n print('model .. = train_model(..)')\n model, lossar, time_elapsed = train_model(smodel, optimizer)\n if mt:\n print('saving output after training')\n if rank == 0 and opt.save_output:\n # print(987, dirname,\n # jn(opt.datapath.replace(\n # opt.datapath.split('/')[-2], ''), 'local_output',\n # opt.expnum, opt.jobname.replace('.sh', '')))\n\n with open(jn(dirname,\"job-parameters.txt\"), 'a') as f:\n original_stdout = sys.stdout\n sys.stdout = f # Change the standard output to the file we created.\n print(smodel)\n # Reset the standard output to its original value\n sys.stdout = original_stdout\n f.write('time_elapsed=' + \\\n str(datetime.timedelta(seconds=time_elapsed)) + '\\n')\n print('ellapsed time = ', time.time()-tstart)\n # TODO performance: clear directory content if is not empty\n newdirname = jn(opt.datapath.replace(\n opt.datapath.split('/')[-2], ''), 'local_output',\n opt.expnum, opt.jobname.replace('.sh', ''))\n if opt.save_output and rank==0 and os.path.isdir(newdirname) and opt.rmdirname:\n saferm(newdirname)\n os.rmdir(newdirname)\n # savedirname(opt, rank, newdirname)\n shutil.copytree(dirname, newdirname)" ]
[ [ "numpy.nanmax", "numpy.expand_dims", "torch.transpose", "torch.zeros", "numpy.nanmin", "torch.utils.data.DataLoader", "numpy.max", "torch.set_grad_enabled", "torch.set_num_threads", "numpy.mean", "torch.cuda.is_available", "torch.cuda.manual_seed_all", "numpy.nanmean", "numpy.nanstd", "torch.nn.L1Loss", "torch.Generator", "torch.initial_seed", "numpy.pad", "torch.ones", "torch.einsum", "torch.reshape", "torch.multiprocessing.get_all_start_methods", "numpy.matmul", "numpy.copy", "numpy.std", "torch.rand", "numpy.zeros", "torch.optim.lr_scheduler.StepLR", "numpy.min", "numpy.linalg.inv", "torch.is_tensor", "torch.unsqueeze", "numpy.floor", "numpy.array", "numpy.random.seed", "torch.Tensor", "torch.manual_seed", "numpy.linalg.norm", "torch.multiply", "torch.nn.DataParallel", "torch.nn.MSELoss" ] ]
vferat/nipype
[ "c14f24eba1da08711bbb894e049ee858ed740096" ]
[ "nipype/interfaces/spm/base.py" ]
[ "# -*- coding: utf-8 -*-\n# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n\"\"\"The spm module provides basic functions for interfacing with SPM tools.\n\nIn order to use the standalone MCR version of spm, you need to ensure that\nthe following commands are executed at the beginning of your script::\n\n from nipype.interfaces import spm\n matlab_cmd = '/path/to/run_spm8.sh /path/to/Compiler_Runtime/v713/ script'\n spm.SPMCommand.set_mlab_paths(matlab_cmd=matlab_cmd, use_mcr=True)\n\nyou can test by calling::\n\n spm.SPMCommand().version\n\"\"\"\nfrom __future__ import (print_function, division, unicode_literals,\n absolute_import)\nfrom builtins import range, object, str, bytes\n\n# Standard library imports\nimport os\nfrom copy import deepcopy\n\n# Third-party imports\nfrom nibabel import load\nimport numpy as np\n\n# Local imports\nfrom ... import logging\nfrom ...utils import spm_docs as sd, NUMPY_MMAP\nfrom ..base import (BaseInterface, traits, isdefined, InputMultiPath,\n BaseInterfaceInputSpec, Directory, Undefined, ImageFile,\n PackageInfo)\nfrom ..base.traits_extension import NoDefaultSpecified\nfrom ..matlab import MatlabCommand\nfrom ...external.due import due, Doi, BibTeX\n\n__docformat__ = 'restructuredtext'\nlogger = logging.getLogger('nipype.interface')\n\n\ndef func_is_3d(in_file):\n \"\"\"Checks if input functional files are 3d.\"\"\"\n\n if isinstance(in_file, list):\n return func_is_3d(in_file[0])\n else:\n img = load(in_file, mmap=NUMPY_MMAP)\n shape = img.shape\n if len(shape) == 3 or (len(shape) == 4 and shape[3] == 1):\n return True\n else:\n return False\n\n\ndef get_first_3dfile(in_files):\n if not func_is_3d(in_files):\n return None\n if isinstance(in_files[0], list):\n return in_files[0]\n return in_files\n\n\ndef scans_for_fname(fname):\n \"\"\"Reads a nifti file and converts it to a numpy array storing\n individual nifti volumes.\n\n Opens images so will fail if they are not found.\n\n \"\"\"\n if isinstance(fname, list):\n scans = np.zeros((len(fname), ), dtype=object)\n for sno, f in enumerate(fname):\n scans[sno] = '%s,1' % f\n return scans\n img = load(fname, mmap=NUMPY_MMAP)\n if len(img.shape) == 3:\n return np.array(('%s,1' % fname, ), dtype=object)\n else:\n n_scans = img.shape[3]\n scans = np.zeros((n_scans, ), dtype=object)\n for sno in range(n_scans):\n scans[sno] = '%s,%d' % (fname, sno + 1)\n return scans\n\n\ndef scans_for_fnames(fnames, keep4d=False, separate_sessions=False):\n \"\"\"Converts a list of files to a concatenated numpy array for each\n volume.\n\n keep4d : boolean\n keeps the entries of the numpy array as 4d files instead of\n extracting the individual volumes.\n separate_sessions: boolean\n if 4d nifti files are being used, then separate_sessions\n ensures a cell array per session is created in the structure.\n\n \"\"\"\n flist = None\n if not isinstance(fnames[0], list):\n if func_is_3d(fnames[0]):\n fnames = [fnames]\n if separate_sessions or keep4d:\n flist = np.zeros((len(fnames), ), dtype=object)\n for i, f in enumerate(fnames):\n if separate_sessions:\n if keep4d:\n if isinstance(f, list):\n flist[i] = np.array(f, dtype=object)\n else:\n flist[i] = np.array([f], dtype=object)\n else:\n flist[i] = scans_for_fname(f)\n else:\n if keep4d:\n flist[i] = f\n else:\n scans = scans_for_fname(f)\n if flist is None:\n flist = scans\n else:\n flist = np.concatenate((flist, scans))\n return flist\n\n\nclass Info(PackageInfo):\n \"\"\"Handles SPM version information\n\n 
If you use `SPMCommand.set_mlab_paths` to set alternate entries for\n matlab_cmd, paths, and use_mcr, then you will need to use the same entries\n to any call in the Info class to maintain memoization. Otherwise, it will\n default to the parameters in the `getinfo` function below.\n \"\"\"\n _path = None\n _name = None\n _command = None\n _paths = None\n _version = None\n\n @classmethod\n def path(klass, matlab_cmd=None, paths=None, use_mcr=None):\n klass.getinfo(matlab_cmd, paths, use_mcr)\n return klass._path\n\n @classmethod\n def version(klass, matlab_cmd=None, paths=None, use_mcr=None):\n klass.getinfo(matlab_cmd, paths, use_mcr)\n return klass._version\n\n @classmethod\n def name(klass, matlab_cmd=None, paths=None, use_mcr=None):\n klass.getinfo(matlab_cmd, paths, use_mcr)\n return klass._name\n\n @classmethod\n def getinfo(klass, matlab_cmd=None, paths=None, use_mcr=None):\n \"\"\"\n Returns the path to the SPM directory in the Matlab path\n If path not found, returns None.\n\n Parameters\n ----------\n matlab_cmd: str\n Sets the default matlab command. If None, the value of the\n environment variable SPMMCRCMD will be used if set and use_mcr\n is True or the environment variable FORCE_SPMMCR is set.\n If one of FORCE_SPMMCR or SPMMCRCMD is not set, the existence\n of the environment variable MATLABCMD is checked and its value\n is used as the matlab command if possible.\n If none of the above was successful, the fallback value of\n 'matlab -nodesktop -nosplash' will be used.\n paths : str\n Add paths to matlab session\n use_mcr : bool\n Whether to use the MATLAB Common Runtime. In this case, the\n matlab_cmd is expected to be a valid MCR call.\n\n Returns\n -------\n spm_path : string representing path to SPM directory\n\n returns None of path not found\n \"\"\"\n\n use_mcr = use_mcr or 'FORCE_SPMMCR' in os.environ\n matlab_cmd = matlab_cmd or ((use_mcr and os.getenv('SPMMCRCMD'))\n or os.getenv('MATLABCMD', 'matlab -nodesktop -nosplash'))\n\n if klass._name and klass._path and klass._version and \\\n klass._command == matlab_cmd and klass._paths == paths:\n\n return {\n 'name': klass._name,\n 'path': klass._path,\n 'release': klass._version\n }\n logger.debug('matlab command or path has changed. 
recomputing version.')\n mlab = MatlabCommand(matlab_cmd=matlab_cmd, resource_monitor=False)\n mlab.inputs.mfile = False\n if paths:\n mlab.inputs.paths = paths\n if use_mcr:\n mlab.inputs.nodesktop = Undefined\n mlab.inputs.nosplash = Undefined\n mlab.inputs.single_comp_thread = Undefined\n mlab.inputs.mfile = True\n mlab.inputs.uses_mcr = True\n mlab.inputs.script = \"\"\"\nif isempty(which('spm')),\nthrow(MException('SPMCheck:NotFound','SPM not in matlab path'));\nend;\nspm_path = spm('dir');\n[name, version] = spm('ver');\nfprintf(1, 'NIPYPE path:%s|name:%s|release:%s', spm_path, name, version);\nexit;\n \"\"\"\n try:\n out = mlab.run()\n except (IOError, RuntimeError) as e:\n # if no Matlab at all -- exception could be raised\n # No Matlab -- no spm\n logger.debug('%s', e)\n klass._version = None\n klass._path = None\n klass._name = None\n klass._command = matlab_cmd\n klass._paths = paths\n return None\n\n out = sd._strip_header(out.runtime.stdout)\n out_dict = {}\n for part in out.split('|'):\n key, val = part.split(':')\n out_dict[key] = val\n\n klass._version = out_dict['release']\n klass._path = out_dict['path']\n klass._name = out_dict['name']\n klass._command = matlab_cmd\n klass._paths = paths\n return out_dict\n\n\ndef no_spm():\n \"\"\" Checks if SPM is NOT installed\n used with pytest.mark.skipif decorator to skip tests\n that will fail if spm is not installed\"\"\"\n\n if 'NIPYPE_NO_MATLAB' in os.environ or Info.version() is None:\n return True\n else:\n return False\n\n\nclass SPMCommandInputSpec(BaseInterfaceInputSpec):\n matlab_cmd = traits.Str(desc='matlab command to use')\n paths = InputMultiPath(Directory(), desc='Paths to add to matlabpath')\n mfile = traits.Bool(True, desc='Run m-code using m-file', usedefault=True)\n use_mcr = traits.Bool(desc='Run m-code using SPM MCR')\n use_v8struct = traits.Bool(\n True,\n min_ver='8',\n usedefault=True,\n desc=('Generate SPM8 and higher '\n 'compatible jobs'))\n\n\nclass SPMCommand(BaseInterface):\n \"\"\"Extends `BaseInterface` class to implement SPM specific interfaces.\n\n WARNING: Pseudo prototype class, meant to be subclassed\n \"\"\"\n input_spec = SPMCommandInputSpec\n _additional_metadata = ['field']\n\n _jobtype = 'basetype'\n _jobname = 'basename'\n\n _matlab_cmd = None\n _paths = None\n _use_mcr = None\n\n references_ = [{\n 'entry':\n BibTeX(\n \"@book{FrackowiakFristonFrithDolanMazziotta1997,\"\n \"author={R.S.J. Frackowiak, K.J. Friston, C.D. Frith, R.J. Dolan, and J.C. 
Mazziotta},\"\n \"title={Human Brain Function},\"\n \"publisher={Academic Press USA},\"\n \"year={1997},\"\n \"}\"),\n 'description':\n 'The fundamental text on Statistical Parametric Mapping (SPM)',\n # 'path': \"nipype.interfaces.spm\",\n 'tags': ['implementation'],\n }]\n\n def __init__(self, **inputs):\n super(SPMCommand, self).__init__(**inputs)\n self.inputs.on_trait_change(\n self._matlab_cmd_update,\n ['matlab_cmd', 'mfile', 'paths', 'use_mcr'])\n self._find_mlab_cmd_defaults()\n self._check_mlab_inputs()\n self._matlab_cmd_update()\n\n @classmethod\n def set_mlab_paths(cls, matlab_cmd=None, paths=None, use_mcr=None):\n cls._matlab_cmd = matlab_cmd\n cls._paths = paths\n cls._use_mcr = use_mcr\n info_dict = Info.getinfo(\n matlab_cmd=matlab_cmd,\n paths=paths,\n use_mcr=use_mcr)\n\n def _find_mlab_cmd_defaults(self):\n # check if the user has set environment variables to enforce\n # the standalone (MCR) version of SPM\n if self._use_mcr or 'FORCE_SPMMCR' in os.environ:\n self._use_mcr = True\n if self._matlab_cmd is None:\n try:\n self._matlab_cmd = os.environ['SPMMCRCMD']\n except KeyError:\n pass\n\n def _matlab_cmd_update(self):\n # MatlabCommand has to be created here,\n # because matlab_cmd is not a proper input\n # and can be set only during init\n self.mlab = MatlabCommand(\n matlab_cmd=self.inputs.matlab_cmd,\n mfile=self.inputs.mfile,\n paths=self.inputs.paths,\n resource_monitor=False)\n self.mlab.inputs.script_file = 'pyscript_%s.m' % \\\n self.__class__.__name__.split('.')[-1].lower()\n if isdefined(self.inputs.use_mcr) and self.inputs.use_mcr:\n self.mlab.inputs.nodesktop = Undefined\n self.mlab.inputs.nosplash = Undefined\n self.mlab.inputs.single_comp_thread = Undefined\n self.mlab.inputs.uses_mcr = True\n self.mlab.inputs.mfile = True\n\n @property\n def version(self):\n info_dict = Info.getinfo(\n matlab_cmd=self.inputs.matlab_cmd,\n paths=self.inputs.paths,\n use_mcr=self.inputs.use_mcr)\n if info_dict:\n return '%s.%s' % (info_dict['name'].split('SPM')[-1],\n info_dict['release'])\n\n @property\n def jobtype(self):\n return self._jobtype\n\n @property\n def jobname(self):\n return self._jobname\n\n def _check_mlab_inputs(self):\n if not isdefined(self.inputs.matlab_cmd) and self._matlab_cmd:\n self.inputs.matlab_cmd = self._matlab_cmd\n if not isdefined(self.inputs.paths) and self._paths:\n self.inputs.paths = self._paths\n if not isdefined(self.inputs.use_mcr) and self._use_mcr:\n self.inputs.use_mcr = self._use_mcr\n\n def _run_interface(self, runtime):\n \"\"\"Executes the SPM function using MATLAB.\"\"\"\n self.mlab.inputs.script = self._make_matlab_command(\n deepcopy(self._parse_inputs()))\n results = self.mlab.run()\n runtime.returncode = results.runtime.returncode\n if self.mlab.inputs.uses_mcr:\n if 'Skipped' in results.runtime.stdout:\n self.raise_exception(runtime)\n runtime.stdout = results.runtime.stdout\n runtime.stderr = results.runtime.stderr\n runtime.merged = results.runtime.merged\n return runtime\n\n def _list_outputs(self):\n \"\"\"Determine the expected outputs based on inputs.\"\"\"\n\n raise NotImplementedError\n\n def _format_arg(self, opt, spec, val):\n \"\"\"Convert input to appropriate format for SPM.\"\"\"\n if spec.is_trait_type(traits.Bool):\n return int(val)\n elif spec.is_trait_type(traits.Tuple):\n return list(val)\n else:\n return val\n\n def _parse_inputs(self, skip=()):\n spmdict = {}\n metadata = dict(field=lambda t: t is not None)\n for name, spec in list(self.inputs.traits(**metadata).items()):\n if skip and name in skip:\n 
continue\n value = getattr(self.inputs, name)\n if not isdefined(value):\n continue\n field = spec.field\n if '.' in field:\n fields = field.split('.')\n dictref = spmdict\n for f in fields[:-1]:\n if f not in list(dictref.keys()):\n dictref[f] = {}\n dictref = dictref[f]\n dictref[fields[-1]] = self._format_arg(name, spec, value)\n else:\n spmdict[field] = self._format_arg(name, spec, value)\n return [spmdict]\n\n def _reformat_dict_for_savemat(self, contents):\n \"\"\"Encloses a dict representation within hierarchical lists.\n\n In order to create an appropriate SPM job structure, a Python\n dict storing the job needs to be modified so that each dict\n embedded in dict needs to be enclosed as a list element.\n\n Examples\n --------\n >>> a = SPMCommand()._reformat_dict_for_savemat(dict(a=1,\n ... b=dict(c=2, d=3)))\n >>> a == [{'a': 1, 'b': [{'c': 2, 'd': 3}]}]\n True\n\n \"\"\"\n newdict = {}\n try:\n for key, value in list(contents.items()):\n if isinstance(value, dict):\n if value:\n newdict[key] = self._reformat_dict_for_savemat(value)\n # if value is None, skip\n else:\n newdict[key] = value\n\n return [newdict]\n except TypeError:\n print('Requires dict input')\n\n def _generate_job(self, prefix='', contents=None):\n \"\"\"Recursive function to generate spm job specification as a string\n\n Parameters\n ----------\n prefix : string\n A string that needs to get\n contents : dict\n A non-tuple Python structure containing spm job\n information gets converted to an appropriate sequence of\n matlab commands.\n\n \"\"\"\n jobstring = ''\n if contents is None:\n return jobstring\n if isinstance(contents, list):\n for i, value in enumerate(contents):\n if prefix.endswith(\")\"):\n newprefix = \"%s,%d)\" % (prefix[:-1], i + 1)\n else:\n newprefix = \"%s(%d)\" % (prefix, i + 1)\n jobstring += self._generate_job(newprefix, value)\n return jobstring\n if isinstance(contents, dict):\n for key, value in list(contents.items()):\n newprefix = \"%s.%s\" % (prefix, key)\n jobstring += self._generate_job(newprefix, value)\n return jobstring\n if isinstance(contents, np.ndarray):\n if contents.dtype == np.dtype(object):\n if prefix:\n jobstring += \"%s = {...\\n\" % (prefix)\n else:\n jobstring += \"{...\\n\"\n for i, val in enumerate(contents):\n if isinstance(val, np.ndarray):\n jobstring += self._generate_job(\n prefix=None, contents=val)\n elif isinstance(val, list):\n items_format = []\n for el in val:\n items_format += [\n '{}' if not isinstance(el, (str, bytes)) else\n '\\'{}\\''\n ]\n val_format = ', '.join(items_format).format\n jobstring += '[{}];...\\n'.format(val_format(*val))\n elif isinstance(val, (str, bytes)):\n jobstring += '\\'{}\\';...\\n'.format(val)\n else:\n jobstring += '%s;...\\n' % str(val)\n jobstring += '};\\n'\n else:\n for i, val in enumerate(contents):\n for field in val.dtype.fields:\n if prefix:\n newprefix = \"%s(%d).%s\" % (prefix, i + 1, field)\n else:\n newprefix = \"(%d).%s\" % (i + 1, field)\n jobstring += self._generate_job(newprefix, val[field])\n return jobstring\n if isinstance(contents, (str, bytes)):\n jobstring += \"%s = '%s';\\n\" % (prefix, contents)\n return jobstring\n jobstring += \"%s = %s;\\n\" % (prefix, str(contents))\n return jobstring\n\n def _make_matlab_command(self, contents, postscript=None):\n \"\"\"Generates a mfile to build job structure\n Parameters\n ----------\n\n contents : list\n a list of dicts generated by _parse_inputs\n in each subclass\n\n cwd : string\n default os.getcwd()\n\n Returns\n -------\n mscript : string\n contents of a 
script called by matlab\n\n \"\"\"\n cwd = os.getcwd()\n mscript = \"\"\"\n %% Generated by nipype.interfaces.spm\n if isempty(which('spm')),\n throw(MException('SPMCheck:NotFound', 'SPM not in matlab path'));\n end\n [name, version] = spm('ver');\n fprintf('SPM version: %s Release: %s\\\\n',name, version);\n fprintf('SPM path: %s\\\\n', which('spm'));\n spm('Defaults','fMRI');\n\n if strcmp(name, 'SPM8') || strcmp(name(1:5), 'SPM12'),\n spm_jobman('initcfg');\n spm_get_defaults('cmdline', 1);\n end\\n\n \"\"\"\n if self.mlab.inputs.mfile:\n if (isdefined(self.inputs.use_v8struct)\n and self.inputs.use_v8struct):\n mscript += self._generate_job('jobs{1}.spm.%s.%s' %\n (self.jobtype,\n self.jobname), contents[0])\n else:\n if self.jobname in [\n 'st', 'smooth', 'preproc', 'preproc8', 'fmri_spec',\n 'fmri_est', 'factorial_design', 'defs'\n ]:\n # parentheses\n mscript += self._generate_job('jobs{1}.%s{1}.%s(1)' %\n (self.jobtype,\n self.jobname), contents[0])\n else:\n # curly brackets\n mscript += self._generate_job('jobs{1}.%s{1}.%s{1}' %\n (self.jobtype,\n self.jobname), contents[0])\n else:\n from scipy.io import savemat\n jobdef = {\n 'jobs': [{\n self.jobtype: [{\n self.jobname:\n self.reformat_dict_for_savemat(contents[0])\n }]\n }]\n }\n savemat(os.path.join(cwd, 'pyjobs_%s.mat' % self.jobname), jobdef)\n mscript += \"load pyjobs_%s;\\n\\n\" % self.jobname\n mscript += \"\"\"\n spm_jobman(\\'run\\', jobs);\\n\n \"\"\"\n if self.inputs.use_mcr:\n mscript += \"\"\"\n if strcmp(name, 'SPM8') || strcmp(name(1:5), 'SPM12'),\n close(\\'all\\', \\'force\\');\n end;\n \"\"\"\n if postscript is not None:\n mscript += postscript\n return mscript\n\n\nclass ImageFileSPM(ImageFile):\n \"\"\"Defines a trait whose value must be a NIfTI file.\"\"\"\n\n def __init__(self, value=NoDefaultSpecified, exists=False, resolve=False, **metadata):\n \"\"\"Create an ImageFileSPM trait.\"\"\"\n super(ImageFileSPM, self).__init__(\n value=value, exists=exists, types=['nifti1', 'nifti2'],\n allow_compressed=False, resolve=resolve, **metadata)\n\n" ]
[ [ "numpy.concatenate", "numpy.array", "numpy.zeros", "numpy.dtype" ] ]
koen-vg/pypsa-eur
[ "a431bbd5a75a54440fab93fd91577197968032e5" ]
[ "scripts/add_electricity.py" ]
[ "# SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors\n#\n# SPDX-License-Identifier: MIT\n\n# coding: utf-8\n\"\"\"\nAdds electrical generators and existing hydro storage units to a base network.\n\nRelevant Settings\n-----------------\n\n.. code:: yaml\n\n costs:\n year:\n USD2013_to_EUR2013:\n dicountrate:\n emission_prices:\n\n electricity:\n max_hours:\n marginal_cost:\n capital_cost:\n conventional_carriers:\n co2limit:\n extendable_carriers:\n include_renewable_capacities_from_OPSD:\n estimate_renewable_capacities_from_capacity_stats:\n\n load:\n scaling_factor:\n\n renewable:\n hydro:\n carriers:\n hydro_max_hours:\n hydro_capital_cost:\n\n lines:\n length_factor:\n\n.. seealso::\n Documentation of the configuration file ``config.yaml`` at :ref:`costs_cf`,\n :ref:`electricity_cf`, :ref:`load_cf`, :ref:`renewable_cf`, :ref:`lines_cf`\n\nInputs\n------\n\n- ``data/costs.csv``: The database of cost assumptions for all included technologies for specific years from various sources; e.g. discount rate, lifetime, investment (CAPEX), fixed operation and maintenance (FOM), variable operation and maintenance (VOM), fuel costs, efficiency, carbon-dioxide intensity.\n- ``data/bundle/hydro_capacities.csv``: Hydropower plant store/discharge power capacities, energy storage capacity, and average hourly inflow by country.\n\n .. image:: ../img/hydrocapacities.png\n :scale: 34 %\n\n- ``data/geth2015_hydro_capacities.csv``: alternative to capacities above; not currently used!\n- ``resources/opsd_load.csv`` Hourly per-country load profiles.\n- ``resources/regions_onshore.geojson``: confer :ref:`busregions`\n- ``resources/nuts3_shapes.geojson``: confer :ref:`shapes`\n- ``resources/powerplants.csv``: confer :ref:`powerplants`\n- ``resources/profile_{}.nc``: all technologies in ``config[\"renewables\"].keys()``, confer :ref:`renewableprofiles`.\n- ``networks/base.nc``: confer :ref:`base`\n\nOutputs\n-------\n\n- ``networks/elec.nc``:\n\n .. image:: ../img/elec.png\n :scale: 33 %\n\nDescription\n-----------\n\nThe rule :mod:`add_electricity` ties all the different data inputs from the preceding rules together into a detailed PyPSA network that is stored in ``networks/elec.nc``. 
It includes:\n\n- today's transmission topology and transfer capacities (optionally including lines which are under construction according to the config settings ``lines: under_construction`` and ``links: under_construction``),\n- today's thermal and hydro power generation capacities (for the technologies listed in the config setting ``electricity: conventional_carriers``), and\n- today's load time-series (upsampled in a top-down approach according to population and gross domestic product)\n\nIt further adds extendable ``generators`` with **zero** capacity for\n\n- photovoltaic, onshore and AC- as well as DC-connected offshore wind installations with today's locational, hourly wind and solar capacity factors (but **no** current capacities),\n- additional open- and combined-cycle gas turbines (if ``OCGT`` and/or ``CCGT`` is listed in the config setting ``electricity: extendable_carriers``)\n\"\"\"\n\nimport logging\nfrom _helpers import configure_logging, update_p_nom_max\n\nimport pypsa\nimport pandas as pd\nimport numpy as np\nimport xarray as xr\nimport geopandas as gpd\nimport powerplantmatching as pm\nfrom powerplantmatching.export import map_country_bus\n\nfrom vresutils.costdata import annuity\nfrom vresutils.load import timeseries_opsd\nfrom vresutils import transfer as vtransfer\n\nidx = pd.IndexSlice\n\nlogger = logging.getLogger(__name__)\n\n\ndef normed(s): return s/s.sum()\n\n\ndef _add_missing_carriers_from_costs(n, costs, carriers):\n missing_carriers = pd.Index(carriers).difference(n.carriers.index)\n if missing_carriers.empty: return\n\n emissions_cols = costs.columns.to_series()\\\n .loc[lambda s: s.str.endswith('_emissions')].values\n suptechs = missing_carriers.str.split('-').str[0]\n emissions = costs.loc[suptechs, emissions_cols].fillna(0.)\n emissions.index = missing_carriers\n n.import_components_from_dataframe(emissions, 'Carrier')\n\n\ndef load_costs(Nyears=1., tech_costs=None, config=None, elec_config=None):\n if tech_costs is None:\n tech_costs = snakemake.input.tech_costs\n\n if config is None:\n config = snakemake.config['costs']\n\n # set all asset costs and other parameters\n costs = pd.read_csv(tech_costs, index_col=list(range(3))).sort_index()\n\n # correct units to MW and EUR\n costs.loc[costs.unit.str.contains(\"/kW\"),\"value\"] *= 1e3\n costs.loc[costs.unit.str.contains(\"USD\"),\"value\"] *= config['USD2013_to_EUR2013']\n\n costs = (costs.loc[idx[:,config['year'],:], \"value\"]\n .unstack(level=2).groupby(\"technology\").sum(min_count=1))\n\n costs = costs.fillna({\"CO2 intensity\" : 0,\n \"FOM\" : 0,\n \"VOM\" : 0,\n \"discount rate\" : config['discountrate'],\n \"efficiency\" : 1,\n \"fuel\" : 0,\n \"investment\" : 0,\n \"lifetime\" : 25})\n\n costs[\"capital_cost\"] = ((annuity(costs[\"lifetime\"], costs[\"discount rate\"]) +\n costs[\"FOM\"]/100.) 
*\n costs[\"investment\"] * Nyears)\n\n costs.at['OCGT', 'fuel'] = costs.at['gas', 'fuel']\n costs.at['CCGT', 'fuel'] = costs.at['gas', 'fuel']\n\n costs['marginal_cost'] = costs['VOM'] + costs['fuel'] / costs['efficiency']\n\n costs = costs.rename(columns={\"CO2 intensity\": \"co2_emissions\"})\n\n costs.at['OCGT', 'co2_emissions'] = costs.at['gas', 'co2_emissions']\n costs.at['CCGT', 'co2_emissions'] = costs.at['gas', 'co2_emissions']\n\n costs.at['solar', 'capital_cost'] = 0.5*(costs.at['solar-rooftop', 'capital_cost'] +\n costs.at['solar-utility', 'capital_cost'])\n\n def costs_for_storage(store, link1, link2=None, max_hours=1.):\n capital_cost = link1['capital_cost'] + max_hours * store['capital_cost']\n if link2 is not None:\n capital_cost += link2['capital_cost']\n return pd.Series(dict(capital_cost=capital_cost,\n marginal_cost=0.,\n co2_emissions=0.))\n\n if elec_config is None:\n elec_config = snakemake.config['electricity']\n max_hours = elec_config['max_hours']\n costs.loc[\"battery\"] = \\\n costs_for_storage(costs.loc[\"battery storage\"], costs.loc[\"battery inverter\"],\n max_hours=max_hours['battery'])\n costs.loc[\"H2\"] = \\\n costs_for_storage(costs.loc[\"hydrogen storage\"], costs.loc[\"fuel cell\"],\n costs.loc[\"electrolysis\"], max_hours=max_hours['H2'])\n\n for attr in ('marginal_cost', 'capital_cost'):\n overwrites = config.get(attr)\n if overwrites is not None:\n overwrites = pd.Series(overwrites)\n costs.loc[overwrites.index, attr] = overwrites\n\n return costs\n\n\ndef load_powerplants(ppl_fn=None):\n if ppl_fn is None:\n ppl_fn = snakemake.input.powerplants\n carrier_dict = {'ocgt': 'OCGT', 'ccgt': 'CCGT', 'bioenergy': 'biomass',\n 'ccgt, thermal': 'CCGT', 'hard coal': 'coal'}\n return (pd.read_csv(ppl_fn, index_col=0, dtype={'bus': 'str'})\n .powerplant.to_pypsa_names()\n .rename(columns=str.lower).drop(columns=['efficiency'])\n .replace({'carrier': carrier_dict}))\n\n\ndef attach_load(n):\n substation_lv_i = n.buses.index[n.buses['substation_lv']]\n regions = (gpd.read_file(snakemake.input.regions).set_index('name')\n .reindex(substation_lv_i))\n opsd_load = (pd.read_csv(snakemake.input.load, index_col=0, parse_dates=True)\n .filter(items=snakemake.config['countries']))\n\n scaling = snakemake.config.get('load', {}).get('scaling_factor', 1.0)\n logger.info(f\"Load data scaled with scalling factor {scaling}.\")\n opsd_load *= scaling\n\n nuts3 = gpd.read_file(snakemake.input.nuts3_shapes).set_index('index')\n\n def upsample(cntry, group):\n l = opsd_load[cntry]\n if len(group) == 1:\n return pd.DataFrame({group.index[0]: l})\n else:\n nuts3_cntry = nuts3.loc[nuts3.country == cntry]\n transfer = vtransfer.Shapes2Shapes(group, nuts3_cntry.geometry,\n normed=False).T.tocsr()\n gdp_n = pd.Series(transfer.dot(nuts3_cntry['gdp'].fillna(1.).values),\n index=group.index)\n pop_n = pd.Series(transfer.dot(nuts3_cntry['pop'].fillna(1.).values),\n index=group.index)\n\n # relative factors 0.6 and 0.4 have been determined from a linear\n # regression on the country to continent load data\n # (refer to vresutils.load._upsampling_weights)\n factors = normed(0.6 * normed(gdp_n) + 0.4 * normed(pop_n))\n return pd.DataFrame(factors.values * l.values[:,np.newaxis],\n index=l.index, columns=factors.index)\n\n load = pd.concat([upsample(cntry, group) for cntry, group\n in regions.geometry.groupby(regions.country)], axis=1)\n\n n.madd(\"Load\", substation_lv_i, bus=substation_lv_i, p_set=load)\n\n\ndef update_transmission_costs(n, costs, length_factor=1.0, 
simple_hvdc_costs=False):\n n.lines['capital_cost'] = (n.lines['length'] * length_factor *\n costs.at['HVAC overhead', 'capital_cost'])\n\n if n.links.empty: return\n\n dc_b = n.links.carrier == 'DC'\n\n # If there are no dc links, then the 'underwater_fraction' column\n # may be missing. Therefore we have to return here.\n if n.links.loc[dc_b].empty: return\n\n if simple_hvdc_costs:\n costs = (n.links.loc[dc_b, 'length'] * length_factor *\n costs.at['HVDC overhead', 'capital_cost'])\n else:\n costs = (n.links.loc[dc_b, 'length'] * length_factor *\n ((1. - n.links.loc[dc_b, 'underwater_fraction']) *\n costs.at['HVDC overhead', 'capital_cost'] +\n n.links.loc[dc_b, 'underwater_fraction'] *\n costs.at['HVDC submarine', 'capital_cost']) +\n costs.at['HVDC inverter pair', 'capital_cost'])\n n.links.loc[dc_b, 'capital_cost'] = costs\n\n\ndef attach_wind_and_solar(n, costs):\n for tech in snakemake.config['renewable']:\n if tech == 'hydro': continue\n\n n.add(\"Carrier\", name=tech)\n with xr.open_dataset(getattr(snakemake.input, 'profile_' + tech)) as ds:\n if ds.indexes['bus'].empty: continue\n\n suptech = tech.split('-', 2)[0]\n if suptech == 'offwind':\n underwater_fraction = ds['underwater_fraction'].to_pandas()\n connection_cost = (snakemake.config['lines']['length_factor'] *\n ds['average_distance'].to_pandas() *\n (underwater_fraction *\n costs.at[tech + '-connection-submarine', 'capital_cost'] +\n (1. - underwater_fraction) *\n costs.at[tech + '-connection-underground', 'capital_cost']))\n capital_cost = (costs.at['offwind', 'capital_cost'] +\n costs.at[tech + '-station', 'capital_cost'] +\n connection_cost)\n logger.info(\"Added connection cost of {:0.0f}-{:0.0f} Eur/MW/a to {}\"\n .format(connection_cost.min(), connection_cost.max(), tech))\n else:\n capital_cost = costs.at[tech, 'capital_cost']\n\n n.madd(\"Generator\", ds.indexes['bus'], ' ' + tech,\n bus=ds.indexes['bus'],\n carrier=tech,\n p_nom_extendable=True,\n p_nom_max=ds['p_nom_max'].to_pandas(),\n weight=ds['weight'].to_pandas(),\n marginal_cost=costs.at[suptech, 'marginal_cost'],\n capital_cost=capital_cost,\n efficiency=costs.at[suptech, 'efficiency'],\n p_max_pu=ds['profile'].transpose('time', 'bus').to_pandas())\n\n\ndef attach_conventional_generators(n, costs, ppl):\n carriers = snakemake.config['electricity']['conventional_carriers']\n\n _add_missing_carriers_from_costs(n, costs, carriers)\n\n ppl = (ppl.query('carrier in @carriers').join(costs, on='carrier')\n .rename(index=lambda s: 'C' + str(s)))\n\n logger.info('Adding {} generators with capacities [MW] \\n{}'\n .format(len(ppl), ppl.groupby('carrier').p_nom.sum()))\n\n n.madd(\"Generator\", ppl.index,\n carrier=ppl.carrier,\n bus=ppl.bus,\n p_nom=ppl.p_nom,\n efficiency=ppl.efficiency,\n marginal_cost=ppl.marginal_cost,\n capital_cost=0)\n\n logger.warning(f'Capital costs for conventional generators put to 0 EUR/MW.')\n\n\ndef attach_hydro(n, costs, ppl):\n if 'hydro' not in snakemake.config['renewable']: return\n c = snakemake.config['renewable']['hydro']\n carriers = c.get('carriers', ['ror', 'PHS', 'hydro'])\n\n _add_missing_carriers_from_costs(n, costs, carriers)\n\n ppl = ppl.query('carrier == \"hydro\"').reset_index(drop=True)\\\n .rename(index=lambda s: str(s) + ' hydro')\n ror = ppl.query('technology == \"Run-Of-River\"')\n phs = ppl.query('technology == \"Pumped Storage\"')\n hydro = ppl.query('technology == \"Reservoir\"')\n\n country = ppl['bus'].map(n.buses.country).rename(\"country\")\n\n inflow_idx = ror.index.union(hydro.index)\n if not 
inflow_idx.empty:\n dist_key = ppl.loc[inflow_idx, 'p_nom'].groupby(country).transform(normed)\n\n with xr.open_dataarray(snakemake.input.profile_hydro) as inflow:\n inflow_countries = pd.Index(country[inflow_idx])\n missing_c = (inflow_countries.unique()\n .difference(inflow.indexes['countries']))\n assert missing_c.empty, (f\"'{snakemake.input.profile_hydro}' is missing \"\n f\"inflow time-series for at least one country: {', '.join(missing_c)}\")\n\n inflow_t = (inflow.sel(countries=inflow_countries)\n .rename({'countries': 'name'})\n .assign_coords(name=inflow_idx)\n .transpose('time', 'name')\n .to_pandas()\n .multiply(dist_key, axis=1))\n\n if 'ror' in carriers and not ror.empty:\n n.madd(\"Generator\", ror.index,\n carrier='ror',\n bus=ror['bus'],\n p_nom=ror['p_nom'],\n efficiency=costs.at['ror', 'efficiency'],\n capital_cost=costs.at['ror', 'capital_cost'],\n weight=ror['p_nom'],\n p_max_pu=(inflow_t[ror.index]\n .divide(ror['p_nom'], axis=1)\n .where(lambda df: df<=1., other=1.)))\n\n if 'PHS' in carriers and not phs.empty:\n # fill missing max hours to config value and\n # assume no natural inflow due to lack of data\n phs = phs.replace({'max_hours': {0: c['PHS_max_hours']}})\n n.madd('StorageUnit', phs.index,\n carrier='PHS',\n bus=phs['bus'],\n p_nom=phs['p_nom'],\n capital_cost=costs.at['PHS', 'capital_cost'],\n max_hours=phs['max_hours'],\n efficiency_store=np.sqrt(costs.at['PHS','efficiency']),\n efficiency_dispatch=np.sqrt(costs.at['PHS','efficiency']),\n cyclic_state_of_charge=True)\n\n if 'hydro' in carriers and not hydro.empty:\n hydro_max_hours = c.get('hydro_max_hours')\n hydro_stats = pd.read_csv(snakemake.input.hydro_capacities,\n comment=\"#\", na_values='-', index_col=0)\n e_target = hydro_stats[\"E_store[TWh]\"].clip(lower=0.2) * 1e6\n e_installed = hydro.eval('p_nom * max_hours').groupby(hydro.country).sum()\n e_missing = e_target - e_installed\n missing_mh_i = hydro.query('max_hours == 0').index\n\n if hydro_max_hours == 'energy_capacity_totals_by_country':\n # watch out some p_nom values like IE's are totally underrepresented\n max_hours_country = e_missing / \\\n hydro.loc[missing_mh_i].groupby('country').p_nom.sum()\n\n elif hydro_max_hours == 'estimate_by_large_installations':\n max_hours_country = hydro_stats['E_store[TWh]'] * 1e3 / \\\n hydro_stats['p_nom_discharge[GW]']\n\n missing_countries = (pd.Index(hydro['country'].unique())\n .difference(max_hours_country.dropna().index))\n if not missing_countries.empty:\n logger.warning(\"Assuming max_hours=6 for hydro reservoirs in the countries: {}\"\n .format(\", \".join(missing_countries)))\n hydro_max_hours = hydro.max_hours.where(hydro.max_hours > 0,\n hydro.country.map(max_hours_country)).fillna(6)\n\n n.madd('StorageUnit', hydro.index, carrier='hydro',\n bus=hydro['bus'],\n p_nom=hydro['p_nom'],\n max_hours=hydro_max_hours,\n capital_cost=(costs.at['hydro', 'capital_cost']\n if c.get('hydro_capital_cost') else 0.),\n marginal_cost=costs.at['hydro', 'marginal_cost'],\n p_max_pu=1., # dispatch\n p_min_pu=0., # store\n efficiency_dispatch=costs.at['hydro', 'efficiency'],\n efficiency_store=0.,\n cyclic_state_of_charge=True,\n inflow=inflow_t.loc[:, hydro.index])\n\n\ndef attach_extendable_generators(n, costs, ppl):\n elec_opts = snakemake.config['electricity']\n carriers = pd.Index(elec_opts['extendable_carriers']['Generator'])\n\n _add_missing_carriers_from_costs(n, costs, carriers)\n\n for tech in carriers:\n if tech.startswith('OCGT'):\n ocgt = ppl.query(\"carrier in ['OCGT', 'CCGT']\").groupby('bus', 
as_index=False).first()\n n.madd('Generator', ocgt.index,\n suffix=' OCGT',\n bus=ocgt['bus'],\n carrier=tech,\n p_nom_extendable=True,\n p_nom=0.,\n capital_cost=costs.at['OCGT', 'capital_cost'],\n marginal_cost=costs.at['OCGT', 'marginal_cost'],\n efficiency=costs.at['OCGT', 'efficiency'])\n\n elif tech.startswith('CCGT'):\n ccgt = ppl.query(\"carrier in ['OCGT', 'CCGT']\").groupby('bus', as_index=False).first()\n n.madd('Generator', ccgt.index,\n suffix=' CCGT',\n bus=ccgt['bus'],\n carrier=tech,\n p_nom_extendable=True,\n p_nom=0.,\n capital_cost=costs.at['CCGT', 'capital_cost'],\n marginal_cost=costs.at['CCGT', 'marginal_cost'],\n efficiency=costs.at['CCGT', 'efficiency'])\n\n elif tech.startswith('nuclear'):\n nuclear = ppl.query(\"carrier == 'nuclear'\").groupby('bus', as_index=False).first()\n n.madd('Generator', nuclear.index,\n suffix=' nuclear',\n bus=nuclear['bus'],\n carrier=tech,\n p_nom_extendable=True,\n p_nom=0.,\n capital_cost=costs.at['nuclear', 'capital_cost'],\n marginal_cost=costs.at['nuclear', 'marginal_cost'],\n efficiency=costs.at['nuclear', 'efficiency'])\n\n else:\n raise NotImplementedError(f\"Adding extendable generators for carrier \"\n \"'{tech}' is not implemented, yet. \"\n \"Only OCGT, CCGT and nuclear are allowed at the moment.\")\n\n\n\ndef attach_OPSD_renewables(n):\n\n available = ['DE', 'FR', 'PL', 'CH', 'DK', 'CZ', 'SE', 'GB']\n tech_map = {'Onshore': 'onwind', 'Offshore': 'offwind', 'Solar': 'solar'}\n countries = set(available) & set(n.buses.country)\n techs = snakemake.config['electricity'].get('renewable_capacities_from_OPSD', [])\n tech_map = {k: v for k, v in tech_map.items() if v in techs}\n\n if not tech_map:\n return\n\n logger.info(f'Using OPSD renewable capacities in {\", \".join(countries)} '\n f'for technologies {\", \".join(tech_map.values())}.')\n\n df = pd.concat([pm.data.OPSD_VRE_country(c) for c in countries])\n technology_b = ~df.Technology.isin(['Onshore', 'Offshore'])\n df['Fueltype'] = df.Fueltype.where(technology_b, df.Technology)\n df = df.query('Fueltype in @tech_map').powerplant.convert_country_to_alpha2()\n\n for fueltype, carrier_like in tech_map.items():\n gens = n.generators[lambda df: df.carrier.str.contains(carrier_like)]\n buses = n.buses.loc[gens.bus.unique()]\n gens_per_bus = gens.groupby('bus').p_nom.count()\n\n caps = map_country_bus(df.query('Fueltype == @fueltype'), buses)\n caps = caps.groupby(['bus']).Capacity.sum()\n caps = caps / gens_per_bus.reindex(caps.index, fill_value=1)\n\n n.generators.p_nom.update(gens.bus.map(caps).dropna())\n n.generators.p_nom_min.update(gens.bus.map(caps).dropna())\n\n\n\ndef estimate_renewable_capacities(n, tech_map=None):\n if tech_map is None:\n tech_map = (snakemake.config['electricity']\n .get('estimate_renewable_capacities_from_capacity_stats', {}))\n\n if len(tech_map) == 0: return\n\n capacities = (pm.data.Capacity_stats().powerplant.convert_country_to_alpha2()\n [lambda df: df.Energy_Source_Level_2]\n .set_index(['Fueltype', 'Country']).sort_index())\n\n countries = n.buses.country.unique()\n\n if len(countries) == 0: return\n\n logger.info('heuristics applied to distribute renewable capacities [MW] \\n{}'\n .format(capacities.query('Fueltype in @tech_map.keys() and Capacity >= 0.1')\n .groupby('Country').agg({'Capacity': 'sum'})))\n\n for ppm_fueltype, techs in tech_map.items():\n tech_capacities = capacities.loc[ppm_fueltype, 'Capacity']\\\n .reindex(countries, fill_value=0.)\n #tech_i = n.generators.query('carrier in @techs').index\n tech_i = 
(n.generators.query('carrier in @techs')\n [n.generators.query('carrier in @techs')\n .bus.map(n.buses.country).isin(countries)].index)\n n.generators.loc[tech_i, 'p_nom'] = (\n (n.generators_t.p_max_pu[tech_i].mean() *\n n.generators.loc[tech_i, 'p_nom_max']) # maximal yearly generation\n .groupby(n.generators.bus.map(n.buses.country))\n .transform(lambda s: normed(s) * tech_capacities.at[s.name])\n .where(lambda s: s>0.1, 0.)) # only capacities above 100kW\n n.generators.loc[tech_i, 'p_nom_min'] = n.generators.loc[tech_i, 'p_nom']\n\n\ndef add_nice_carrier_names(n, config=None):\n if config is None: config = snakemake.config\n carrier_i = n.carriers.index\n nice_names = (pd.Series(config['plotting']['nice_names'])\n .reindex(carrier_i).fillna(carrier_i.to_series().str.title()))\n n.carriers['nice_name'] = nice_names\n colors = pd.Series(config['plotting']['tech_colors']).reindex(carrier_i)\n if colors.isna().any():\n missing_i = list(colors.index[colors.isna()])\n logger.warning(f'tech_colors for carriers {missing_i} not defined '\n 'in config.')\n n.carriers['color'] = colors\n\n\nif __name__ == \"__main__\":\n if 'snakemake' not in globals():\n from _helpers import mock_snakemake\n snakemake = mock_snakemake('add_electricity')\n configure_logging(snakemake)\n\n n = pypsa.Network(snakemake.input.base_network)\n Nyears = n.snapshot_weightings.objective.sum() / 8760.\n\n costs = load_costs(Nyears)\n ppl = load_powerplants()\n\n attach_load(n)\n\n update_transmission_costs(n, costs)\n\n attach_conventional_generators(n, costs, ppl)\n attach_wind_and_solar(n, costs)\n attach_hydro(n, costs, ppl)\n attach_extendable_generators(n, costs, ppl)\n\n estimate_renewable_capacities(n)\n attach_OPSD_renewables(n)\n update_p_nom_max(n)\n\n add_nice_carrier_names(n)\n\n n.export_to_netcdf(snakemake.output[0])\n" ]
[ [ "pandas.read_csv", "numpy.sqrt", "pandas.Series", "pandas.Index", "pandas.DataFrame" ] ]
IAM20/nothing
[ "45cb6da621a8c63e9329c14390b84a6a566bdf49" ]
[ "fairseq/options.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport argparse\nimport sys\nfrom typing import Callable, List, Optional\n\nimport torch\n\nfrom fairseq import utils\nfrom fairseq.data.indexed_dataset import get_available_dataset_impl\n\n\ndef get_preprocessing_parser(default_task=\"translation\"):\n parser = get_parser(\"Preprocessing\", default_task)\n add_preprocess_args(parser)\n return parser\n\n\ndef get_training_parser(default_task=\"translation\"):\n parser = get_parser(\"Trainer\", default_task)\n add_dataset_args(parser, train=True)\n add_distributed_training_args(parser)\n add_model_args(parser)\n add_optimization_args(parser)\n add_checkpoint_args(parser)\n return parser\n\n\ndef get_generation_parser(interactive=False, default_task=\"translation\"):\n parser = get_parser(\"Generation\", default_task)\n add_dataset_args(parser, gen=True)\n add_distributed_training_args(parser, default_world_size=1)\n add_generation_args(parser)\n if interactive:\n add_interactive_args(parser)\n return parser\n\n\ndef get_interactive_generation_parser(default_task=\"translation\"):\n return get_generation_parser(interactive=True, default_task=default_task)\n\n\ndef get_eval_lm_parser(default_task=\"language_modeling\"):\n parser = get_parser(\"Evaluate Language Model\", default_task)\n add_dataset_args(parser, gen=True)\n add_distributed_training_args(parser, default_world_size=1)\n add_eval_lm_args(parser)\n return parser\n\n\ndef get_validation_parser(default_task=None):\n parser = get_parser(\"Validation\", default_task)\n add_dataset_args(parser, train=True)\n add_distributed_training_args(parser, default_world_size=1)\n group = parser.add_argument_group(\"Evaluation\")\n add_common_eval_args(group)\n return parser\n\n\ndef csv_str_list(x):\n return x.split(',')\n\n\ndef eval_str_list(x, type=float):\n if x is None:\n return None\n if isinstance(x, str):\n x = eval(x)\n try:\n return list(map(type, x))\n except TypeError:\n return [type(x)]\n\n\ndef eval_str_dict(x, type=dict):\n if x is None:\n return None\n if isinstance(x, str):\n x = eval(x)\n return x\n\n\ndef eval_bool(x, default=False):\n if x is None:\n return default\n try:\n return bool(eval(x))\n except TypeError:\n return default\n\n\ndef parse_args_and_arch(\n parser: argparse.ArgumentParser,\n input_args: List[str] = None,\n parse_known: bool = False,\n suppress_defaults: bool = False,\n modify_parser: Optional[Callable[[argparse.ArgumentParser], None]] = None,\n):\n \"\"\"\n Args:\n parser (ArgumentParser): the parser\n input_args (List[str]): strings to parse, defaults to sys.argv\n parse_known (bool): only parse known arguments, similar to\n `ArgumentParser.parse_known_args`\n suppress_defaults (bool): parse while ignoring all default values\n modify_parser (Optional[Callable[[ArgumentParser], None]]):\n function to modify the parser, e.g., to set default values\n \"\"\"\n if suppress_defaults:\n # Parse args without any default values. 
This requires us to parse\n # twice, once to identify all the necessary task/model args, and a second\n # time with all defaults set to None.\n args = parse_args_and_arch(\n parser,\n input_args=input_args,\n parse_known=parse_known,\n suppress_defaults=False,\n )\n suppressed_parser = argparse.ArgumentParser(add_help=False, parents=[parser])\n suppressed_parser.set_defaults(**{k: None for k, v in vars(args).items()})\n args = suppressed_parser.parse_args(input_args)\n return argparse.Namespace(\n **{k: v for k, v in vars(args).items() if v is not None}\n )\n\n from fairseq.models import ARCH_MODEL_REGISTRY, ARCH_CONFIG_REGISTRY\n\n # Before creating the true parser, we need to import optional user module\n # in order to eagerly import custom tasks, optimizers, architectures, etc.\n usr_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)\n usr_parser.add_argument(\"--user-dir\", default=None)\n usr_args, _ = usr_parser.parse_known_args(input_args)\n utils.import_user_module(usr_args)\n\n if modify_parser is not None:\n modify_parser(parser)\n\n # The parser doesn't know about model/criterion/optimizer-specific args, so\n # we parse twice. First we parse the model/criterion/optimizer, then we\n # parse a second time after adding the *-specific arguments.\n # If input_args is given, we will parse those args instead of sys.argv.\n args, _ = parser.parse_known_args(input_args)\n\n # Add model-specific args to parser.\n if hasattr(args, \"arch\"):\n model_specific_group = parser.add_argument_group(\n \"Model-specific configuration\",\n # Only include attributes which are explicitly given as command-line\n # arguments or which have default values.\n argument_default=argparse.SUPPRESS,\n )\n ARCH_MODEL_REGISTRY[args.arch].add_args(model_specific_group)\n\n # Add *-specific args to parser.\n from fairseq.registry import REGISTRIES\n\n for registry_name, REGISTRY in REGISTRIES.items():\n choice = getattr(args, registry_name, None)\n if choice is not None:\n cls = REGISTRY[\"registry\"][choice]\n if hasattr(cls, \"add_args\"):\n cls.add_args(parser)\n if hasattr(args, \"task\"):\n from fairseq.tasks import TASK_REGISTRY\n\n TASK_REGISTRY[args.task].add_args(parser)\n if getattr(args, \"use_bmuf\", False):\n # hack to support extra args for block distributed data parallelism\n from fairseq.optim.bmuf import FairseqBMUF\n\n FairseqBMUF.add_args(parser)\n\n # Modify the parser a second time, since defaults may have been reset\n if modify_parser is not None:\n modify_parser(parser)\n\n # Parse a second time.\n if parse_known:\n args, extra = parser.parse_known_args(input_args)\n else:\n args = parser.parse_args(input_args)\n extra = None\n\n # Post-process args.\n if hasattr(args, \"max_sentences_valid\") and args.max_sentences_valid is None:\n args.max_sentences_valid = args.max_sentences\n if hasattr(args, \"max_tokens_valid\") and args.max_tokens_valid is None:\n args.max_tokens_valid = args.max_tokens\n if getattr(args, \"memory_efficient_fp16\", False):\n args.fp16 = True\n if getattr(args, \"memory_efficient_bf16\", False):\n args.bf16 = True\n args.tpu = getattr(args, \"tpu\", False)\n args.bf16 = getattr(args, \"bf16\", False)\n if args.bf16:\n args.tpu = True\n if args.tpu and args.fp16:\n raise ValueError(\"Cannot combine --fp16 and --tpu, use --bf16 on TPUs\")\n\n if getattr(args, \"seed\", None) is None:\n args.seed = 1 # default seed for training\n args.no_seed_provided = True\n else:\n args.no_seed_provided = False\n\n # Apply architecture configuration.\n if hasattr(args, 
\"arch\"):\n ARCH_CONFIG_REGISTRY[args.arch](args)\n\n if parse_known:\n return args, extra\n else:\n return args\n\n\ndef get_parser(desc, default_task=\"translation\"):\n # Before creating the true parser, we need to import optional user module\n # in order to eagerly import custom tasks, optimizers, architectures, etc.\n usr_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)\n usr_parser.add_argument(\"--user-dir\", default=None)\n usr_args, _ = usr_parser.parse_known_args()\n utils.import_user_module(usr_args)\n\n parser = argparse.ArgumentParser(allow_abbrev=False)\n # fmt: off\n parser.add_argument('--no-progress-bar', action='store_true', help='disable progress bar')\n parser.add_argument('--log-interval', type=int, default=100, metavar='N',\n help='log progress every N batches (when progress bar is disabled)')\n parser.add_argument('--log-format', default=None, help='log format to use',\n choices=['json', 'none', 'simple', 'tqdm'])\n parser.add_argument('--tensorboard-logdir', metavar='DIR', default='',\n help='path to save logs for tensorboard, should match --logdir '\n 'of running tensorboard (default: no tensorboard logging)')\n parser.add_argument('--seed', default=None, type=int, metavar='N',\n help='pseudo random number generator seed')\n parser.add_argument('--cpu', action='store_true', help='use CPU instead of CUDA')\n parser.add_argument('--tpu', action='store_true', help='use TPU instead of CUDA')\n parser.add_argument('--bf16', action='store_true', help='use bfloat16; implies --tpu')\n parser.add_argument('--fp16', action='store_true', help='use FP16')\n parser.add_argument('--memory-efficient-bf16', action='store_true',\n help='use a memory-efficient version of BF16 training; implies --bf16')\n parser.add_argument('--memory-efficient-fp16', action='store_true',\n help='use a memory-efficient version of FP16 training; implies --fp16')\n parser.add_argument('--fp16-no-flatten-grads', action='store_true',\n help='don\\'t flatten FP16 grads tensor')\n parser.add_argument('--fp16-init-scale', default=2 ** 7, type=int,\n help='default FP16 loss scale')\n parser.add_argument('--fp16-scale-window', type=int,\n help='number of updates before increasing loss scale')\n parser.add_argument('--fp16-scale-tolerance', default=0.0, type=float,\n help='pct of updates that can overflow before decreasing the loss scale')\n parser.add_argument('--min-loss-scale', default=1e-4, type=float, metavar='D',\n help='minimum FP16 loss scale, after which training is stopped')\n parser.add_argument('--threshold-loss-scale', type=float,\n help='threshold FP16 loss scale from below')\n parser.add_argument('--user-dir', default=None,\n help='path to a python module containing custom extensions (tasks and/or architectures)')\n parser.add_argument('--empty-cache-freq', default=0, type=int,\n help='how often to clear the PyTorch CUDA cache (0 to disable)')\n parser.add_argument('--all-gather-list-size', default=16384, type=int,\n help='number of bytes reserved for gathering stats from workers')\n parser.add_argument('--model-parallel-size', type=int, metavar='N',\n default=1,\n help='total number of GPUs to parallelize model over')\n parser.add_argument('--checkpoint-suffix', default='',\n help='suffix to add to the checkpoint file name')\n parser.add_argument('--quantization-config-path', default=None,\n help='path to quantization config file')\n parser.add_argument('--profile', action='store_true', help='enable autograd profiler emit_nvtx')\n parser.add_argument('--quant-mode', 
type=str, default='none', choices=['none', 'symmetric', 'asymmetric'],\n help='quantization mode')\n parser.add_argument('--force-dequant', type=str, default='none', \n choices=['none', 'gelu', 'layernorm', 'softmax', 'nonlinear'],\n help='force dequantize the specific layers')\n parser.add_argument('--log-file', type=str, default='none',\n help='logging file')\n\n from fairseq.registry import REGISTRIES\n for registry_name, REGISTRY in REGISTRIES.items():\n parser.add_argument(\n '--' + registry_name.replace('_', '-'),\n default=REGISTRY['default'],\n choices=REGISTRY['registry'].keys(),\n )\n\n # Task definitions can be found under fairseq/tasks/\n from fairseq.tasks import TASK_REGISTRY\n parser.add_argument('--task', metavar='TASK', default=default_task,\n choices=TASK_REGISTRY.keys(),\n help='task')\n # fmt: on\n return parser\n\n\ndef add_preprocess_args(parser):\n group = parser.add_argument_group(\"Preprocessing\")\n # fmt: off\n group.add_argument(\"-s\", \"--source-lang\", default=None, metavar=\"SRC\",\n help=\"source language\")\n group.add_argument(\"-t\", \"--target-lang\", default=None, metavar=\"TARGET\",\n help=\"target language\")\n group.add_argument(\"--trainpref\", metavar=\"FP\", default=None,\n help=\"train file prefix\")\n group.add_argument(\"--validpref\", metavar=\"FP\", default=None,\n help=\"comma separated, valid file prefixes\")\n group.add_argument(\"--testpref\", metavar=\"FP\", default=None,\n help=\"comma separated, test file prefixes\")\n group.add_argument(\"--align-suffix\", metavar=\"FP\", default=None,\n help=\"alignment file suffix\")\n group.add_argument(\"--destdir\", metavar=\"DIR\", default=\"data-bin\",\n help=\"destination dir\")\n group.add_argument(\"--thresholdtgt\", metavar=\"N\", default=0, type=int,\n help=\"map words appearing less than threshold times to unknown\")\n group.add_argument(\"--thresholdsrc\", metavar=\"N\", default=0, type=int,\n help=\"map words appearing less than threshold times to unknown\")\n group.add_argument(\"--tgtdict\", metavar=\"FP\",\n help=\"reuse given target dictionary\")\n group.add_argument(\"--srcdict\", metavar=\"FP\",\n help=\"reuse given source dictionary\")\n group.add_argument(\"--nwordstgt\", metavar=\"N\", default=-1, type=int,\n help=\"number of target words to retain\")\n group.add_argument(\"--nwordssrc\", metavar=\"N\", default=-1, type=int,\n help=\"number of source words to retain\")\n group.add_argument(\"--alignfile\", metavar=\"ALIGN\", default=None,\n help=\"an alignment file (optional)\")\n parser.add_argument('--dataset-impl', metavar='FORMAT', default='mmap',\n choices=get_available_dataset_impl(),\n help='output dataset implementation')\n group.add_argument(\"--joined-dictionary\", action=\"store_true\",\n help=\"Generate joined dictionary\")\n group.add_argument(\"--only-source\", action=\"store_true\",\n help=\"Only process the source language\")\n group.add_argument(\"--padding-factor\", metavar=\"N\", default=8, type=int,\n help=\"Pad dictionary size to be multiple of N\")\n group.add_argument(\"--workers\", metavar=\"N\", default=1, type=int,\n help=\"number of parallel workers\")\n # fmt: on\n return parser\n\n\ndef add_dataset_args(parser, train=False, gen=False):\n group = parser.add_argument_group(\"Dataset and data loading\")\n # fmt: off\n group.add_argument('--num-workers', default=1, type=int, metavar='N',\n help='how many subprocesses to use for data loading')\n group.add_argument('--skip-invalid-size-inputs-valid-test', action='store_true',\n help='ignore too long or 
too short lines in valid and test set')\n group.add_argument('--max-tokens', type=int, metavar='N',\n help='maximum number of tokens in a batch')\n group.add_argument('--max-sentences', '--batch-size', type=int, metavar='N',\n help='maximum number of sentences in a batch')\n group.add_argument('--required-batch-size-multiple', default=8, type=int, metavar='N',\n help='batch size will either be less than this value, '\n 'or a multiple of this value')\n parser.add_argument('--dataset-impl', metavar='FORMAT',\n choices=get_available_dataset_impl(),\n help='output dataset implementation')\n group.add_argument('--data-buffer-size', default=10, type=int, metavar='N',\n help='number of batches to preload')\n if train:\n group.add_argument('--train-subset', default='train', metavar='SPLIT',\n help='data subset to use for training (e.g. train, valid, test)')\n group.add_argument('--valid-subset', default='valid', metavar='SPLIT',\n help='comma separated list of data subsets to use for validation'\n ' (e.g. train, valid, test)')\n group.add_argument('--validate-interval', type=int, default=1, metavar='N',\n help='validate every N epochs')\n group.add_argument('--validate-interval-updates', type=int, default=0, metavar='N',\n help='validate every N updates')\n group.add_argument('--validate-after-updates', type=int, default=0, metavar='N',\n help='dont validate until reaching this many updates')\n group.add_argument('--fixed-validation-seed', default=None, type=int, metavar='N',\n help='specified random seed for validation')\n group.add_argument('--disable-validation', action='store_true',\n help='disable validation')\n group.add_argument('--max-tokens-valid', type=int, metavar='N',\n help='maximum number of tokens in a validation batch'\n ' (defaults to --max-tokens)')\n group.add_argument('--max-sentences-valid', type=int, metavar='N',\n help='maximum number of sentences in a validation batch'\n ' (defaults to --max-sentences)')\n group.add_argument('--curriculum', default=0, type=int, metavar='N',\n help='don\\'t shuffle batches for first N epochs')\n if gen:\n group.add_argument('--gen-subset', default='test', metavar='SPLIT',\n help='data subset to generate (train, valid, test)')\n group.add_argument('--num-shards', default=1, type=int, metavar='N',\n help='shard generation over N shards')\n group.add_argument('--shard-id', default=0, type=int, metavar='ID',\n help='id of the shard to generate (id < num_shards)')\n # fmt: on\n return group\n\n\ndef add_distributed_training_args(parser, default_world_size=None):\n group = parser.add_argument_group(\"Distributed training\")\n # fmt: off\n if default_world_size is None:\n default_world_size = max(1, torch.cuda.device_count())\n group.add_argument('--distributed-world-size', type=int, metavar='N',\n default=default_world_size,\n help='total number of GPUs across all nodes (default: all visible GPUs)')\n group.add_argument('--distributed-rank', default=0, type=int,\n help='rank of the current worker')\n group.add_argument('--distributed-backend', default='nccl', type=str,\n help='distributed backend')\n group.add_argument('--distributed-init-method', default=None, type=str,\n help='typically tcp://hostname:port that will be used to '\n 'establish initial connetion')\n group.add_argument('--distributed-port', default=-1, type=int,\n help='port number (not required if using --distributed-init-method)')\n group.add_argument('--device-id', '--local_rank', default=0, type=int,\n help='which GPU to use (usually configured automatically)')\n 
group.add_argument('--distributed-no-spawn', action='store_true',\n help='do not spawn multiple processes even if multiple GPUs are visible')\n # \"c10d\" is PyTorch's DDP implementation and provides the fastest\n # training. \"no_c10d\" is a more robust, but slightly slower DDP\n # implementation. Try this if you get warning messages about\n # inconsistent gradients between workers, or if some of your model\n # parameters are not always used.\n group.add_argument('--ddp-backend', default='c10d', type=str,\n choices=['c10d', 'no_c10d'],\n help='DistributedDataParallel backend')\n group.add_argument('--bucket-cap-mb', default=25, type=int, metavar='MB',\n help='bucket size for reduction')\n group.add_argument('--fix-batches-to-gpus', action='store_true',\n help='don\\'t shuffle batches between GPUs; this reduces overall '\n 'randomness and may affect precision but avoids the cost of '\n 're-reading the data')\n group.add_argument('--find-unused-parameters', default=False, action='store_true',\n help='disable unused parameter detection (not applicable to '\n 'no_c10d ddp-backend')\n group.add_argument('--fast-stat-sync', default=False, action='store_true',\n help='[deprecated] this is now defined per Criterion')\n group.add_argument('--broadcast-buffers', default=False, action='store_true',\n help='Copy non-trainable parameters between GPUs, such as '\n 'batchnorm population statistics')\n\n group.add_argument('--distributed-wrapper', default='DDP', type=str,\n choices=['DDP', 'SlowMo'],\n help='DistributedDataParallel backend')\n # Add arguments for SlowMo - these will be used when SlowMo is enabled via above\n group.add_argument('--slowmo-momentum', default=None, type=float,\n help='SlowMo momentum term; by default use 0.0 for 16 GPUs, '\n '0.2 for 32 GPUs; 0.5 for 64 GPUs, 0.6 for > 64 GPUs')\n group.add_argument('--slowmo-algorithm', default='LocalSGD', choices=['LocalSGD', 'SGP'],\n help='whether to use LocalSGD or SGP')\n group.add_argument('--localsgd-frequency', default=3, type=int,\n help='Local SGD allreduce frequency')\n group.add_argument('--nprocs-per-node', type=int, metavar='N',\n default=max(1, torch.cuda.device_count()),\n help='number of GPUs in each node. An allreduce operation across GPUs in '\n 'a node is very fast. 
Hence, we do allreduce across GPUs in a node, '\n 'and gossip across different nodes')\n # fmt: on\n return group\n\n\ndef add_optimization_args(parser):\n group = parser.add_argument_group(\"Optimization\")\n # fmt: off\n group.add_argument('--max-epoch', '--me', default=0, type=int, metavar='N',\n help='force stop training at specified epoch')\n group.add_argument('--max-update', '--mu', default=0, type=int, metavar='N',\n help='force stop training at specified update')\n group.add_argument('--stop-time-hours', default=0, type=float, metavar='N',\n help='force stop training after specified cumulative time (if >0)')\n group.add_argument('--clip-norm', default=0.0, type=float, metavar='NORM',\n help='clip threshold of gradients')\n group.add_argument('--sentence-avg', action='store_true',\n help='normalize gradients by the number of sentences in a batch'\n ' (default is to normalize by number of tokens)')\n group.add_argument('--update-freq', default='1', metavar='N1,N2,...,N_K',\n type=lambda uf: eval_str_list(uf, type=int),\n help='update parameters every N_i batches, when in epoch i')\n group.add_argument('--lr', '--learning-rate', default='0.25', type=eval_str_list,\n metavar='LR_1,LR_2,...,LR_N',\n help='learning rate for the first N epochs; all epochs >N using LR_N'\n ' (note: this may be interpreted differently depending on --lr-scheduler)')\n group.add_argument('--min-lr', default=-1, type=float, metavar='LR',\n help='stop training when the learning rate reaches this minimum')\n group.add_argument('--use-bmuf', default=False, action='store_true',\n help='specify global optimizer for syncing models on different GPUs/shards')\n # fmt: on\n return group\n\n\ndef add_checkpoint_args(parser):\n group = parser.add_argument_group(\"Checkpointing\")\n # fmt: off\n group.add_argument('--save-dir', metavar='DIR', default='checkpoints',\n help='path to save checkpoints')\n group.add_argument('--restore-file', default='checkpoint_last.pt',\n help='filename from which to load checkpoint '\n '(default: <save-dir>/checkpoint_last.pt')\n group.add_argument('--finetune-from-model', default=None, type=str,\n help='finetune from a pretrained model; '\n 'note that meters and lr scheduler will be reset')\n group.add_argument('--reset-dataloader', action='store_true',\n help='if set, does not reload dataloader state from the checkpoint')\n group.add_argument('--reset-lr-scheduler', action='store_true',\n help='if set, does not load lr scheduler state from the checkpoint')\n group.add_argument('--reset-meters', action='store_true',\n help='if set, does not load meters from the checkpoint')\n group.add_argument('--reset-optimizer', action='store_true',\n help='if set, does not load optimizer state from the checkpoint')\n group.add_argument('--optimizer-overrides', default=\"{}\", type=str, metavar='DICT',\n help='a dictionary used to override optimizer args when loading a checkpoint')\n group.add_argument('--save-interval', type=int, default=1, metavar='N',\n help='save a checkpoint every N epochs')\n group.add_argument('--save-interval-updates', type=int, default=0, metavar='N',\n help='save a checkpoint (and validate) every N updates')\n group.add_argument('--keep-interval-updates', type=int, default=-1, metavar='N',\n help='keep the last N checkpoints saved with --save-interval-updates')\n group.add_argument('--keep-last-epochs', type=int, default=-1, metavar='N',\n help='keep last N epoch checkpoints')\n group.add_argument('--keep-best-checkpoints', type=int, default=-1, metavar='N',\n help='keep best 
N checkpoints based on scores')\n group.add_argument('--no-save', action='store_true',\n help='don\\'t save models or checkpoints')\n group.add_argument('--no-epoch-checkpoints', action='store_true',\n help='only store last and best checkpoints')\n group.add_argument('--no-last-checkpoints', action='store_true',\n help='don\\'t store last checkpoints')\n group.add_argument('--no-save-optimizer-state', action='store_true',\n help='don\\'t save optimizer-state as part of checkpoint')\n group.add_argument('--best-checkpoint-metric', type=str, default='loss',\n help='metric to use for saving \"best\" checkpoints')\n group.add_argument('--maximize-best-checkpoint-metric', action='store_true',\n help='select the largest metric value for saving \"best\" checkpoints')\n group.add_argument('--patience', type=int, default=-1, metavar='N',\n help=('early stop training if valid performance doesn\\'t '\n 'improve for N consecutive validation runs; note '\n 'that this is influenced by --validate-interval'))\n # fmt: on\n return group\n\n\ndef add_common_eval_args(group):\n # fmt: off\n group.add_argument('--path', metavar='FILE',\n help='path(s) to model file(s), colon separated')\n group.add_argument('--remove-bpe', '--post-process', nargs='?', const='@@ ', default=None,\n help='remove BPE tokens before scoring (can be set to sentencepiece)')\n group.add_argument('--quiet', action='store_true',\n help='only print final scores')\n group.add_argument('--model-overrides', default=\"{}\", type=str, metavar='DICT',\n help='a dictionary used to override model args at generation '\n 'that were used during model training')\n group.add_argument('--results-path', metavar='RESDIR', type=str, default=None,\n help='path to save eval results (optional)\"')\n # fmt: on\n\n\ndef add_eval_lm_args(parser):\n group = parser.add_argument_group(\"LM Evaluation\")\n add_common_eval_args(group)\n # fmt: off\n group.add_argument('--output-word-probs', action='store_true',\n help='if set, outputs words and their predicted log probabilities to standard output')\n group.add_argument('--output-word-stats', action='store_true',\n help='if set, outputs word statistics such as word count, average probability, etc')\n group.add_argument('--context-window', default=0, type=int, metavar='N',\n help='ensures that every evaluated token has access to a context of at least this size,'\n ' if possible')\n group.add_argument('--softmax-batch', default=sys.maxsize, type=int, metavar='N',\n help='if BxT is more than this, will batch the softmax over vocab to this amount of tokens'\n ' in order to fit into GPU memory')\n # fmt: on\n\n\ndef add_generation_args(parser):\n group = parser.add_argument_group(\"Generation\")\n add_common_eval_args(group)\n # fmt: off\n group.add_argument('--beam', default=5, type=int, metavar='N',\n help='beam size')\n group.add_argument('--nbest', default=1, type=int, metavar='N',\n help='number of hypotheses to output')\n group.add_argument('--max-len-a', default=0, type=float, metavar='N',\n help=('generate sequences of maximum length ax + b, '\n 'where x is the source length'))\n group.add_argument('--max-len-b', default=200, type=int, metavar='N',\n help=('generate sequences of maximum length ax + b, '\n 'where x is the source length'))\n group.add_argument('--min-len', default=1, type=float, metavar='N',\n help=('minimum generation length'))\n group.add_argument('--match-source-len', default=False, action='store_true',\n help=('generations should match the source length'))\n 
group.add_argument('--no-early-stop', action='store_true',\n help='deprecated')\n group.add_argument('--unnormalized', action='store_true',\n help='compare unnormalized hypothesis scores')\n group.add_argument('--no-beamable-mm', action='store_true',\n help='don\\'t use BeamableMM in attention layers')\n group.add_argument('--lenpen', default=1, type=float,\n help='length penalty: <1.0 favors shorter, >1.0 favors longer sentences')\n group.add_argument('--unkpen', default=0, type=float,\n help='unknown word penalty: <0 produces more unks, >0 produces fewer')\n group.add_argument('--replace-unk', nargs='?', const=True, default=None,\n help='perform unknown replacement (optionally with alignment dictionary)')\n group.add_argument('--sacrebleu', action='store_true',\n help='score with sacrebleu')\n group.add_argument('--score-reference', action='store_true',\n help='just score the reference translation')\n group.add_argument('--prefix-size', default=0, type=int, metavar='PS',\n help='initialize generation by target prefix of given length')\n group.add_argument('--no-repeat-ngram-size', default=0, type=int, metavar='N',\n help='ngram blocking such that this size ngram cannot be repeated in the generation')\n group.add_argument('--sampling', action='store_true',\n help='sample hypotheses instead of using beam search')\n group.add_argument('--sampling-topk', default=-1, type=int, metavar='PS',\n help='sample from top K likely next words instead of all words')\n group.add_argument('--sampling-topp', default=-1.0, type=float, metavar='PS',\n help='sample from the smallest set whose cumulative probability mass exceeds p for next words')\n group.add_argument('--temperature', default=1., type=float, metavar='N',\n help='temperature for generation')\n group.add_argument('--diverse-beam-groups', default=-1, type=int, metavar='N',\n help='number of groups for Diverse Beam Search')\n group.add_argument('--diverse-beam-strength', default=0.5, type=float, metavar='N',\n help='strength of diversity penalty for Diverse Beam Search')\n group.add_argument('--diversity-rate', default=-1.0, type=float, metavar='N',\n help='strength of diversity penalty for Diverse Siblings Search')\n group.add_argument('--print-alignment', action='store_true',\n help='if set, uses attention feedback to compute and print alignment to source tokens')\n group.add_argument('--print-step', action='store_true')\n\n # arguments for iterative refinement generator\n group.add_argument('--iter-decode-eos-penalty', default=0.0, type=float, metavar='N',\n help='if > 0.0, it penalized early-stopping in decoding.')\n group.add_argument('--iter-decode-max-iter', default=10, type=int, metavar='N',\n help='maximum iterations for iterative refinement.')\n group.add_argument('--iter-decode-force-max-iter', action='store_true',\n help='if set, run exact the maximum number of iterations without early stop')\n group.add_argument('--iter-decode-with-beam', default=1, type=int, metavar='N',\n help='if > 1, model will generate translations varying by the lengths.')\n group.add_argument('--iter-decode-with-external-reranker', action='store_true',\n help='if set, the last checkpoint are assumed to be a reranker to rescore the translations'),\n group.add_argument('--retain-iter-history', action='store_true',\n help='if set, decoding returns the whole history of iterative refinement')\n group.add_argument('--retain-dropout', action='store_true',\n help='Use dropout at inference time')\n group.add_argument('--retain-dropout-modules', default=None, nargs='+', 
type=str,\n help='if set, only retain dropout for the specified modules; '\n 'if not set, then dropout will be retained for all modules')\n\n # special decoding format for advanced decoding.\n group.add_argument('--decoding-format', default=None, type=str, choices=['unigram', 'ensemble', 'vote', 'dp', 'bs'])\n # fmt: on\n return group\n\n\ndef add_interactive_args(parser):\n group = parser.add_argument_group(\"Interactive\")\n # fmt: off\n group.add_argument('--buffer-size', default=0, type=int, metavar='N',\n help='read this many sentences into a buffer before processing them')\n group.add_argument('--input', default='-', type=str, metavar='FILE',\n help='file to read from; use - for stdin')\n # fmt: on\n\n\ndef add_model_args(parser):\n group = parser.add_argument_group(\"Model configuration\")\n # fmt: off\n\n # Model definitions can be found under fairseq/models/\n #\n # The model architecture can be specified in several ways.\n # In increasing order of priority:\n # 1) model defaults (lowest priority)\n # 2) --arch argument\n # 3) --encoder/decoder-* arguments (highest priority)\n from fairseq.models import ARCH_MODEL_REGISTRY\n group.add_argument('--arch', '-a', metavar='ARCH',\n choices=ARCH_MODEL_REGISTRY.keys(),\n help='model architecture')\n # fmt: on\n return group\n" ]
[ [ "torch.cuda.device_count" ] ]
amankedia/Project-Euler
[ "acc48bf4731ad85e07c22c46cc18360eec3f20e6" ]
[ "AmicableNumbers.py" ]
[ "import numpy as np\ndef sumDivisors(x):\n sum = 1\n j = x\n sqrt = int(np.sqrt(x))\n for i in range(2, sqrt):\n if j%i==0:\n #print(j, i, j/i)\n sum = sum + i\n sum = sum + (j/i)\n if sqrt*sqrt == x:\n sum = sum - sqrt\n #print(sum)\n return sum\n\ndef isAmicable(a, b):\n if sumDivisors(a) == b and sumDivisors(b) == a:\n return True\n else:\n return False\n\"\"\"\ndef main():\n print(sumDivisors(8039))\n print(isAmicable(2,3))\n\"\"\"\ndef main():\n sum = 0\n for i in range(1, 10000):\n for j in range(i+1, 10000):\n truth = isAmicable(i, j)\n if truth == True:\n print(i, j)\n sum = sum + i + j\n print(sum)\n\nif __name__==\"__main__\":\n main()\n" ]
[ [ "numpy.sqrt" ] ]
nebw/pymc3
[ "2fec315835a9a699a87d5ef931074c3692cca4b7" ]
[ "pymc3/distributions/mixture.py" ]
[ "import numpy as np\nimport theano.tensor as tt\n\nfrom pymc3.util import get_variable_name\nfrom ..math import logsumexp\nfrom .dist_math import bound, random_choice\nfrom .distribution import (Discrete, Distribution, draw_values,\n generate_samples, _DrawValuesContext)\nfrom .continuous import get_tau_sd, Normal\n\n\ndef all_discrete(comp_dists):\n \"\"\"\n Determine if all distributions in comp_dists are discrete\n \"\"\"\n if isinstance(comp_dists, Distribution):\n return isinstance(comp_dists, Discrete)\n else:\n return all(isinstance(comp_dist, Discrete) for comp_dist in comp_dists)\n\n\nclass Mixture(Distribution):\n R\"\"\"\n Mixture log-likelihood\n\n Often used to model subpopulation heterogeneity\n\n .. math:: f(x \\mid w, \\theta) = \\sum_{i = 1}^n w_i f_i(x \\mid \\theta_i)\n\n ======== ============================================\n Support :math:`\\cap_{i = 1}^n \\textrm{support}(f_i)`\n Mean :math:`\\sum_{i = 1}^n w_i \\mu_i`\n ======== ============================================\n\n Parameters\n ----------\n w : array of floats\n w >= 0 and w <= 1\n the mixture weights\n comp_dists : multidimensional PyMC3 distribution (e.g. `pm.Poisson.dist(...)`)\n or iterable of one-dimensional PyMC3 distributions the\n component distributions :math:`f_1, \\ldots, f_n`\n\n Example\n -------\n .. code-block:: python\n\n # 2-Mixture Poisson distribution\n with pm.Model() as model:\n lam = pm.Exponential('lam', lam=1, shape=(2,)) # `shape=(2,)` indicates two mixtures.\n\n # As we just need the logp, rather than add a RV to the model, we need to call .dist()\n components = pm.Poisson.dist(mu=lam, shape=(2,))\n\n w = pm.Dirichlet('w', a=np.array([1, 1])) # two mixture component weights.\n\n like = pm.Mixture('like', w=w, comp_dists=components, observed=data)\n\n # 2-Mixture Poisson using iterable of distributions.\n with pm.Model() as model:\n lam1 = pm.Exponential('lam1', lam=1)\n lam2 = pm.Exponential('lam2', lam=1)\n\n pois1 = pm.Poisson.dist(mu=lam1)\n pois2 = pm.Poisson.dist(mu=lam2)\n\n w = pm.Dirichlet('w', a=np.array([1, 1]))\n\n like = pm.Mixture('like', w=w, comp_dists = [pois1, pois2], observed=data)\n \"\"\"\n\n def __init__(self, w, comp_dists, *args, **kwargs):\n shape = kwargs.pop('shape', ())\n\n self.w = w = tt.as_tensor_variable(w)\n self.comp_dists = comp_dists\n\n defaults = kwargs.pop('defaults', [])\n\n if all_discrete(comp_dists):\n dtype = kwargs.pop('dtype', 'int64')\n else:\n dtype = kwargs.pop('dtype', 'float64')\n\n try:\n self.mean = (w * self._comp_means()).sum(axis=-1)\n\n if 'mean' not in defaults:\n defaults.append('mean')\n except AttributeError:\n pass\n\n try:\n comp_modes = self._comp_modes()\n comp_mode_logps = self.logp(comp_modes)\n self.mode = comp_modes[tt.argmax(w * comp_mode_logps, axis=-1)]\n\n if 'mode' not in defaults:\n defaults.append('mode')\n except (AttributeError, ValueError, IndexError):\n pass\n\n super(Mixture, self).__init__(shape, dtype, defaults=defaults,\n *args, **kwargs)\n\n def _comp_logp(self, value):\n comp_dists = self.comp_dists\n\n try:\n value_ = value if value.ndim > 1 else tt.shape_padright(value)\n\n return comp_dists.logp(value_)\n except AttributeError:\n return tt.squeeze(tt.stack([comp_dist.logp(value)\n for comp_dist in comp_dists],\n axis=1))\n\n def _comp_means(self):\n try:\n return tt.as_tensor_variable(self.comp_dists.mean)\n except AttributeError:\n return tt.squeeze(tt.stack([comp_dist.mean\n for comp_dist in self.comp_dists],\n axis=1))\n\n def _comp_modes(self):\n try:\n return 
tt.as_tensor_variable(self.comp_dists.mode)\n except AttributeError:\n return tt.squeeze(tt.stack([comp_dist.mode\n for comp_dist in self.comp_dists],\n axis=1))\n\n def _comp_samples(self, point=None, size=None):\n try:\n samples = self.comp_dists.random(point=point, size=size)\n except AttributeError:\n samples = np.column_stack([comp_dist.random(point=point, size=size)\n for comp_dist in self.comp_dists])\n\n return np.squeeze(samples)\n\n def logp(self, value):\n w = self.w\n\n return bound(logsumexp(tt.log(w) + self._comp_logp(value), axis=-1),\n w >= 0, w <= 1, tt.allclose(w.sum(axis=-1), 1),\n broadcast_conditions=False)\n\n def random(self, point=None, size=None):\n with _DrawValuesContext() as draw_context:\n w = draw_values([self.w], point=point)[0]\n comp_tmp = self._comp_samples(point=point, size=None)\n if np.asarray(self.shape).size == 0:\n distshape = np.asarray(np.broadcast(w, comp_tmp).shape)[..., :-1]\n else:\n distshape = np.asarray(self.shape)\n\n # Normalize inputs\n w /= w.sum(axis=-1, keepdims=True)\n\n w_samples = generate_samples(random_choice,\n p=w,\n broadcast_shape=w.shape[:-1] or (1,),\n dist_shape=distshape,\n size=size).squeeze()\n if (size is None) or (distshape.size == 0):\n with draw_context:\n comp_samples = self._comp_samples(point=point, size=size)\n if comp_samples.ndim > 1:\n samples = np.squeeze(comp_samples[np.arange(w_samples.size), ..., w_samples])\n else:\n samples = np.squeeze(comp_samples[w_samples])\n else:\n if w_samples.ndim == 1:\n w_samples = np.reshape(np.tile(w_samples, size), (size,) + w_samples.shape)\n samples = np.zeros((size,)+tuple(distshape))\n with draw_context:\n for i in range(size):\n w_tmp = w_samples[i, :]\n comp_tmp = self._comp_samples(point=point, size=None)\n if comp_tmp.ndim > 1:\n samples[i, :] = np.squeeze(comp_tmp[np.arange(w_tmp.size), ..., w_tmp])\n else:\n samples[i, :] = np.squeeze(comp_tmp[w_tmp])\n\n return samples\n\n\nclass NormalMixture(Mixture):\n R\"\"\"\n Normal mixture log-likelihood\n\n .. math::\n\n f(x \\mid w, \\mu, \\sigma^2) = \\sum_{i = 1}^n w_i N(x \\mid \\mu_i, \\sigma^2_i)\n\n ======== =======================================\n Support :math:`x \\in \\mathbb{R}`\n Mean :math:`\\sum_{i = 1}^n w_i \\mu_i`\n Variance :math:`\\sum_{i = 1}^n w_i^2 \\sigma^2_i`\n ======== =======================================\n\n Parameters\n ----------\n w : array of floats\n w >= 0 and w <= 1\n the mixture weights\n mu : array of floats\n the component means\n sd : array of floats\n the component standard deviations\n tau : array of floats\n the component precisions\n comp_shape : shape of the Normal component\n notice that it should be different than the shape\n of the mixture distribution, with one axis being\n the number of components.\n\n Note: You only have to pass in sd or tau, but not both.\n \"\"\"\n\n def __init__(self, w, mu, comp_shape=(), *args, **kwargs):\n _, sd = get_tau_sd(tau=kwargs.pop('tau', None),\n sd=kwargs.pop('sd', None))\n\n self.mu = mu = tt.as_tensor_variable(mu)\n self.sd = sd = tt.as_tensor_variable(sd)\n\n super(NormalMixture, self).__init__(w, Normal.dist(mu, sd=sd, shape=comp_shape),\n *args, **kwargs)\n\n def _repr_latex_(self, name=None, dist=None):\n if dist is None:\n dist = self\n mu = dist.mu\n w = dist.w\n sd = dist.sd\n name = r'\\text{%s}' % name\n return r'${} \\sim \\text{{NormalMixture}}(\\mathit{{w}}={},~\\mathit{{mu}}={},~\\mathit{{sigma}}={})$'.format(name,\n get_variable_name(w),\n get_variable_name(mu),\n get_variable_name(sd))\n" ]
[ [ "numpy.asarray", "numpy.arange", "numpy.squeeze", "numpy.tile", "numpy.broadcast" ] ]
EarthObservationSimulator/eosim
[ "8a589679235d7f93ed4bb7bad4e607f2ec23e604" ]
[ "eosim/gui/visualizeframe/vis2dframe.py" ]
[ "from tkinter import ttk \nimport tkinter as tk\nimport tkinter.filedialog, tkinter.messagebox\nfrom eosim import config\nfrom orbitpy import preprocess, orbitpropcov, communications, obsdatametrics, util\nimport instrupy\nimport pandas as pd\nimport numpy as np\n\nfrom matplotlib.backends.backend_tkagg import (\n FigureCanvasTkAgg, NavigationToolbar2Tk)\n# Implement the default Matplotlib key bindings.\nfrom matplotlib.backend_bases import key_press_handler\nfrom matplotlib.figure import Figure\n\nimport logging\nlogger = logging.getLogger(__name__)\n\nclass Plot2DVisVars(instrupy.util.EnumEntity):\n TIME = \"Time\"\n ALT = \"Altitude [km]\"\n INC = \"Inclination [deg]\"\n TA = \"True Anomaly [km]\"\n RAAN = \"RAAN [deg]\"\n AOP = \"AOP [deg]\"\n ECC = \"ECC\"\n SPD = \"ECI Speed [km/s]\"\n ECIX = \"ECI X-position [km]\"\n ECIY = \"ECI Y-position [km]\"\n ECIZ = \"ECI Z-position [km]\"\n VX = \"ECI X Velocity [km/s]\"\n VY = \"ECI Y Velocity [km/s]\"\n VZ = \"ECI Z Velocity [km/s]\"\n LAT = \"Latitude [deg]\"\n LON = \"Longitude [deg]\"\n\n @classmethod\n def get_orbitpy_file_column_header(cls, var):\n if(var==cls.ECIX):\n return \"X[km]\"\n elif(var==cls.ECIY):\n return \"Y[km]\"\n elif(var==cls.ECIZ):\n return \"Z[km]\"\n elif(var==cls.VX):\n return \"VX[km/s]\"\n elif(var==cls.VY):\n return \"VY[km/s]\"\n elif(var==cls.VZ):\n return \"VZ[km/s]\"\n elif(var==cls.INC):\n return \"INC[deg]\"\n elif(var==cls.RAAN):\n return \"RAAN[deg]\"\n elif(var==cls.AOP):\n return \"AOP[deg]\"\n elif(var==cls.TA):\n return \"TA[deg]\"\n elif(var==cls.ECC):\n return \"ECC\"\n else:\n return False # could be a derived variable\n \n @classmethod\n def get_data_from_orbitpy_file(cls, sat_df, sat_id, var, step_size, epoch_JDUT1):\n ''' Get data frame the orbitpy resultant output files '''\n _header = Plot2DVisVars.get_orbitpy_file_column_header(var) \n if(_header is not False): \n if _header == sat_df.index.name:\n data = sat_df.index\n else:\n data = sat_df[_header]\n else:\n # a derived variable\n if(var == cls.TIME):\n data = np.array(sat_df.index) * step_size # index = \"TimeIndex\"\n _header = 'Time[s]'\n elif(var == cls.ALT):\n sat_dist = []\n sat_dist = np.array(sat_df[\"X[km]\"])*np.array(sat_df[\"X[km]\"]) + np.array(sat_df[\"Y[km]\"])*np.array(sat_df[\"Y[km]\"]) + np.array(sat_df[\"Z[km]\"])*np.array(sat_df[\"Z[km]\"])\n sat_dist = np.sqrt(sat_dist)\n data = np.array(sat_dist) - instrupy.util.Constants.radiusOfEarthInKM\n _header = 'Alt[km]'\n elif(var==cls.SPD):\n data = np.array(sat_df[\"VX[km/s]\"])*np.array(sat_df[\"VX[km/s]\"]) + np.array(sat_df[\"VY[km/s]\"])*np.array(sat_df[\"VY[km/s]\"]) + np.array(sat_df[\"VZ[km/s]\"])*np.array(sat_df[\"VZ[km/s]\"])\n data = np.sqrt(data)\n _header = 'Speed[km/s]'\n elif(var==cls.LAT):\n lat = np.zeros((len(sat_df[\"X[km]\"]), 1))\n for k in range(0,len(sat_df[\"X[km]\"])):\n [lat[k], _x, _y] = instrupy.util.MathUtilityFunctions.eci2geo([sat_df[\"X[km]\"][k], sat_df[\"Y[km]\"][k], sat_df[\"Z[km]\"][k]], epoch_JDUT1)\n data = lat\n _header = 'Latitude[deg]'\n elif(var==cls.LON):\n lon = np.zeros((len(sat_df[\"X[km]\"]), 1))\n for k in range(0,len(sat_df[\"X[km]\"])):\n [_x, lon[k], _y] = instrupy.util.MathUtilityFunctions.eci2geo([sat_df[\"X[km]\"][k], sat_df[\"Y[km]\"][k], sat_df[\"Z[km]\"][k]], epoch_JDUT1)\n data = lon\n _header = 'Longitude[deg]'\n \n return [str(sat_id)+'.'+_header, data]\n\nclass TwoDimVisPlotAttibutes():\n def __init__(self, x_sat_id=None, x_var=None, y_sat_id=None, y_var=None, time_start=None, time_end=None):\n self.x_sat_id = 
x_sat_id if x_sat_id is not None else None\n self.x_var = x_var if x_var is not None else None\n self.y_sat_id = y_sat_id if y_sat_id is not None else list()\n self.y_var = y_var if y_var is not None else list()\n self.time_start = time_start if time_start is not None else None\n self.time_end = time_end if time_end is not None else None\n\n def update_x_variables(self, x_sat_id, x_var):\n self.x_sat_id = x_sat_id\n self.x_var = x_var\n \n def update_y_variables(self, y_sat_id, y_var):\n self.y_sat_id.append(y_sat_id)\n self.y_var.append(y_var)\n \n def reset_y_variables(self):\n self.y_sat_id = list()\n self.y_var = list()\n\n def update_time_interval(self, time_start, time_end):\n self.time_start = time_start\n self.time_end = time_end\n \n def get_x_variables(self):\n return [self.x_sat_id, self.x_var]\n\n def get_y_variables(self):\n return [self.y_sat_id, self.y_var]\n \n def get_time_interval(self):\n return [self.time_start, self.time_end]\n\nclass Vis2DFrame(ttk.Frame):\n\n def __init__(self, win, tab):\n \n self.two_dim_vis_plt_attr = TwoDimVisPlotAttibutes() # data structure storing the 2D plot attributes\n\n # 2d plots frame\n vis_2d_frame = ttk.Frame(tab)\n vis_2d_frame.pack(expand = True, fill =\"both\", padx=10, pady=10)\n vis_2d_frame.rowconfigure(0,weight=1)\n vis_2d_frame.rowconfigure(1,weight=1)\n vis_2d_frame.columnconfigure(0,weight=1)\n vis_2d_frame.columnconfigure(1,weight=1) \n\n vis_2d_time_frame = ttk.LabelFrame(vis_2d_frame, text='Set Time Interval', labelanchor='n')\n vis_2d_time_frame.grid(row=0, column=0, sticky='nswe', rowspan=2, padx=(40,0))\n vis_2d_time_frame.rowconfigure(0,weight=1)\n vis_2d_time_frame.rowconfigure(1,weight=1)\n vis_2d_time_frame.rowconfigure(2,weight=1)\n vis_2d_time_frame.columnconfigure(0,weight=1)\n vis_2d_time_frame.columnconfigure(1,weight=1)\n\n vis_2d_xaxis_frame = ttk.LabelFrame(vis_2d_frame, text='Set X-variable', labelanchor='n')\n vis_2d_xaxis_frame.grid(row=0, column=1, sticky='nswe')\n vis_2d_xaxis_frame.columnconfigure(0,weight=1)\n vis_2d_xaxis_frame.columnconfigure(1,weight=1)\n vis_2d_xaxis_frame.rowconfigure(0,weight=1)\n\n vis_2d_yaxis_frame = ttk.LabelFrame(vis_2d_frame, text='Set Y-variable(s)', labelanchor='n')\n vis_2d_yaxis_frame.grid(row=1, column=1, sticky='nswe')\n vis_2d_yaxis_frame.columnconfigure(0,weight=1)\n vis_2d_yaxis_frame.columnconfigure(1,weight=1)\n vis_2d_yaxis_frame.rowconfigure(0,weight=1)\n\n vis_2d_plot_frame = ttk.Frame(vis_2d_frame)\n vis_2d_plot_frame.grid(row=2, column=0, columnspan=2, sticky='nswe', pady=(10,2)) \n vis_2d_plot_frame.columnconfigure(0,weight=1)\n vis_2d_plot_frame.columnconfigure(1,weight=1) \n vis_2d_plot_frame.rowconfigure(0,weight=1)\n\n # 2D vis frame\n ttk.Label(vis_2d_time_frame, text=\"Time (hh:mm:ss) from mission-epoch\", wraplength=\"110\", justify='center').grid(row=0, column=0,columnspan=2,ipady=5)\n \n ttk.Label(vis_2d_time_frame, text=\"From\").grid(row=1, column=0, sticky='ne')\n self.vis_2d_time_from_entry = ttk.Entry(vis_2d_time_frame, width=10, takefocus = False)\n self.vis_2d_time_from_entry.grid(row=1, column=1, sticky='nw', padx=10)\n self.vis_2d_time_from_entry.insert(0,'00:00:00')\n self.vis_2d_time_from_entry.bind(\"<FocusIn>\", lambda args: self.vis_2d_time_from_entry.delete('0', 'end'))\n \n ttk.Label(vis_2d_time_frame, text=\"To\").grid(row=2, column=0, sticky='ne')\n self.vis_2d_time_to_entry = ttk.Entry(vis_2d_time_frame, width=10, takefocus = False)\n self.vis_2d_time_to_entry.grid(row=2, column=1, sticky='nw', padx=10)\n 
self.vis_2d_time_to_entry.insert(0,'10:00:00')\n self.vis_2d_time_to_entry.bind(\"<FocusIn>\", lambda args: self.vis_2d_time_to_entry.delete('0', 'end'))\n\n vis_2d_x_sel_var_btn = ttk.Button(vis_2d_xaxis_frame, text=\"X.Var\", command=self.click_select_xvar_btn)\n vis_2d_x_sel_var_btn.grid(row=0, column=0)\n self.vis_2d_x_sel_var_disp = tk.Text(vis_2d_xaxis_frame, state='disabled',height = 1, width = 3, background=\"light grey\")\n self.vis_2d_x_sel_var_disp.grid(row=0, column=1, sticky='nsew', padx=20, pady=20) \n\n vis_2d_y_sel_var_btn = ttk.Button(vis_2d_yaxis_frame, text=\"Y.Var(s)\", command=self.click_select_yvar_btn)\n vis_2d_y_sel_var_btn.grid(row=0, column=0)\n self.vis_2d_y_sel_var_disp = tk.Text(vis_2d_yaxis_frame, state='disabled',height = 2, width = 3, background=\"light grey\")\n self.vis_2d_y_sel_var_disp.grid(row=0, column=1, sticky='nsew', padx=20, pady=20) \n \n plot_btn = ttk.Button(vis_2d_plot_frame, text=\"Plot\", command=lambda: self.click_plot_btn(plot=True))\n plot_btn.grid(row=0, column=0, sticky='e', padx=20)\n\n export_btn = ttk.Button(vis_2d_plot_frame, text=\"Export\", command=lambda: self.click_plot_btn(export=True))\n export_btn.grid(row=0, column=1, sticky='w', padx=20)\n\n def click_select_xvar_btn(self):\n # create window to ask which satellite \n select_xvar_win = tk.Toplevel()\n select_xvar_win.rowconfigure(0,weight=1)\n select_xvar_win.rowconfigure(1,weight=1)\n select_xvar_win.columnconfigure(0,weight=1)\n select_xvar_win.columnconfigure(1,weight=1)\n\n select_sat_win_frame = ttk.LabelFrame(select_xvar_win, text='Select Satellite')\n select_sat_win_frame.grid(row=0, column=0, padx=10, pady=10) \n\n select_var_frame = ttk.LabelFrame(select_xvar_win, text='Select Variable')\n select_var_frame.grid(row=0, column=1, padx=10, pady=10) \n\n okcancel_frame = ttk.Label(select_xvar_win)\n okcancel_frame.grid(row=1, column=0, columnspan=2, padx=10, pady=10) \n\n # place the widgets in the frame\n available_sats = config.out_config.get_satellite_ids() # get all available sats for which outputs are available\n \n sats_combo_box = ttk.Combobox(select_sat_win_frame, \n values=available_sats)\n sats_combo_box.grid(row=0, column=0)\n\n sats_combo_box = ttk.Combobox(select_sat_win_frame, \n values=available_sats)\n sats_combo_box.current(0)\n sats_combo_box.grid(row=0, column=0)\n\n self._2dvis_xvar= tk.StringVar() # using self so that the variable is retained even after exit from the function, make sure variable name is unique\n j = 0\n k = 0\n for _var in list(Plot2DVisVars):\n var_rbtn = ttk.Radiobutton(select_var_frame, text=_var, variable=self._2dvis_xvar, value=_var)\n var_rbtn.grid(row=j, column=k, sticky='w')\n j = j + 1\n if(j==5):\n j=0\n k=k+1\n\n def click_ok_btn():\n self.two_dim_vis_plt_attr.update_x_variables(sats_combo_box.get(), self._2dvis_xvar.get())\n [sats, xvars] = self.two_dim_vis_plt_attr.get_x_variables()\n xvars_str = str(sats+'.'+xvars)\n self.vis_2d_x_sel_var_disp.configure(state='normal')\n self.vis_2d_x_sel_var_disp.delete(1.0,'end')\n self.vis_2d_x_sel_var_disp.insert(1.0, xvars_str)\n self.vis_2d_x_sel_var_disp.configure(state='disabled')\n select_xvar_win.destroy()\n\n ok_btn = ttk.Button(okcancel_frame, text=\"Ok\", command=click_ok_btn, width=15)\n ok_btn.grid(row=0, column=0, sticky ='e')\n cancel_btn = ttk.Button(okcancel_frame, text=\"Exit\", command=select_xvar_win.destroy, width=15)\n cancel_btn.grid(row=0, column=1, sticky ='w') \n\n def click_select_yvar_btn(self):\n\n # reset any previously configured y-variables\n 
self.two_dim_vis_plt_attr.reset_y_variables()\n \n # create window to ask which satellite \n select_yvar_win = tk.Toplevel()\n select_yvar_win.rowconfigure(0,weight=1)\n select_yvar_win.rowconfigure(1,weight=1)\n select_yvar_win.columnconfigure(0,weight=1)\n select_yvar_win.columnconfigure(1,weight=1)\n\n select_sat_win_frame = ttk.LabelFrame(select_yvar_win, text='Select Satellite')\n select_sat_win_frame.grid(row=0, column=0, padx=10, pady=10) \n\n select_var_frame = ttk.LabelFrame(select_yvar_win, text='Select Variable')\n select_var_frame.grid(row=0, column=1, padx=10, pady=10) \n\n okcancel_frame = ttk.Label(select_yvar_win)\n okcancel_frame.grid(row=1, column=0, columnspan=2, padx=10, pady=10) \n\n # place the widgets in the frame\n available_sats = config.out_config.get_satellite_ids() # get all available sats for which outputs are available\n \n sats_combo_box = ttk.Combobox(select_sat_win_frame, \n values=available_sats)\n sats_combo_box.current(0)\n sats_combo_box.grid(row=0, column=0)\n\n self._2dvis_yvar= tk.StringVar() # using self so that the variable is retained even after exit from the function, make sure variable name is unique\n j = 0\n k = 0\n for _var in list(Plot2DVisVars):\n var_rbtn = ttk.Radiobutton(select_var_frame, text=_var, variable=self._2dvis_yvar, value=_var)\n var_rbtn.grid(row=j, column=k, sticky='w')\n j = j + 1\n if(j==5):\n j=0\n k=k+1\n\n def click_ok_btn():\n self.two_dim_vis_plt_attr.update_y_variables(sats_combo_box.get(), self._2dvis_yvar.get())\n \n def click_exit_btn():\n self.vis_2d_y_sel_var_disp.configure(state='normal')\n self.vis_2d_y_sel_var_disp.delete(1.0,'end')\n [sats, yvars] = self.two_dim_vis_plt_attr.get_y_variables()\n yvars_str = [str(sats[k]+'.'+yvars[k]) for k in range(0,len(sats))]\n self.vis_2d_y_sel_var_disp.insert(1.0,' '.join(yvars_str))\n self.vis_2d_y_sel_var_disp.configure(state='disabled')\n select_yvar_win.destroy()\n\n ok_btn = ttk.Button(okcancel_frame, text=\"Add\", command=click_ok_btn, width=15)\n ok_btn.grid(row=0, column=0, sticky ='e')\n cancel_btn = ttk.Button(okcancel_frame, text=\"Exit\", command=click_exit_btn, width=15)\n cancel_btn.grid(row=0, column=1, sticky ='w') \n\n def update_time_interval_in_attributes_variable(self):\n # read the plotting time interval \n time_start = str(self.vis_2d_time_from_entry.get()).split(\":\") # split and reverse list\n time_start.reverse()\n # convert to seconds\n x = 0\n for k in range(0,len(time_start)):\n x = x + float(time_start[k]) * (60**k)\n time_start_s = x\n\n time_end = str(self.vis_2d_time_to_entry.get()).split(\":\") # split and reverse list\n time_end.reverse()\n # convert to seconds\n x = 0\n for k in range(0,len(time_end)):\n x = x + float(time_end[k]) * (60**k)\n time_end_s = x\n\n self.two_dim_vis_plt_attr.update_time_interval(time_start_s, time_end_s)\n \n def click_plot_btn(self, export=False, plot=False):\n \"\"\" Make X-Y scatter plots of the variables indicated in :code:`self.two_dim_vis_plt_attr` instance variable. 
\n \"\"\"\n \n self.update_time_interval_in_attributes_variable()\n\n [time_start_s, time_end_s] = self.two_dim_vis_plt_attr.get_time_interval()\n\n # get the x-axis data\n [x_sat_id, x_var] = self.two_dim_vis_plt_attr.get_x_variables()\n x_sat_state_fp = config.out_config.get_satellite_state_fp()[config.out_config.get_satellite_ids().index(x_sat_id)]\n x_sat_kepstate_fp = config.out_config.get_satellite_kepstate_fp()[config.out_config.get_satellite_ids().index(x_sat_id)]\n \n # read the epoch and time-step size and fix the start and stop indices\n epoch_JDUT1 = pd.read_csv(x_sat_state_fp, skiprows = [0], nrows=1, header=None).astype(str) # 2nd row contains the epoch\n epoch_JDUT1 = float(epoch_JDUT1[0][0].split()[2])\n\n step_size = pd.read_csv(x_sat_state_fp, skiprows = [0,1], nrows=1, header=None).astype(str) # 3rd row contains the stepsize\n step_size = float(step_size[0][0].split()[4])\n\n logger.debug(\"epoch_JDUT1 is \" + str(epoch_JDUT1))\n logger.debug(\"step_size is \" + str(step_size))\n\n time_start_index = int(time_start_s/step_size)\n time_end_index = int(time_end_s/step_size)\n\n # check if the time interval is within bounds\n # Carteisan ECI state file\n x_sat_state_df = pd.read_csv(x_sat_state_fp,skiprows = [0,1,2,3]) \n x_sat_state_df.set_index('TimeIndex', inplace=True)\n # Keplerian state file\n x_sat_kepstate_df = pd.read_csv(x_sat_kepstate_fp,skiprows = [0,1,2,3]) \n x_sat_kepstate_df.set_index('TimeIndex', inplace=True)\n \n min_time_index = min(x_sat_state_df.index)\n max_time_index = max(x_sat_state_df.index)\n if(time_start_index < min_time_index or time_start_index > max_time_index or \n time_end_index < min_time_index or time_end_index > max_time_index or\n time_start_index > time_end_index):\n logger.info(\"Please enter valid time-interval.\")\n return\n\n # get data only in the relevant time-interval\n x_sat_state_df = x_sat_state_df.iloc[time_start_index:time_end_index]\n x_sat_kepstate_df = x_sat_kepstate_df.iloc[time_start_index:time_end_index]\n x_sat_df = pd.concat([x_sat_state_df, x_sat_kepstate_df], axis=1)\n\n # make empty dataframe to store the plot related data\n # extract the time and x-var \n plt_data = pd.DataFrame(index=x_sat_state_df.index)\n \n [_xvarname, _xdata] = Plot2DVisVars.get_data_from_orbitpy_file(sat_df=x_sat_df, sat_id=x_sat_id, var=x_var, step_size=step_size, epoch_JDUT1=epoch_JDUT1)\n plt_data[_xvarname] = _xdata \n\n # iterate over the list of y-vars \n [y_sat_id, y_var] = self.two_dim_vis_plt_attr.get_y_variables()\n num_y_vars = len(y_var)\n for k in range(0,num_y_vars): \n # extract the y-variable data from of the particular satellite\n # cartesian eci state file\n y_sat_state_fp = config.out_config.get_satellite_state_fp()[config.out_config.get_satellite_ids().index(y_sat_id[k])]\n y_sat_state_df = pd.read_csv(y_sat_state_fp,skiprows = [0,1,2,3]) \n y_sat_state_df.set_index('TimeIndex', inplace=True)\n y_sat_state_df = y_sat_state_df.iloc[time_start_index:time_end_index]\n # keplerian state file\n y_sat_kepstate_fp = config.out_config.get_satellite_kepstate_fp()[config.out_config.get_satellite_ids().index(y_sat_id[k])]\n y_sat_kepstate_df = pd.read_csv(y_sat_kepstate_fp,skiprows = [0,1,2,3]) \n y_sat_kepstate_df.set_index('TimeIndex', inplace=True)\n y_sat_kepstate_df = y_sat_kepstate_df.iloc[time_start_index:time_end_index]\n \n y_sat_df = pd.concat([y_sat_state_df, y_sat_kepstate_df], axis=1)\n\n # add new column with the y-data\n [_yvarname, _ydata] = Plot2DVisVars.get_data_from_orbitpy_file(sat_df=y_sat_df, 
sat_id=y_sat_id[k], var=y_var[k], step_size=step_size, epoch_JDUT1=epoch_JDUT1)\n plt_data[_yvarname] = _ydata \n \n if(export is True):\n vis2d_data_fp = tkinter.filedialog.asksaveasfile()\n plt_data.to_csv(vis2d_data_fp)\n \n if(plot is True):\n fig_win = tk.Toplevel()\n fig = Figure(figsize=(5, 4), dpi=100)\n ax = fig.add_subplot(111)\n _lgnd=[]\n for k in range(0,num_y_vars):\n ax.scatter(plt_data.iloc[:,0],plt_data.iloc[:,k+1])\n _lgnd.append(plt_data.columns[k+1]) # pylint: disable=E1136 # pylint/issues/3139\n ax.set_xlabel(plt_data.columns[0]) # pylint: disable=E1136 # pylint/issues/3139\n ax.set_ylabel('Y-axis')\n ax.legend(_lgnd)\n \n canvas = FigureCanvasTkAgg(fig, master=fig_win) # A tk.DrawingArea.\n canvas.draw()\n canvas.get_tk_widget().pack(side=tkinter.TOP, fill=tkinter.BOTH, expand=1)\n\n toolbar = NavigationToolbar2Tk(canvas, fig_win)\n toolbar.update()\n canvas.get_tk_widget().pack(side=tkinter.TOP, fill=tkinter.BOTH, expand=1)\n \n " ]
[ [ "pandas.concat", "pandas.read_csv", "numpy.sqrt", "matplotlib.figure.Figure", "pandas.DataFrame", "matplotlib.backends.backend_tkagg.NavigationToolbar2Tk", "numpy.array", "matplotlib.backends.backend_tkagg.FigureCanvasTkAgg" ] ]
cuiheng1234/CT3D
[ "831eb3cb1bc32775c415db7eb3e77c5ad800c9bb" ]
[ "pcdet/datasets/__init__.py" ]
[ "import torch\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data import DistributedSampler as _DistributedSampler\n\nfrom pcdet.utils import common_utils\n\nfrom .dataset import DatasetTemplate\nfrom .kitti.kitti_dataset import KittiDataset\nfrom .waymo.waymo_dataset import WaymoDataset\n\n__all__ = {\n 'DatasetTemplate': DatasetTemplate,\n 'KittiDataset': KittiDataset,\n 'WaymoDataset': WaymoDataset\n}\n\n\nclass DistributedSampler(_DistributedSampler):\n\n def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):\n super().__init__(dataset, num_replicas=num_replicas, rank=rank)\n self.shuffle = shuffle\n\n def __iter__(self):\n if self.shuffle:\n g = torch.Generator()\n g.manual_seed(self.epoch)\n indices = torch.randperm(len(self.dataset), generator=g).tolist()\n else:\n indices = torch.arange(len(self.dataset)).tolist()\n\n indices += indices[:(self.total_size - len(indices))]\n assert len(indices) == self.total_size\n\n indices = indices[self.rank:self.total_size:self.num_replicas]\n assert len(indices) == self.num_samples\n\n return iter(indices)\n\n\ndef build_dataloader(dataset_cfg, class_names, batch_size, dist, root_path=None, workers=4,\n logger=None, training=True, merge_all_iters_to_one_epoch=False, total_epochs=0):\n\n dataset = __all__[dataset_cfg.DATASET](\n dataset_cfg=dataset_cfg,\n class_names=class_names,\n root_path=root_path,\n training=training,\n logger=logger,\n )\n\n if merge_all_iters_to_one_epoch:\n assert hasattr(dataset, 'merge_all_iters_to_one_epoch')\n dataset.merge_all_iters_to_one_epoch(merge=True, epochs=total_epochs)\n\n if dist:\n if training:\n sampler = torch.utils.data.distributed.DistributedSampler(dataset)\n else:\n rank, world_size = common_utils.get_dist_info()\n sampler = DistributedSampler(dataset, world_size, rank, shuffle=False)\n else:\n sampler = None\n dataloader = DataLoader(\n dataset, batch_size=batch_size, pin_memory=True, num_workers=workers,\n shuffle=(sampler is None) and training, collate_fn=dataset.collate_batch,\n drop_last=False, sampler=sampler, timeout=0\n )\n\n return dataset, dataloader, sampler" ]
[ [ "torch.Generator", "torch.utils.data.DataLoader", "torch.utils.data.distributed.DistributedSampler" ] ]
MinjaMiladinovic/ignite
[ "007d320150fa915d7ac8757ddb586aaa9c427682" ]
[ "ignite/utils.py" ]
[ "import collections.abc as collections\nimport functools\nimport logging\nimport random\nimport warnings\nfrom typing import Any, Callable, Dict, Optional, TextIO, Tuple, Type, TypeVar, Union, cast\n\nimport torch\n\n__all__ = [\"convert_tensor\", \"apply_to_tensor\", \"apply_to_type\", \"to_onehot\", \"setup_logger\", \"manual_seed\"]\n\n\ndef convert_tensor(\n x: Union[torch.Tensor, collections.Sequence, collections.Mapping, str, bytes],\n device: Optional[Union[str, torch.device]] = None,\n non_blocking: bool = False,\n) -> Union[torch.Tensor, collections.Sequence, collections.Mapping, str, bytes]:\n \"\"\"Move tensors to relevant device.\n\n Args:\n x: input tensor or mapping, or sequence of tensors.\n device: device type to move ``x``.\n non_blocking: convert a CPU Tensor with pinned memory to a CUDA Tensor\n asynchronously with respect to the host if possible\n \"\"\"\n\n def _func(tensor: torch.Tensor) -> torch.Tensor:\n return tensor.to(device=device, non_blocking=non_blocking) if device is not None else tensor\n\n return apply_to_tensor(x, _func)\n\n\ndef apply_to_tensor(\n x: Union[torch.Tensor, collections.Sequence, collections.Mapping, str, bytes], func: Callable\n) -> Union[torch.Tensor, collections.Sequence, collections.Mapping, str, bytes]:\n \"\"\"Apply a function on a tensor or mapping, or sequence of tensors.\n\n Args:\n x: input tensor or mapping, or sequence of tensors.\n func: the function to apply on ``x``.\n \"\"\"\n return apply_to_type(x, torch.Tensor, func)\n\n\ndef apply_to_type(\n x: Union[Any, collections.Sequence, collections.Mapping, str, bytes],\n input_type: Union[Type, Tuple[Type[Any], Any]],\n func: Callable,\n) -> Union[Any, collections.Sequence, collections.Mapping, str, bytes]:\n \"\"\"Apply a function on an object of `input_type` or mapping, or sequence of objects of `input_type`.\n\n Args:\n x: object or mapping or sequence.\n input_type: data type of ``x``.\n func: the function to apply on ``x``.\n \"\"\"\n if isinstance(x, input_type):\n return func(x)\n if isinstance(x, (str, bytes)):\n return x\n if isinstance(x, collections.Mapping):\n return cast(Callable, type(x))({k: apply_to_type(sample, input_type, func) for k, sample in x.items()})\n if isinstance(x, tuple) and hasattr(x, \"_fields\"): # namedtuple\n return cast(Callable, type(x))(*(apply_to_type(sample, input_type, func) for sample in x))\n if isinstance(x, collections.Sequence):\n return cast(Callable, type(x))([apply_to_type(sample, input_type, func) for sample in x])\n raise TypeError((f\"x must contain {input_type}, dicts or lists; found {type(x)}\"))\n\n\ndef to_onehot(indices: torch.Tensor, num_classes: int) -> torch.Tensor:\n \"\"\"Convert a tensor of indices of any shape `(N, ...)` to a\n tensor of one-hot indicators of shape `(N, num_classes, ...) and of type uint8. Output's device is equal to the\n input's device`.\n\n Args:\n indices: input tensor to convert.\n num_classes: number of classes for one-hot tensor.\n\n .. 
versionchanged:: 0.4.3\n This functions is now torchscriptable.\n \"\"\"\n new_shape = (indices.shape[0], num_classes) + indices.shape[1:]\n onehot = torch.zeros(new_shape, dtype=torch.uint8, device=indices.device)\n return onehot.scatter_(1, indices.unsqueeze(1), 1)\n\n\ndef setup_logger(\n name: Optional[str] = None,\n level: int = logging.INFO,\n stream: Optional[TextIO] = None,\n format: str = \"%(asctime)s %(name)s %(levelname)s: %(message)s\",\n filepath: Optional[str] = None,\n distributed_rank: Optional[int] = None,\n) -> logging.Logger:\n \"\"\"Setups logger: name, level, format etc.\n\n Args:\n name: new name for the logger. If None, the standard logger is used.\n level: logging level, e.g. CRITICAL, ERROR, WARNING, INFO, DEBUG.\n stream: logging stream. If None, the standard stream is used (sys.stderr).\n format: logging format. By default, `%(asctime)s %(name)s %(levelname)s: %(message)s`.\n filepath: Optional logging file path. If not None, logs are written to the file.\n distributed_rank: Optional, rank in distributed configuration to avoid logger setup for workers.\n If None, distributed_rank is initialized to the rank of process.\n\n Returns:\n logging.Logger\n\n For example, to improve logs readability when training with a trainer and evaluator:\n\n .. code-block:: python\n\n from ignite.utils import setup_logger\n\n trainer = ...\n evaluator = ...\n\n trainer.logger = setup_logger(\"trainer\")\n evaluator.logger = setup_logger(\"evaluator\")\n\n trainer.run(data, max_epochs=10)\n\n # Logs will look like\n # 2020-01-21 12:46:07,356 trainer INFO: Engine run starting with max_epochs=5.\n # 2020-01-21 12:46:07,358 trainer INFO: Epoch[1] Complete. Time taken: 00:5:23\n # 2020-01-21 12:46:07,358 evaluator INFO: Engine run starting with max_epochs=1.\n # 2020-01-21 12:46:07,358 evaluator INFO: Epoch[1] Complete. Time taken: 00:01:02\n # ...\n\n .. versionchanged:: 0.4.3\n Added ``stream`` parameter.\n \"\"\"\n logger = logging.getLogger(name)\n\n # don't propagate to ancestors\n # the problem here is to attach handlers to loggers\n # should we provide a default configuration less open ?\n if name is not None:\n logger.propagate = False\n\n # Remove previous handlers\n if logger.hasHandlers():\n for h in list(logger.handlers):\n logger.removeHandler(h)\n\n formatter = logging.Formatter(format)\n\n if distributed_rank is None:\n import ignite.distributed as idist\n\n distributed_rank = idist.get_rank()\n\n if distributed_rank > 0:\n logger.addHandler(logging.NullHandler())\n else:\n logger.setLevel(level)\n\n ch = logging.StreamHandler(stream=stream)\n ch.setLevel(level)\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n\n if filepath is not None:\n fh = logging.FileHandler(filepath)\n fh.setLevel(level)\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n\n return logger\n\n\ndef manual_seed(seed: int) -> None:\n \"\"\"Setup random state from a seed for `torch`, `random` and optionally `numpy` (if can be imported).\n\n Args:\n seed: Random state seed\n\n .. versionchanged:: 0.4.3\n Added ``torch.cuda.manual_seed_all(seed)``.\n \"\"\"\n random.seed(seed)\n torch.manual_seed(seed)\n\n if torch.cuda.is_available():\n torch.cuda.manual_seed_all(seed)\n\n try:\n import numpy as np\n\n np.random.seed(seed)\n except ImportError:\n pass\n\n\ndef deprecated(\n deprecated_in: str, removed_in: str = \"\", reasons: Tuple[str, ...] 
= (), raise_exception: bool = False\n) -> Callable:\n\n F = TypeVar(\"F\", bound=Callable[..., Any])\n\n def decorator(func: F) -> F:\n func_doc = func.__doc__ if func.__doc__ else \"\"\n deprecation_warning = (\n f\"This function has been deprecated since version {deprecated_in}\"\n + (f\" and will be removed in version {removed_in}\" if removed_in else \"\")\n + \".\\n Please refer to the documentation for more details.\"\n )\n\n @functools.wraps(func)\n def wrapper(*args: Any, **kwargs: Dict[str, Any]) -> Callable:\n if raise_exception:\n raise DeprecationWarning(deprecation_warning)\n warnings.warn(deprecation_warning, DeprecationWarning, stacklevel=2)\n return func(*args, **kwargs)\n\n appended_doc = f\".. deprecated:: {deprecated_in}\" + (\"\\n\\n\\t\" if len(reasons) else \"\")\n\n for reason in reasons:\n appended_doc += \"\\n\\t- \" + reason\n wrapper.__doc__ = f\"**Deprecated function**.\\n\\n {func_doc}{appended_doc}\"\n return cast(F, wrapper)\n\n return decorator\n" ]
[ [ "numpy.random.seed", "torch.zeros", "torch.manual_seed", "torch.cuda.manual_seed_all", "torch.cuda.is_available" ] ]
variscarey/ASTARS
[ "63ee17ba73e21880218db6514e4c6dd37671074d" ]
[ "paper_examples/regression_selection.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jun 19 09:24:41 2020\n\n@author: varis\n\"\"\"\n\nimport numpy as np\nfrom astars.stars_sim import Stars_sim\nimport matplotlib.pyplot as plt\n\n\ndef noisy_cubic(x,var=1E-4):\n return x**3-6*x + np.random.normal(scale=np.sqrt(var),size=x.shape)\n\ndef poly_mod(pred,weights):\n return pred@weights\n\ndef rms_loss(weights,pred,data):\n '''\n \n Parameters\n ----------\n weights : size dim, float of current linear model weights\n pred : dim x size data, float of current predictors per input point\n data : value of data at uncertain regression points\n\n Returns\n -------\n RMS : RMS loss function\n\n '''\n \n RMS = np.sum((poly_mod(pred,weights)-data)**2)\n return RMS\n\nnum_features = 6\nnum_train = 20\ntrue_pts = np.random.uniform(low=-1.0,high=1.0, size = num_train)\n#build polynomial features\ntrain_feat = np.vander(true_pts,N=num_features+1,increasing=True)[:,1:]\n\n#build synthetic data\ndata = noisy_cubic(true_pts)\n#print('true points',true_pts)\nprint('true data',data)\n#initialize model weights\n#init_weights = np.random.uniform(low=-1.0,high=1.0,size=num_features)\ninit_weights = np.zeros(num_features)\ninit_weights[0]=-6.0\ninit_weights[2]=1\n\nprint('initial weights',init_weights)\nprint('initial RMS loss',rms_loss(init_weights,train_feat,data))\n\ndef stars_wrapper(iterate,dim=10):\n return rms_loss(iterate,train_feat,data)\n #print('initial first component',noisy_pred[:,0])\n #print('current first component',pred[:,0])\n# return rms_loss(weights,pred,data)\n\n#stars setup\nmaxit = 100\ninit_pt = np.copy(init_weights) \nntrials = 1\nf_avr = np.zeros(maxit+1) #set equal to number of iterations + 1\nmyL1= 2.0*np.max(train_feat**2)\nprint('L1=',myL1)\n\nfor trial in range(ntrials):\n #sim setup\n test = Stars_sim(stars_wrapper, init_pt, L1 = myL1, var = 1E-4, verbose = False, maxit = maxit)\n test.STARS_only = True\n test.get_mu_star()\n test.get_h()\n # do 100 steps\n while test.iter < test.maxit:\n test.step()\n \n #update average of f\n f_avr += test.fhist \n\nprint('STARS min',test.x)\ns_min = test.x\nf2_avr = np.zeros(maxit+1)\n\ninit_pt=np.copy(init_weights)\nfor trial in range(ntrials):\n #sim setup\n test = Stars_sim(stars_wrapper, init_pt, L1 = myL1, var = 1E-4, verbose =False, maxit = maxit)\n #test.STARS_only = True\n test.get_mu_star()\n test.get_h()\n # adapt every 10 timesteps using quadratic(after inital burn)\n test.train_method = 'GQ'\n test.adapt = 200 # Sets number of sub-cylcing steps\n \n # do 100 steps\n while test.iter < test.maxit:\n test.step() \n if test.active is not None and test.iter % 10 == 0:\n print('Iteration', test.iter)\n print('Active dimension',test.active.shape[1])\n print('Active weights',test.wts)\n print('True active variable comps.',test.active)\n f2_avr += test.fhist\n print('trial',trial,' minval',test.fhist[-1])\n print(test.x)\n\nf_avr /= ntrials\nf2_avr /= ntrials\n\nas_min = test.x\n\nprint('Stars_min',s_min)\nprint('Astars min',as_min)\n\n\nplt.semilogy(f_avr,label='Stars')\nplt.semilogy(f2_avr, label='Astars')\nplt.legend()\nplt.show()\n" ]
[ [ "matplotlib.pyplot.semilogy", "matplotlib.pyplot.legend", "numpy.vander", "numpy.sqrt", "numpy.max", "numpy.copy", "numpy.random.uniform", "matplotlib.pyplot.show", "numpy.zeros" ] ]
tmash/pyscf
[ "89c101c1c963e8247808635c61cd165bffab42d6" ]
[ "examples/qmmm/22-with_solvent.py" ]
[ "#!/usr/bin/env python\n#\n# Author: Qiming Sun <osirpt.sun@gmail.com>\n#\n\n'''\nQM/MM charges + implicit solvent model\n'''\n\nimport numpy\nfrom pyscf import gto, qmmm, solvent\nfrom pyscf.data import radii\n\n# load all modeuls\nfrom pyscf import __all__\n\nmol = gto.M(atom='''\nC 0.000000 0.000000 -0.542500\nO 0.000000 0.000000 0.677500\nH 0.000000 0.9353074360871938 -1.082500\nH 0.000000 -0.9353074360871938 -1.082500\n ''',\n verbose = 4)\n\nnumpy.random.seed(1)\ncoords = numpy.random.random((5,3)) * 10\ncharges = (numpy.arange(5) + 1.) * .1\nmm_atoms = [('C', c) for c in coords]\nmm_mol = qmmm.create_mm_mol(mm_atoms, charges)\n\n# Make a giant system include both QM and MM particles\nqmmm_mol = mol + mm_mol\n\n# The solvent model is based on the giant system\nsol = solvent.DDCOSMO(qmmm_mol)\n\n# According to Lipparini's suggestion in issue #446\nsol.radii_table = radii.VDW\n\n#\n# The order to apply solvent model and QM/MM charges does not affect results\n#\n# ddCOSMO-QMMM-SCF\n#\nmf = mol.RHF()\nmf = mf.QMMM(coords, charges)\nmf = mf.DDCOSMO(sol)\nmf.run()\n\n#\n# QMMM-ddCOSMO-SCF\n#\nmf = mol.RHF()\nmf = mf.DDCOSMO(sol)\nmf = mf.QMMM(coords, charges)\nmf.run()\n" ]
[ [ "numpy.arange", "numpy.random.random", "numpy.random.seed" ] ]
RonyBenitez/triplet-reid-pytorch
[ "b32f5df1084c70aeceb5084001f242dce31d0ebb" ]
[ "eval.py" ]
[ "#!/usr/bin/python\n# -*- encoding: utf-8 -*-\n\nimport torch\n\nimport pickle\nimport numpy as np\nimport sys\nimport logging\nimport argparse\nfrom tqdm import tqdm\n\nfrom utils import pdist_np as pdist\n\n\ndef parse_args():\n parse = argparse.ArgumentParser()\n parse.add_argument(\n '--gallery_embs',\n dest = 'gallery_embs',\n type = str,\n default = './res/emb_gallery.pkl',\n help = 'path to embeddings of gallery dataset'\n )\n parse.add_argument(\n '--query_embs',\n dest = 'query_embs',\n type = str,\n default = './res/emb_query.pkl',\n help = 'path to embeddings of query dataset'\n )\n parse.add_argument(\n '--cmc_rank',\n dest = 'cmc_rank',\n type = int,\n default = 1,\n help = 'path to embeddings of query dataset'\n )\n\n return parse.parse_args()\n\n\ndef evaluate(args):\n ## logging\n FORMAT = '%(levelname)s %(filename)s:%(lineno)d: %(message)s'\n logging.basicConfig(level=logging.INFO, format=FORMAT, stream=sys.stdout)\n logger = logging.getLogger(__name__)\n\n ## load embeddings\n logger.info('loading gallery embeddings')\n with open(args.gallery_embs, 'rb') as fr:\n gallery_dict = pickle.load(fr)\n emb_gallery, lb_ids_gallery, lb_cams_gallery = gallery_dict['embeddings'], gallery_dict['label_ids'], gallery_dict['label_cams']\n logger.info('loading query embeddings')\n with open(args.query_embs, 'rb') as fr:\n query_dict = pickle.load(fr)\n emb_query, lb_ids_query, lb_cams_query = query_dict['embeddings'], query_dict['label_ids'], query_dict['label_cams']\n\n ## compute and clean distance matrix\n dist_mtx = pdist(emb_query, emb_gallery)\n n_q, n_g = dist_mtx.shape\n indices = np.argsort(dist_mtx, axis = 1)\n matches = lb_ids_gallery[indices] == lb_ids_query[:, np.newaxis]\n matches = matches.astype(np.int32)\n all_aps = []\n all_cmcs = []\n logger.info('starting evaluating ...')\n for qidx in tqdm(range(n_q)):\n qpid = lb_ids_query[qidx]\n qcam = lb_cams_query[qidx]\n\n order = indices[qidx]\n pid_diff = lb_ids_gallery[order] != qpid\n cam_diff = lb_cams_gallery[order] != qcam\n useful = lb_ids_gallery[order] != -1\n keep = np.logical_or(pid_diff, cam_diff)\n keep = np.logical_and(keep, useful)\n match = matches[qidx][keep]\n\n if not np.any(match): continue\n\n cmc = match.cumsum()\n cmc[cmc > 1] = 1\n all_cmcs.append(cmc[:args.cmc_rank])\n\n num_real = match.sum()\n match_cum = match.cumsum()\n match_cum = [el / (1.0 + i) for i, el in enumerate(match_cum)]\n match_cum = np.array(match_cum) * match\n ap = match_cum.sum() / num_real\n all_aps.append(ap)\n\n assert len(all_aps) > 0, \"NO QUERY MATCHED\"\n mAP = sum(all_aps) / len(all_aps)\n all_cmcs = np.array(all_cmcs, dtype = np.float32)\n cmc = np.mean(all_cmcs, axis = 0)\n\n print('mAP is: {}, cmc is: {}'.format(mAP, cmc))\n\n\nif __name__ == '__main__':\n args = parse_args()\n evaluate(args)\n" ]
[ [ "numpy.logical_and", "numpy.logical_or", "numpy.mean", "numpy.any", "numpy.argsort", "numpy.array" ] ]
robertvsiii/kaic-analysis
[ "f80f85bbaeb1190cba2bb18ed732af915374ff9c" ]
[ "kaic_analysis/scripts.py" ]
[ "#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Nov 15 10:31:23 2017\n\n@author: robertmarsland\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport subprocess\nimport os\nimport pickle\nimport datetime\nfrom sklearn.decomposition import PCA\n\nStateData = ['ACI', 'ACII', 'CIATP', 'CIIATP', 'pU', 'pT', 'pD', 'pS']\n\ndef FormatPath(folder):\n if folder==None:\n folder=''\n else:\n if folder != '':\n if folder[-1] != '/':\n folder = folder+'/'\n return folder\n\ndef LoadData(name, folder = None, suffix = '.dat'):\n folder = FormatPath(folder)\n col_ind = list(range(22))\n del col_ind[5]\n return pd.read_table(folder+name+suffix,index_col=0,usecols=col_ind)\n\ndef RunModel(paramdict = {}, name = 'data', default = 'default.par', folder = None, extra_mem = False):\n if folder != None:\n cwd = os.getcwd()\n os.chdir(folder)\n linelist = []\n with open(default) as f:\n for line in f:\n for item in paramdict:\n if line[:len(item)] == item:\n line = item + ' ' + str(paramdict[item]) + '\\n'\n if line[:15] == 'output_filename':\n line = 'output_filename ' + name + '\\n'\n linelist.append(line)\n\n with open(name + '.par','w') as f:\n for line in linelist:\n f.write(line)\n if extra_mem:\n subprocess.check_call('ulimit -s 65532; ./KMCKaiC ' + name + '.par', shell = True)\n else:\n subprocess.check_call('./KMCKaiC ' + name + '.par', shell = True)\n if folder != None:\n os.chdir(cwd)\n return LoadData(name, folder=folder)\n else:\n return LoadData(name)\n\ndef Current(data,species):\n J = [0]\n t = [data.index[0]]\n\n center = [np.mean(data[species[0]]),np.mean(data[species[1]])]\n values = [[],[]]\n\n for k in range(len(data)-1):\n if data[species[0]].iloc[k] < center[0] and data[species[0]].iloc[k+1] < center[0]:\n if data[species[1]].iloc[k] <= center[1] and data[species[1]].iloc[k+1] > center[1]:\n J.append(J[-1]-1)\n t.append(data.index[k])\n values[0].append(data[species[0]].iloc[k])\n values[1].append(data[species[1]].iloc[k])\n if data[species[1]].iloc[k] > center[1] and data[species[1]].iloc[k+1] <= center[1]:\n J.append(J[-1]+1)\n t.append(data.index[k])\n values[0].append(data[species[0]].iloc[k])\n values[1].append(data[species[1]].iloc[k])\n \n J = np.asarray(J,dtype=int)\n t = np.asarray(t,dtype=float)\n T = np.nan\n if len(J) > 1:\n if J[-1]>J[1]:\n T = (t[-1]-t[1])/(J[-1]-J[1])\n \n return t, J, T, center\n\ndef Current_PCA(data,center=[1,0]):\n J = [0]\n t = [data.index[0]]\n \n values = [[],[]]\n \n data_PCA = PCA(n_components=2).fit_transform(data[StateData])\n \n for k in range(len(data_PCA)-1):\n if data_PCA[k,1] >= center[1] and data_PCA[k+1,1] >= center[1]:\n if data_PCA[k,0] <= center[0] and data_PCA[k+1,0] > center[0]:\n J.append(J[-1]-1)\n t.append(data.index[k])\n values[0].append(data_PCA[k,0])\n values[1].append(data_PCA[k,1])\n if data_PCA[k,0] > center[0] and data_PCA[k+1,0] <= center[0]:\n J.append(J[-1]+1)\n t.append(data.index[k])\n values[0].append(data_PCA[k,0])\n values[1].append(data_PCA[k,1])\n \n J = np.asarray(J,dtype=int)\n t = np.asarray(t,dtype=float)\n T = np.nan\n\n if len(J) > 1:\n if J[-1] < 0:\n J = -J\n if J[-1]>J[1]:\n T = (t[-1]-t[1])/(J[-1]-J[1])\n\n return t, J, T\n\ndef EntropyRate(data,name='data',folder=None):\n \n NA = 6.02e23\n conv = 1e-21\n ATPcons_hex = (data['CIATPcons'].iloc[-1] + data['CIIATPcons'].iloc[-1] -\n data['CIATPcons'].iloc[0] - data['CIIATPcons'].iloc[0])\n ATPcons = (6*conv*NA*FindParam('volume',name,folder=folder)*\n 
FindParam('KaiC0',name,folder=folder)*ATPcons_hex)\n return (FindParam('Delmu',name,folder=folder)*ATPcons/\n (data.index[-1]-data.index[0]))\n\ndef FirstPassageSingleTraj(t,J):\n tau_list = []\n for k in range(2,max(J)+1):\n tau_list.append(t[np.where(J>=k)[0][0]]-t[np.where(J>=k-1)[0][0]])\n return tau_list\n\ndef FindParam(param,par_file,folder=None):\n folder = FormatPath(folder)\n \n if param == 'Delmu':\n paramdict = {}\n with open(folder+par_file+'.par') as f:\n for line in f:\n words = line.split()\n if words != []:\n if words[0] in ['Khyd','ATPfrac','Piconc']:\n paramdict[words[0]] = float(words[1])\n \n return np.log(paramdict['Khyd']/paramdict['Piconc']) + np.log(1/((1/paramdict['ATPfrac'])-1))\n else:\n with open(folder+par_file+'.par') as f:\n for line in f:\n words = line.split()\n if words != []:\n if words[0] == param:\n return float(words[1])\n\ndef EntropyProduction(data,name='data'):\n NA = 6.02e23\n conv = 1e-21\n ATPcons = 6*conv*NA*FindParam('volume',name)*FindParam('KaiC0',name)*(data['CIATPcons'] + data['CIIATPcons'])\n return FindParam('Delmu',name)*ATPcons\n\ndef Ensemble(paramdict,ns,species=['pT','pS'],folder=None,run_number=1):\n results = []\n Tvec = []\n Sdotvec = []\n \n path = FormatPath(folder)\n \n date = str(datetime.datetime.now()).split()[0]\n name = '_'.join([str(run_number),date])\n filename = path + 'RawData_' + name + '.dat'\n\n for k in range(ns):\n paramdict['rnd_seed'] = np.random.rand()*1000000\n data = None\n count = 0\n while data is None and count < 10:\n try:\n datname = 'data_'+str(np.random.randint(1000000))\n data = RunModel(paramdict=paramdict,name=datname,folder=folder)\n except:\n subprocess.check_call('rm -f '+path+datname+'.par', shell = True)\n count += 1\n assert data is not None, 'KMCKaiC failed to run.'\n\n t, J, T_new, center = Current(data,species)\n Sdot_new = EntropyRate(data,name=datname,folder=folder)\n Tvec.append(T_new)\n Sdotvec.append(Sdot_new)\n results.append({'t': t, 'J': J})\n subprocess.check_call('rm -f '+'\\''+path+datname+'.dat'+'\\'', shell = True)\n subprocess.check_call('rm -f '+'\\''+path+datname+'.par'+'\\'', shell = True)\n T = np.nanmean(Tvec)\n Sdot = np.nanmean(Sdotvec)\n with open(filename,'wb') as f:\n pickle.dump([results,T,Sdot],f)\n\ndef Ensemble_PCA(paramdict,ns,folder=None,run_number=1):\n results = []\n Tvec = []\n Sdotvec = []\n \n path = FormatPath(folder)\n \n date = str(datetime.datetime.now()).split()[0]\n name = '_'.join([str(run_number),date])\n filename = path + 'RawData_' + name + '.dat'\n \n for k in range(ns):\n paramdict['rnd_seed'] = np.random.rand()*1000000\n data = None\n count = 0\n while data is None and count < 10:\n try:\n datname = 'data_'+str(np.random.randint(1000000))\n data = RunModel(paramdict=paramdict,name=datname,folder=folder)\n except:\n subprocess.check_call('rm -f '+path+datname+'.par', shell = True)\n count += 1\n assert data is not None, 'KMCKaiC failed to run.'\n \n t, J, T_new = Current_PCA(data)\n Sdot_new = EntropyRate(data,name=datname,folder=folder)\n Tvec.append(T_new)\n Sdotvec.append(Sdot_new)\n results.append({'t': t, 'J': J})\n subprocess.check_call('rm -f '+'\\''+path+datname+'.dat'+'\\'', shell = True)\n subprocess.check_call('rm -f '+'\\''+path+datname+'.par'+'\\'', shell = True)\n T = np.nanmean(Tvec)\n Sdot = np.nanmean(Sdotvec)\n with open(filename,'wb') as f:\n pickle.dump([results,T,Sdot],f)\n\ndef FirstPassage(results,Ncyc = 1,all=False):\n tau = []\n if all:\n for item in results:\n tau = tau + FirstPassageSingleTraj(item['t'],item['J'])\n 
else:\n for item in results:\n inds1 = np.where(item['J'] >= 1)[0]\n inds2 = np.where(item['J'] >= 1+Ncyc)[0]\n if len(inds1) != 0 and len(inds2) != 0:\n t1 = item['t'][inds1[0]]\n t2 = item['t'][inds2[0]]\n tau.append(t2-t1)\n else:\n tau.append(np.nan)\n \n return tau\n\ndef LoadExperiment(param_name,run_numbers,date,folder='data'):\n folder = FormatPath(folder)\n \n name = '_'.join([param_name,str(run_numbers[0]),date])\n filename1 = folder + 'FirstPassageData_' + name + '.csv'\n filename2 = folder + 'Sdot_' + name + '.csv'\n filename3 = folder + 'AllData_' + name + '.dat'\n \n tau=pd.read_csv(filename1,index_col=0)\n Sdot=pd.read_csv(filename2,index_col=0)\n with open(filename3,'rb') as f:\n results=pickle.load(f)\n \n for run_number in run_numbers[1:]:\n name = '_'.join([param_name,str(run_number),date])\n filename1 = folder + 'FirstPassageData_' + name + '.csv'\n filename2 = folder + 'Sdot_' + name + '.csv'\n filename3 = folder + 'AllData_' + name + '.dat'\n \n tau = tau.join(pd.read_csv(filename1,index_col=0))\n Sdot = Sdot.join(pd.read_csv(filename2,index_col=0))\n with open(filename3,'rb') as f:\n results_new=pickle.load(f)\n results.update(results_new)\n \n return tau, Sdot, results\n\ndef RunExperiment(vol = 0.5, param_val = 25, param_name = 'Delmu', ens_size = 5, CIIhyd = True,\n sample_cnt = 3e6, code_folder = None, run_number = 1, use_PCA = False):\n \n paramdict = {}\n paramdict['volume'] = vol\n paramdict['sample_cnt'] = sample_cnt\n paramdict['tequ'] = 50\n\n if not CIIhyd:\n paramdict['kCIIhyd0'] = 0.1\n \n if param_name == 'Delmu':\n paramdict['Khyd'] = (np.exp(param_val)*FindParam('Piconc','default',folder=code_folder)*\n ((1/FindParam('ATPfrac','default',folder=code_folder))-1))\n else:\n paramdict[param_name] = param_val\n\n if use_PCA:\n Ensemble_PCA(paramdict,ens_size,folder=code_folder,run_number=run_number)\n else:\n Ensemble(paramdict,ens_size,folder=code_folder,run_number=run_number)\n \n \ndef ProcessExperiment(run_number = 1, date = str(datetime.datetime.now()).split()[0], all = False,\n param_name = 'Delmu', param_val = 20, folder = 'data', code_folder = None, Ncyc = 30):\n \n if all:\n Ncyc = 1\n \n folder = FormatPath(folder)\n code_folder = FormatPath(code_folder)\n \n filename0 = code_folder + 'RawData_' + '_'.join([str(run_number),date]) + '.dat'\n \n name = '_'.join([param_name,str(run_number),date])\n filename1 = folder + 'FirstPassageData_' + name + '.csv'\n filename2 = folder + 'Sdot_' + name + '.csv'\n filename3 = folder + 'AllData_' + name + '.dat'\n \n keyname = param_name + ' = ' + str(param_val)\n \n results = {}\n tau = {}\n Sdot = {}\n \n with open(filename0,'rb') as f:\n results[keyname], T, Sdot[keyname] = pickle.load(f)\n tau[keyname] = FirstPassage(results[keyname],Ncyc=Ncyc,all=all)\n \n tau = pd.DataFrame.from_dict(tau)\n tau.to_csv(filename1)\n Sdot = pd.DataFrame.from_dict(Sdot,orient='index').T\n Sdot.to_csv(filename2)\n with open(filename3,'wb') as f:\n pickle.dump(results,f)\n \n return tau, Sdot, results\n\ndef PlotExperiment(ex_out,tmax = 3000., taumax = 3000., nbins = 50):\n tau = ex_out[0]\n Sdot = ex_out[1]\n results = ex_out[2]\n ns2 = len(tau.keys())\n tbins = np.linspace(0,taumax,nbins)\n fig, axs = plt.subplots(ns2,2,sharex='col',figsize = (8,10))\n \n paramlist = []\n for name in tau.keys():\n paramlist.append(float(name.split()[-1]))\n param_name = tau.keys()[0].split()[0]\n paramlist.sort()\n\n\n k = 0\n eps = []\n Sdotmean = []\n T = []\n for paramval in paramlist:\n name = param_name + ' = ' + str(paramval)\n for item 
in results[name]:\n if type(item['t']) == list or type(item['t']) == np.ndarray:\n if len(item['t']) > 1:\n axs[k,0].plot(item['t']-item['t'][1],item['J']-item['J'][1])\n tau[name].hist(ax=axs[k,1],bins=tbins)\n axs[k,1].set_yticks(())\n eps.append(tau[name].var()/tau[name].mean()**2)\n T.append(tau[name].mean())\n Sdotmean.append(Sdot[name])\n k += 1\n\n axs[int(round(ns2*1./2)),0].set_ylabel('Number of Cycles')\n axs[-1,0].set_xlabel('Time (hrs)')\n axs[-1,1].set_xlabel(r'$\\tau$ (hrs)')\n axs[-1,0].set_xlim((0,tmax))\n axs[-1,1].set_xlim((0,taumax))\n \n plt.show()\n \n DelSmean = np.asarray(Sdotmean)*np.asarray(T)\n eps = np.asarray(eps)\n DelSrange = np.linspace(min(DelSmean)*0.75,max(DelSmean)*1.25,100)\n fig2, ax2 = plt.subplots(1)\n ax2.semilogy(DelSmean,eps,'o-')\n ax2.semilogy(DelSrange,2./DelSrange,'k',linewidth = 3)\n ax2.set_xlabel(r'$\\langle \\Delta S_c \\rangle$')\n ax2.set_ylabel(r'var($\\tau$)/$\\langle \\tau\\rangle^2$')\n\n plt.show()\n" ]
[ [ "numpy.log", "pandas.read_csv", "numpy.linspace", "numpy.asarray", "matplotlib.pyplot.subplots", "pandas.read_table", "numpy.mean", "numpy.nanmean", "numpy.random.rand", "pandas.DataFrame.from_dict", "numpy.exp", "matplotlib.pyplot.show", "numpy.where", "sklearn.decomposition.PCA", "numpy.random.randint" ] ]
seblee97/student_teacher_catastrophic
[ "9baaaf2850025ba9cf33d61c42386bc4c3b2dad2" ]
[ "cata/plotters/unified_plotter.py" ]
[ "import os\nfrom typing import Dict\nfrom typing import Optional\nfrom typing import Union\n\nimport pandas as pd\nfrom cata import constants\nfrom cata.plotters import base_plotter\n\n\nclass UnifiedPlotter(base_plotter.BasePlotter):\n \"\"\"Class for plotting generalisation errors, overlaps etc.\n\n For case when logging is done in 'unified' fashion i.e. all into one dataframe.\n \"\"\"\n\n def __init__(\n self,\n save_folder: str,\n num_steps: int,\n log_overlaps: bool,\n ode_log_path: str,\n network_log_path: str,\n ):\n \"\"\"\n Class constructor.\n\n Args:\n save_folder: path to folder for saving plots.\n num_steps: total number of steps in the training run (used for scaling axes).\n log_overlaps: whether or not to plot overlaps (or just errors).\n log_ode: whether ot not to plot ode data.\n log_network: whether ot not to plot network data.\n \"\"\"\n self._ode_logger_path = ode_log_path\n self._network_logger_path = network_log_path\n\n super().__init__(\n save_folder=save_folder, num_steps=num_steps, log_overlaps=log_overlaps\n )\n\n def _setup_data(self):\n \"\"\"Setup data from relevant dataframes.\n\n Here, in the unified case, full dataset is loaded into memory.\n \"\"\"\n if self._ode_logger_path is not None:\n self._ode_logger = pd.read_csv(self._ode_logger_path, index_col=0)\n if self._network_logger_path is not None:\n self._network_logger = pd.read_csv(self._network_logger_path)\n\n def make_plots(self) -> None:\n \"\"\"Orchestration method for plotting ode logs, network logs, or both.\"\"\"\n if self._ode_logger_path is not None:\n self._make_plot(\n data={constants.ODE: self._ode_logger},\n save_path=os.path.join(self._save_folder, constants.ODE_PDF),\n )\n if self._network_logger_path is not None:\n self._make_plot(\n data={constants.SIM: self._network_logger},\n save_path=os.path.join(self._save_folder, constants.NETWORK_PDF),\n )\n if self._ode_logger_path is not None and self._network_logger_path is not None:\n self._make_plot(\n data={\n constants.ODE: self._ode_logger,\n constants.SIM: self._network_logger,\n },\n save_path=os.path.join(self._save_folder, constants.OVERLAY_PDF),\n )\n\n def _make_plot(\n self,\n data: Dict[str, Union[pd.DataFrame, Dict[str, pd.DataFrame]]],\n save_path: str,\n ) -> None:\n \"\"\"Make plots for a set of results (e.g. ode or network or both).\n\n Args:\n data: mapping from type of results (ode, network etc.)\n to dataframes with results.\n save_path: path to save the plot.\n \"\"\"\n # can use arbitrary dataframe since columns will be the same.\n tag_groups = self._collate_tags(tags=list(list(data.values())[0].keys()))\n\n # e.g. [error, overlap, ...]\n group_names = list(tag_groups.keys())\n # e.g. 
[[error_1, error_2, ...], [overlap_1, overlap_2, ...], ...]\n group_key_names = list(tag_groups.values()) # e.g.\n\n num_graphs = len(tag_groups)\n num_rows = self.GRAPH_LAYOUT[0]\n num_columns = self.GRAPH_LAYOUT[1]\n\n fig, spec = self._get_figure_skeleton(\n height=4, width=5, num_columns=num_columns, num_rows=num_rows\n )\n\n for row in range(num_rows):\n for col in range(num_columns):\n\n graph_index = (row) * num_columns + col\n\n if graph_index < num_graphs:\n\n print(\"Plotting graph {}/{}\".format(graph_index + 1, num_graphs))\n group_name = group_names[graph_index]\n keys = group_key_names[graph_index]\n\n data_collection = {\n data_type: {key: data[data_type][key].dropna() for key in keys}\n for data_type in data.keys()\n }\n\n fig = self._plot_scalar(\n fig=fig,\n spec=spec,\n row=row,\n col=col,\n tag_group_name=group_name,\n data_collection=data_collection,\n )\n\n fig.savefig(save_path, dpi=100)\n" ]
[ [ "pandas.read_csv" ] ]
elouie/CodeSamples
[ "3fe9fcf23cbfc82d84a679ea16d69ae41e700f06" ]
[ "cv/CNN/student.py" ]
[ "# Please place any imports here.\r\n# BEGIN IMPORTS\r\n\r\nimport numpy as np\r\nimport cv2\r\nimport random\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.optim as optim\r\nimport torch.nn.functional as F\r\nfrom torchvision import transforms, datasets\r\n\r\n# END IMPORTS\r\n\r\n#########################################################\r\n### BASELINE MODEL\r\n#########################################################\r\n\r\nclass AnimalBaselineNet(nn.Module):\r\n def __init__(self, num_classes=16):\r\n super(AnimalBaselineNet, self).__init__()\r\n # TODO: Define layers of model architecture\r\n # TODO-BLOCK-BEGIN\r\n # conv1: convolution layer with 6 output channels, kernel size of 3, stride of 2, padding of 1\r\n self.conv1 = nn.Conv2d(3, 6, kernel_size=3, stride=2, padding=1)\r\n # conv2: convolution layer with 12 output channels, kernel size of 3, stride of 2, padding of 1\r\n self.conv2 = nn.Conv2d(6, 12, kernel_size=3, stride=2, padding=1)\r\n # conv3: convolution layer with 24 output channels, kernel size of 3, stride of 2, padding of 1\r\n self.conv3 = nn.Conv2d(12, 24, kernel_size=3, stride=2, padding=1)\r\n # fc: fully connected layer with 128 output features\r\n self.fc = nn.Linear(24*8*8, 128)\r\n # cls: fully connected layer with 16 output features (the number of classes)\r\n self.cls = nn.Linear(128, num_classes)\r\n # TODO-BLOCK-END\r\n\r\n def forward(self, x):\r\n x = x.contiguous().view(-1, 3, 64, 64).float()\r\n\r\n # TODO: Define forward pass\r\n # TODO-BLOCK-BEGIN\r\n\r\n # ReLU nonlinearity\r\n x = F.relu(self.conv1(x))\r\n # ReLU nonlinearity\r\n x = F.relu(self.conv2(x))\r\n # ReLU nonlinearity\r\n x = F.relu(self.conv3(x))\r\n x = x.view(-1, 24*8*8)\r\n # ReLU nonlinearity\r\n x = F.relu(self.fc(x))\r\n x = self.cls(x)\r\n # TODO-BLOCK-END\r\n return x\r\n\r\ndef model_train(net, inputs, labels, criterion, optimizer):\r\n \"\"\"\r\n Will be used to train baseline and student models.\r\n\r\n Inputs:\r\n net network used to train\r\n inputs (torch Tensor) batch of input images to be passed\r\n through network\r\n labels (torch Tensor) ground truth labels for each image\r\n in inputs\r\n criterion loss function\r\n optimizer optimizer for network, used in backward pass\r\n\r\n Returns:\r\n running_loss (float) loss from this batch of images\r\n num_correct (torch Tensor, size 1) number of inputs\r\n in this batch predicted correctly\r\n total_images (float or int) total number of images in this batch\r\n\r\n Hint: Don't forget to zero out the gradient of the network before the backward pass. We do this before\r\n each backward pass as PyTorch accumulates the gradients on subsequent backward passes. 
This is useful\r\n in certain applications but not for our network.\r\n \"\"\"\r\n\r\n # TODO: Foward pass\r\n # TODO-BLOCK-BEGIN\r\n optimizer.zero_grad()\r\n total_images = labels.data.numpy().size\r\n # Need to run for each element in input batch\r\n outputs = net(inputs)\r\n # TODO-BLOCK-END\r\n\r\n # TODO: Backward pass\r\n # TODO-BLOCK-BEGIN\r\n # Need to generate num_correct from outputs\r\n _, predicted = torch.max(outputs, 1)\r\n num_correct = torch.sum(predicted == labels.data.reshape(-1))\r\n\r\n loss = criterion(outputs, labels.squeeze())\r\n loss.backward()\r\n optimizer.step()\r\n running_loss = loss.item()\r\n # TODO-BLOCK-END\r\n\r\n return running_loss, num_correct, total_images\r\n\r\n#########################################################\r\n### DATA AUGMENTATION\r\n#########################################################\r\n\r\nclass Shift(object):\r\n \"\"\"\r\n Shifts input image by random x amount between [-max_shift, max_shift]\r\n and separate random y amount between [-max_shift, max_shift]. A positive\r\n shift in the x- and y- direction corresponds to shifting the image right\r\n and downwards, respectively.\r\n\r\n Inputs:\r\n max_shift float; maximum magnitude amount to shift image in x and y directions.\r\n \"\"\"\r\n def __init__(self, max_shift=10):\r\n self.max_shift = max_shift\r\n\r\n def __call__(self, image):\r\n \"\"\"\r\n Inputs:\r\n image 3 x H x W image as torch Tensor\r\n\r\n Returns:\r\n shift_image 3 x H x W image as torch Tensor, shifted by random x\r\n and random y amount, each amount between [-max_shift, max_shift].\r\n Pixels outside original image boundary set to 0 (black).\r\n \"\"\"\r\n image = image.numpy()\r\n _, H, W = image.shape\r\n # TODO: Shift image\r\n # TODO-BLOCK-BEGIN\r\n from scipy.ndimage import shift\r\n x_rand = random.randint(-self.max_shift, self.max_shift)\r\n y_rand = random.randint(-self.max_shift, self.max_shift)\r\n image = shift(image, [0, x_rand, y_rand], mode='constant', cval=0)\r\n # TODO-BLOCK-END\r\n\r\n return torch.Tensor(image)\r\n\r\n def __repr__(self):\r\n return self.__class__.__name__\r\n\r\nclass Contrast(object):\r\n \"\"\"\r\n Randomly adjusts the contrast of an image. Uniformly select a contrast factor from\r\n [min_contrast, max_contrast]. 
Setting the contrast to 0 should set the intensity of all pixels to the\r\n mean intensity of the original image while a contrast of 1 returns the original image.\r\n\r\n Inputs:\r\n min_contrast non-negative float; minimum magnitude to set contrast\r\n max_contrast non-negative float; maximum magnitude to set contrast\r\n\r\n Returns:\r\n image 3 x H x W torch Tensor of image, with random contrast\r\n adjustment\r\n \"\"\"\r\n\r\n def __init__(self, min_contrast=0.3, max_contrast=1.0):\r\n self.min_contrast = min_contrast\r\n self.max_contrast = max_contrast\r\n\r\n def __call__(self, image):\r\n \"\"\"\r\n Inputs:\r\n image 3 x H x W image as torch Tensor\r\n\r\n Returns:\r\n shift_image 3 x H x W torch Tensor of image, with random contrast\r\n adjustment\r\n \"\"\"\r\n image = image.numpy()\r\n _, H, W = image.shape\r\n\r\n # TODO: Change image contrast\r\n # TODO-BLOCK-BEGIN\r\n\r\n # We need to modify each channel separately\r\n # First, we need to get a contrast level 'c' from the parameters\r\n c = random.uniform(self.min_contrast, self.max_contrast)\r\n # Get the means per channel\r\n for i in range(image.shape[0]):\r\n m = np.mean(image[i, :, :])\r\n image[i, :, :] = ((image[i, :, :] - m) * c) + m\r\n # We need to shift the image around the means and multiply by the factor\r\n image = np.clip(image, 0, 1)\r\n # TODO-BLOCK-END\r\n\r\n return torch.Tensor(image)\r\n\r\n def __repr__(self):\r\n return self.__class__.__name__\r\n\r\nclass Brightness(object):\r\n \"\"\"\r\n Randomly adjusts the brightness of an image. Uniformly select a brightness factor from\r\n [min_brightness, max_brightness]. Setting the brightness to 0 should set the image to black\r\n while a contrast of 1 returns the original image.\r\n\r\n Inputs:\r\n min_contrast non-negative float; minimum magnitude to set contrast\r\n max_contrast non-negative float; maximum magnitude to set contrast\r\n\r\n Returns:\r\n image 3 x H x W torch Tensor of image, with random contrast\r\n adjustment\r\n \"\"\"\r\n\r\n def __init__(self, min_brightness=0.3, max_brightness=1.0):\r\n self.min_brightness = min_brightness\r\n self.max_brightness = max_brightness\r\n\r\n def __call__(self, image):\r\n \"\"\"\r\n Inputs:\r\n image 3 x H x W image as torch Tensor\r\n\r\n Returns:\r\n shift_image 3 x H x W torch Tensor of image, with random brightness\r\n adjustment\r\n \"\"\"\r\n image = image.numpy()\r\n _, H, W = image.shape\r\n\r\n # TODO: Change image contrast\r\n # TODO-BLOCK-BEGIN\r\n\r\n # First, we need to get a brightness level 'b' from the parameters\r\n b = random.uniform(self.min_brightness, self.max_brightness)\r\n image = np.clip(image*b, 0, 1)\r\n # TODO-BLOCK-END\r\n\r\n return torch.Tensor(image)\r\n\r\n def __repr__(self):\r\n return self.__class__.__name__\r\n\r\nclass Rotate(object):\r\n \"\"\"\r\n Rotates input image by random angle within [-max_angle, max_angle]. 
Positive angle corresponds to\r\n counter-clockwise rotation\r\n\r\n Inputs:\r\n max_angle maximum magnitude of angle rotation, in degrees\r\n\r\n\r\n \"\"\"\r\n def __init__(self, max_angle=10):\r\n self.max_angle = max_angle\r\n\r\n def __call__(self, image):\r\n \"\"\"\r\n Inputs:\r\n image image as torch Tensor\r\n\r\n Returns:\r\n rotated_image image as torch Tensor; rotated by random angle\r\n between [-max_angle, max_angle].\r\n Pixels outside original image boundary set to 0 (black).\r\n \"\"\"\r\n image = image.numpy()\r\n _, H, W = image.shape\r\n\r\n # TODO: Rotate image\r\n # TODO-BLOCK-BEGIN\r\n from scipy.ndimage.interpolation import rotate\r\n angle = random.uniform(-self.max_angle, self.max_angle)\r\n # Need to avoid interpolating zeroed padding pixels, so order=0.\r\n image = rotate(image, angle, axes=[1, 2], order=0, reshape=False, mode='constant', cval=0)\r\n # TODO-BLOCK-END\r\n\r\n return torch.Tensor(image)\r\n\r\n def __repr__(self):\r\n return self.__class__.__name__\r\n\r\nclass HorizontalFlip(object):\r\n \"\"\"\r\n Randomly flips image horizontally.\r\n\r\n Inputs:\r\n p float in range [0,1]; probability that image should\r\n be randomly rotated\r\n \"\"\"\r\n def __init__(self, p=0.5):\r\n self.p = p\r\n\r\n def __call__(self, image):\r\n \"\"\"\r\n Inputs:\r\n image image as torch Tensor\r\n\r\n Returns:\r\n flipped_image image as torch Tensor flipped horizontally with\r\n probability p, original image otherwise.\r\n \"\"\"\r\n image = image.numpy()\r\n _, H, W = image.shape\r\n\r\n # TODO: Flip image\r\n # TODO-BLOCK-BEGIN\r\n if random.random() > self.p:\r\n image[0,:,:] = np.fliplr(image[0,:,:])\r\n image[1,:,:] = np.fliplr(image[1,:,:])\r\n image[2,:,:] = np.fliplr(image[2,:,:])\r\n # TODO-BLOCK-END\r\n\r\n return torch.Tensor(image)\r\n\r\n def __repr__(self):\r\n return self.__class__.__name__\r\n\r\n# Noise seems effective on new models, but didn't improve highest performing models.\r\nclass Noise(object):\r\n \"\"\"\r\n Randomly adds tiny amounts of noise.\r\n\r\n Inputs:\r\n e float in range [0,1]; e variance in noise\r\n \"\"\"\r\n def __init__(self, e=0.5):\r\n self.e = e\r\n\r\n def __call__(self, image):\r\n \"\"\"\r\n Inputs:\r\n image image as torch Tensor\r\n\r\n Returns:\r\n noisy_image image as torch Tensor with added Gaussian noise with variance e.\r\n \"\"\"\r\n image = image.numpy()\r\n _, H, W = image.shape\r\n\r\n image[0, :, :] = image[0, :, :] + np.random.normal(0, self.e, (H, W))\r\n image[1, :, :] = image[1, :, :] + np.random.normal(0, self.e, (H, W))\r\n image[2, :, :] = image[2, :, :] + np.random.normal(0, self.e, (H, W))\r\n\r\n return torch.clamp(torch.Tensor(image), 0, 1)\r\n\r\n def __repr__(self):\r\n return self.__class__.__name__\r\n\r\n# RandomApply was created after we achieved\r\n# 50% accuracy, and trained effectively, but no new models performed better\r\n# with it.\r\nclass RandomApply(object):\r\n \"\"\"\r\n Randomly applies a transformation from a list.\r\n\r\n Inputs:\r\n transforms transforms to randomly select\r\n \"\"\"\r\n def __init__(self, img_transforms):\r\n self.img_transforms = img_transforms\r\n\r\n def __call__(self, image):\r\n \"\"\"\r\n Inputs:\r\n image image as torch Tensor\r\n\r\n Returns:\r\n transformed_image image to randomly transform\r\n \"\"\"\r\n return random.choice(self.img_transforms)(image)\r\n\r\n def __repr__(self):\r\n return self.__class__.__name__\r\n\r\n#########################################################\r\n### STUDENT 
MODEL\r\n#########################################################\r\n\r\ndef get_student_settings(net):\r\n \"\"\"\r\n Return transform, batch size, epochs, criterion and\r\n optimizer to be used for training.\r\n \"\"\"\r\n dataset_means = [123./255., 116./255., 97./255.]\r\n dataset_stds = [ 54./255., 53./255., 52./255.]\r\n\r\n # TODO: Create data transform pipeline for your model\r\n # TODO-BLOCK-BEGIN\r\n # transforms.ToPILImage() must be first, followed by transforms.ToTensor()\r\n transform = transforms.Compose([\r\n transforms.ToPILImage(),\r\n transforms.ToTensor(),\r\n # Unused experimental transforms.\r\n RandomApply([\r\n Brightness(min_brightness=0.7, max_brightness=1.3),\r\n Contrast(min_contrast=0.3, max_contrast=2),\r\n Noise(0.05)\r\n ]),\r\n RandomApply([\r\n Shift(max_shift=16),\r\n Rotate(max_angle=45)\r\n ]),\r\n HorizontalFlip(p=0.5),\r\n transforms.Normalize(dataset_means, dataset_stds),\r\n ])\r\n # TODO-BLOCK-END\r\n\r\n # TODO: Settings for dataloader and training. These settings\r\n # will be useful for training your model.\r\n # TODO-BLOCK-BEGIN\r\n batch_size = 16\r\n epochs = 10\r\n # TODO-BLOCK-END\r\n\r\n # TODO: epochs, criterion and optimizer\r\n # TODO-BLOCK-BEGIN\r\n criterion = nn.CrossEntropyLoss()\r\n optimizer = optim.Adam(net.parameters(), lr=0.001)\r\n # TODO-BLOCK-END\r\n\r\n return transform, batch_size, epochs, criterion, optimizer\r\n\r\n\r\nclass AnimalStudentNet(nn.Module):\r\n # LIL MAX\r\n # Simple two-layered CNN with argmax to linear\r\n def __init__(self, num_classes=16):\r\n super(AnimalStudentNet, self).__init__()\r\n # TODO: Define layers of model architecture\r\n # TODO-BLOCK-BEGIN\r\n self.pool = nn.MaxPool2d(2, 2)\r\n self.dropout = nn.Dropout2d(p=0.1)\r\n # conv1: convolution layer with 6 output channels, kernel size of 3, stride of 2, padding of 1\r\n self.conv1 = nn.Conv2d(3, 6, kernel_size=3, stride=2, padding=1)\r\n # conv2: convolution layer with 12 output channels, kernel size of 3, stride of 2, padding of 1\r\n self.conv2 = nn.Conv2d(6, 9, kernel_size=3, stride=2, padding=1)\r\n # conv2: convolution layer with 12 output channels, kernel size of 3, stride of 2, padding of 1\r\n self.conv3 = nn.Conv2d(9, 12, kernel_size=3, stride=2, padding=1)\r\n # fc: fully connected layer with 128 output features\r\n self.fc = nn.Linear(12*4*4, 96)\r\n # cls: fully connected layer with 16 output features (the number of classes)\r\n self.cls = nn.Linear(96, num_classes)\r\n # TODO-BLOCK-END\r\n\r\n def forward(self, x):\r\n x = x.contiguous().view(-1, 3, 64, 64).float()\r\n\r\n # TODO: Define forward pass\r\n # TODO-BLOCK-BEGIN\r\n\r\n x = self.dropout(x)\r\n # ReLU nonlinearity\r\n x = F.relu(self.conv1(x))\r\n # ReLU nonlinearity\r\n x = F.relu(self.conv2(x))\r\n # ReLU nonlinearity\r\n x = self.pool(F.relu(self.conv3(x)))\r\n # x = F.relu(self.conv3(x))\r\n # ReLU nonlinearity\r\n x = x.view(-1, 12*4*4)\r\n # ReLU nonlinearity\r\n x = F.relu(self.fc(x))\r\n x = self.cls(x)\r\n # TODO-BLOCK-END\r\n return x\r\n\r\n\r\n#########################################################\r\n### ADVERSARIAL IMAGES\r\n#########################################################\r\n\r\ndef get_adversarial(img, output, label, net, criterion, epsilon):\r\n \"\"\"\r\n Generates adversarial image by adding a small epsilon\r\n to each pixel, following the sign of the gradient.\r\n\r\n Inputs:\r\n img (torch Tensor) image propagated through network\r\n output (torch Tensor) output from forward pass of image\r\n through network\r\n label (torch Tensor) true label 
of img\r\n net image classification model\r\n criterion loss function to be used\r\n epsilon (float) perturbation value for each pixel\r\n\r\n Outputs:\r\n perturbed_img (torch Tensor, same dimensions as img)\r\n adversarial image, clamped such that all values\r\n are between [0,1]\r\n (Clamp: all values < 0 set to 0, all > 1 set to 1)\r\n noise (torch Tensor, same dimensions as img)\r\n matrix of noise that was added element-wise to image\r\n (i.e. difference between adversarial and original image)\r\n\r\n Hint: After the backward pass, the gradient for a parameter p of the network can be accessed using p.grad\r\n \"\"\"\r\n\r\n # TODO: Define forward pass\r\n # TODO-BLOCK-BEGIN\r\n # Retrieve the gradient loss.\r\n loss = criterion(output, label)\r\n loss.backward()\r\n gradient_loss = img.grad\r\n # Use the sign of the gradient to determine amount of noise to add.\r\n noise = epsilon * gradient_loss.sign()\r\n # Remember to clamp the pixels to valid ranges.\r\n perturbed_image = torch.clamp(img + noise, 0, 1)\r\n # TODO-BLOCK-END\r\n\r\n return perturbed_image, noise\r\n\r\n" ]
[ [ "torch.nn.CrossEntropyLoss", "torch.nn.Dropout2d", "torch.max", "torch.Tensor", "numpy.clip", "numpy.fliplr", "torch.nn.Conv2d", "torch.nn.Linear", "scipy.ndimage.interpolation.rotate", "torch.nn.MaxPool2d", "numpy.mean", "numpy.random.normal", "torch.clamp", "scipy.ndimage.shift" ] ]
SkyerYao/stanford-tensorflow-tutorials
[ "43c97b6df32f280056fa7c11c83dcfcd1b97382c" ]
[ "2017/examples/03_linear_regression_sol.py" ]
[ "\"\"\" Simple linear regression example in TensorFlow\nThis program tries to predict the number of thefts from \nthe number of fire in the city of Chicago\nAuthor: Chip Huyen\nPrepared for the class CS 20SI: \"TensorFlow for Deep Learning Research\"\ncs20si.stanford.edu\n\"\"\"\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL']='2'\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nimport xlrd\n\nimport utils\n\nDATA_FILE = 'data/fire_theft.xls'\n\n# Step 1: read in data from the .xls file\nbook = xlrd.open_workbook(DATA_FILE, encoding_override=\"utf-8\")\nsheet = book.sheet_by_index(0)\ndata = np.asarray([sheet.row_values(i) for i in range(1, sheet.nrows)])\nn_samples = sheet.nrows - 1\n\n# Step 2: create placeholders for input X (number of fire) and label Y (number of theft)\nX = tf.placeholder(tf.float32, name='X')\nY = tf.placeholder(tf.float32, name='Y')\n\n# Step 3: create weight and bias, initialized to 0\nw = tf.Variable(0.0, name='weights')\nb = tf.Variable(0.0, name='bias')\n\n# Step 4: build model to predict Y\nY_predicted = X * w + b \n\n# Step 5: use the square error as the loss function\nloss = tf.square(Y - Y_predicted, name='loss')\n# loss = utils.huber_loss(Y, Y_predicted)\n\n# Step 6: using gradient descent with learning rate of 0.001 to minimize loss\noptimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001).minimize(loss)\n\nwith tf.Session() as sess:\n\t# Step 7: initialize the necessary variables, in this case, w and b\n\tsess.run(tf.global_variables_initializer()) \n\t\n\twriter = tf.summary.FileWriter('./graphs/linear_reg', sess.graph)\n\t\n\t# Step 8: train the model\n\tfor i in range(50): # train the model 100 epochs\n\t\ttotal_loss = 0\n\t\tfor x, y in data:\n\t\t\t# Session runs train_op and fetch values of loss\n\t\t\t_, l = sess.run([optimizer, loss], feed_dict={X: x, Y:y}) \n\t\t\ttotal_loss += l\n\t\tprint('Epoch {0}: {1}'.format(i, total_loss/n_samples))\n\n\t# close the writer when you're done using it\n\twriter.close() \n\t\n\t# Step 9: output the values of w and b\n\tw, b = sess.run([w, b]) \n\n# plot the results\nX, Y = data.T[0], data.T[1]\nplt.plot(X, Y, 'bo', label='Real data')\nplt.plot(X, X * w + b, 'r', label='Predicted data')\nplt.legend()\nplt.show()\n" ]
[ [ "matplotlib.pyplot.legend", "tensorflow.summary.FileWriter", "tensorflow.Variable", "tensorflow.placeholder", "matplotlib.pyplot.plot", "tensorflow.global_variables_initializer", "tensorflow.train.GradientDescentOptimizer", "tensorflow.square", "tensorflow.Session", "matplotlib.pyplot.show" ] ]
Voolkia/ML
[ "9b8de9d87cddeb8d13b3ac757f18e68c110c7408" ]
[ "notebooks/5. Batch Transforming/transform.py" ]
[ "from functools import reduce\nfrom config import PERIODO_INI, PERIODO_FIN\nimport pandas as pd\nimport numpy as np\n\npd.options.mode.chained_assignment = None\n\n\ndef check_periods(col):\n print(pd.DataFrame(\n {\"Rango\": [col.min(), col.max()]},\n index=['MIN', 'MAX'])\n )\n\n\n# HELPER FUNCTIONS\ndef transform_date(s):\n dates = {date: pd.to_datetime(date) for date in s.unique()}\n return s.map(dates)\n\n\ndef dates_to_int(dates):\n periodos = {fecha: i + 1\n for i, fecha\n in enumerate(sorted(dates.unique(),\n reverse=True))\n }\n return dates.map(periodos)\n\n\ndef simplify_history(x):\n return \"\".join([\"1\" if int(n) > 0 else \"0\" for n in x.split(\" \")])\n\n\ndef to_yearmonth(s):\n dates = {date: pd.Timestamp(date).strftime('%Y-%m') for date in s.unique()}\n return s.map(dates)\n\n\n# TRANSFORMING PIPELINE FUNCTIONS\ndef transform_polizas(df_polizas):\n df_polizas['FECHA_VIG_POL'] = transform_date(df_polizas['FECHA_VIG_POL'])\n df_polizas['mes_anio_vig'] = df_polizas['FECHA_VIG_POL'].dt.strftime('%Y-%m')\n to_pivot = df_polizas[[\"CIF_ID\",\n \"NUM_SECU_POL\",\n \"MCA_VIGENCIA\",\n \"mes_anio_vig\"]].drop_duplicates()\n del df_polizas\n df_polizas_pivoted = to_pivot.pivot_table(index='CIF_ID',\n columns=['mes_anio_vig'],\n values=['MCA_VIGENCIA'],\n aggfunc='count',\n fill_value=0)\n del to_pivot\n df_polizas_pivoted = df_polizas_pivoted.astype(str)\n df_polizas_pivoted[\"history\"] = df_polizas_pivoted.apply(\" \".join, axis=1)\n new_df = pd.DataFrame(df_polizas_pivoted.index)\n new_df = new_df.set_index('CIF_ID')\n new_df[\"hist_polizas\"] = df_polizas_pivoted[\"history\"]\n del df_polizas_pivoted\n return new_df\n\n\ndef add_id(df, with_table, id_col, fk_col):\n df_aux = pd.read_csv(with_table,\n sep='\\t',\n encoding='latin1',\n decimal=',',\n usecols=[id_col, fk_col]) \n return pd.merge(df, df_aux, on=fk_col, how='inner')\n\n\ndef transform_pagos(df_pagos):\n df_pagos[\"FECHA_VTO\"] = transform_date(df_pagos[\"FECHA_VTO\"])\n df_pagos[\"FEC_PAGO\"] = transform_date(df_pagos[\"FEC_PAGO\"])\n df_pagos[\"demora_pago\"] = ((df_pagos[\"FEC_PAGO\"] - df_pagos[\"FECHA_VTO\"]) / np.timedelta64(1, 'M')).astype(\"float\")\n df_pagos.loc[df_pagos[\"COD_COBRO\"] == \"TM\", \"COD_COBRO\"] = \"TA\"\n # FILTER CURRENT PERIOD\n df_pagos = df_pagos[df_pagos[\"FECHA_VTO\"].between(PERIODO_INI,\n PERIODO_FIN)]\n # TRANSFORM DATE TO PERIOD\n df_pagos[\"FECHA_VTO\"] = to_yearmonth(df_pagos[\"FECHA_VTO\"].dropna())\n df_pagos[\"periodo\"] = dates_to_int(df_pagos[\"FECHA_VTO\"])\n # BEGIN PIVOTING\n to_pivot = df_pagos[[\"CIF_ID\",\"demora_pago\",\"periodo\",\"COD_COBRO\",\"COD_SITUACION\",\"MONTO_PAGO\"]]\n df_pagos_datediff = to_pivot.pivot_table(index=[\"CIF_ID\"], columns=[\"periodo\"], values=[\"demora_pago\",\"MONTO_PAGO\"], aggfunc=\"mean\")\n df_pagos_datediff = pd.DataFrame(df_pagos_datediff.to_records())\n df_pagos_datediff = df_pagos_datediff.rename(columns=lambda x: x.replace(\"(\",\"\").replace(\")\",\"\").replace(\", \",\"_\").replace(\"'\",\"\"))\n df_cods = to_pivot.pivot_table(index=[\"CIF_ID\"], columns=[\"periodo\",\"COD_SITUACION\"], aggfunc=\"size\")\n df_cods = pd.DataFrame(df_cods.to_records())\n df_cods = df_cods.rename(columns=lambda x: x.replace(\"(\",\"\").replace(\")\", \"\").replace(\", \",\"_SITUACION_\").replace(\"'\", \"\"))\n df_codc = to_pivot.pivot_table(index=[\"CIF_ID\"], columns=[\"periodo\",\"COD_COBRO\"], aggfunc=\"size\")\n df_codc = pd.DataFrame(df_codc.to_records())\n df_codc = df_codc.rename(columns=lambda x: x.replace(\"(\",\"\").replace(\")\", 
\"\").replace(\", \",\"_COBRO_\").replace(\"'\", \"\"))\n del to_pivot\n del df_pagos\n return reduce(lambda left, right: pd.merge(left, right, on=['CIF_ID'], how='outer'), [df_cods, df_codc, df_pagos_datediff])\n\n\ndef transform_siniestros(df_sini):\n df_sini.drop_duplicates(subset=[\"NUM_SECU_POL\", \"FEC_DENU_SINI\"],\n keep='last',\n inplace=True)\n df_sini[\"FEC_DENU_SINI\"] = transform_date(df_sini[\"FEC_DENU_SINI\"])\n df_sini[\"FECHA_LIQUIDACION\"] = transform_date(df_sini[\"FECHA_LIQUIDACION\"])\n df_sini[\"FECHA_RECHAZO\"] = transform_date(df_sini[\"FECHA_RECHAZO\"])\n # FILTER CURRENT PERIOD\n df_sini = df_sini[df_sini[\"FEC_DENU_SINI\"].between(PERIODO_INI,\n PERIODO_FIN)]\n check_periods(df_sini[\"FEC_DENU_SINI\"])\n # TRANSFORM DATE TO PERIOD\n df_sini[\"FEC_DENU_SINI\"] = to_yearmonth(df_sini[\"FEC_DENU_SINI\"].dropna())\n df_sini[\"FECHA_LIQUIDACION\"] = to_yearmonth(df_sini[\"FECHA_LIQUIDACION\"].dropna())\n df_sini[\"FECHA_RECHAZO\"] = to_yearmonth(df_sini[\"FECHA_RECHAZO\"].dropna())\n periodos = {fecha: i + 1\n for i, fecha in enumerate(sorted(df_sini[\"FEC_DENU_SINI\"].unique(),\n reverse=True))\n }\n df_sini[\"periodo_denu_sini\"] = df_sini[\"FEC_DENU_SINI\"].map(periodos)\n df_sini[\"periodo_liquidacion_sini\"] = df_sini[\"FECHA_LIQUIDACION\"].map(periodos)\n df_sini[\"periodo_rechazo_sini\"] = df_sini[\"FECHA_RECHAZO\"].map(periodos)\n # BEGIN PIVOTING\n to_pivot = df_sini[[\"CIF_ID\",\n \"NUM_SECU_POL\",\n \"periodo_denu_sini\",\n \"periodo_liquidacion_sini\",\n \"periodo_rechazo_sini\"]]\n df_sini = to_pivot.pivot_table(index='CIF_ID',\n columns=['periodo_denu_sini'],\n values=['NUM_SECU_POL',\n 'periodo_liquidacion_sini',\n 'periodo_rechazo_sini'],\n aggfunc='count',\n fill_value=0)\n df_sini = pd.DataFrame(df_sini.to_records())\n df_sini = df_sini.rename(columns=lambda x: x.replace(\"(\",\"\").replace(\")\",\"\").replace(\", \",\"_\").replace(\"'\",\"\").replace(\"NUM_SECU_POL\",\"periodo_sini\"))\n return df_sini\n\n\ndef transform_interacciones(df):\n df = df[~pd.to_numeric(df['ID'], errors='coerce').isnull()]\n # SOME CLEANING\n to_check = []\n for val in df[\"CIF_ID\"].unique():\n try:\n float(val)\n except Exception:\n to_check.append(val)\n df = df[~df[\"CIF_ID\"].isin(to_check)]\n to_check = []\n for val in df[\"ID\"].unique():\n try:\n int(val)\n except Exception:\n to_check.append(val)\n df = df[~df[\"ID\"].isin(to_check)]\n df = df.drop(columns='ID').astype({'CIF_ID': 'float64'})\n df = df[df[\"IN_OUT\"].isin(['O', 'I', 'A'])]\n df[\"FECHA\"] = df[\"FECHA\"].str.slice(stop=10)\n df.loc[df[\"FECHA\"].str.contains(\" [0-9]\", na=False), \"FECHA\"] = df.loc[df[\"FECHA\"].str.contains(\" [0-9]\", na=False), \"FECHA\"].str.slice(stop=8) \n df[\"FECHA\"] = df[\"FECHA\"].str.replace(\" \", \"\")\n df[\"periodo\"] = transform_date(df[\"FECHA\"])\n # FILTER CURRENT PERIOD\n df = df[df[\"periodo\"].between(PERIODO_INI, PERIODO_FIN)]\n check_periods(df[\"periodo\"])\n df = df[[\"CIF_ID\", \"IN_OUT\", \"periodo\"]]\n # TRANSFORM DATE TO PERIOD\n df[\"periodo\"] = to_yearmonth(df[\"periodo\"].dropna())\n df[\"periodo_int\"] = dates_to_int(df[\"periodo\"])\n # BEGIN PIVOTING\n to_pivot = df[[\"CIF_ID\", \"IN_OUT\", \"periodo_int\"]]\n df = to_pivot.pivot_table(index=[\"CIF_ID\"], columns=[\"periodo_int\", \"IN_OUT\"], aggfunc=\"size\")\n df = pd.DataFrame(df.to_records())\n df = df.rename(columns=lambda x: x.replace(\"(\", \"\").replace(\")\", \"\").replace(\", \", \"_TIPOINT_\").replace(\"'\", \"\")) \n return df\n" ]
[ [ "pandas.merge", "pandas.read_csv", "pandas.to_datetime", "pandas.DataFrame", "numpy.timedelta64", "pandas.Timestamp", "pandas.to_numeric" ] ]
sqt-aliu/portlab
[ "366755c2cfe7bb53c1b236688684fc2c9d8bf4d1" ]
[ "portlab/report/bin/stockloan_payments.py" ]
[ "#!/opt/anaconda3/bin/python -u\nimport getopt\nimport os.path\nimport sys\nimport pandas as pd\nimport numpy as np\nfrom time import sleep\nfrom datetime import datetime, timedelta\nsys.path.append(os.path.abspath(os.path.dirname(__file__) + '/' + '../..'))\nfrom common.lib.log import debug, error, fatal, info, warn\nfrom common.lib.cal import business_days\nfrom common.lib.db import query_mysql\nfrom common.lib.sym import local_hk_symbology\nfrom data.lib.reports import set_charges\n\n__ACCOUNTID__ = \"CPB10860\"\n\ndef print_usage():\n print (\" Usage: %s [options]\" % (os.path.basename(__file__))) \n print (\" Options:\")\n print (\" \\t-c, --exchcode\\t\\texchange code\")\n print (\" \\t-d, --database\\t\\tdatabase connection string\")\n print (\" \\t-p, --portfolio\\t\\tportfolio name\")\n print (\" \\t-s, --start\\t\\tstart date\")\n print (\" \\t-e, --end\\t\\tend date\") \n print (\" \\t-i, --input\\t\\tinput directory\") \n print (\" \\t-r, --dryrun\\t\\tdry run\") \n print (\" \\t-h,\\t\\t\\thelp\")\n\ndef format_time(time):\n today = datetime.strftime(datetime.now(), \"%Y%m%d\")\n return datetime.strptime(today + \"T\" + time, \"%Y%m%dT%H:%M:%S\")\n \ndef stockloan_payments(iDate, iPortfolio, dbConn, exchCode, inputDir, dryRun):\n inputFile = \"%s/SUB_%s_20953289.CSV\" % (inputDir, iDate.strftime(\"%Y-%m-%d\"))\n if os.path.exists(inputFile):\n info(\"Reading file %s\" % (inputFile))\n stockloan_df = pd.read_csv(inputFile, skiprows=1, parse_dates=['From Date','To Date'])\n stockloan_df = stockloan_df[stockloan_df['Account ID'] == __ACCOUNTID__] # filter by account\n \n stockload_grp_df = stockloan_df.groupby('To Date').sum()\n stockload_grp_df = pd.DataFrame(stockload_grp_df['Amount Due in Accrual Currency'])\n stockload_grp_df = stockload_grp_df.reset_index()\n stockload_grp_df.columns = ['date','amount']\n stockload_grp_df['type'] = 'Borrow Fee'\n stockload_grp_df['indicator'] = 'Short'\n stockload_grp_df['portfolio'] = iPortfolio\n\n # prepare insert to portfolios/charges database\n set_charges(stockload_grp_df, dbConn, dryrun=dryRun) \n \n else:\n warn(\"File %s not found.\" % (inputFile))\n \ndef main(argv): \n argDBConn = \"\"\n argExchCode = \"\"\n argPortfolio = \"\"\n argStart = \"\"\n argEnd = \"\"\n argDryRun = True \n argInput = \"/home/sqtdata/dfs/raw/live/bcar.day/reports\"\n try:\n opts, args = getopt.getopt(argv,\"hrc:d:s:e:p:i:\",[\"dryrun\",\"database=\",\"exchcode=\",\"start=\",\"end=\",\"portfolio=\",\"input=\"])\n except getopt.GetoptError:\n print_usage()\n sys.exit(2)\n for opt, arg in opts:\n if opt == '-h':\n print_usage()\n sys.exit()\n elif opt in (\"-d\", \"--database\"):\n argDBConn = arg \n elif opt in (\"-c\", \"--exchcode\"):\n argExchCode = arg\n elif opt in (\"-p\", \"--portfolio\"):\n argPortfolio = arg \n elif opt in (\"-s\", \"--start\"):\n argStart = datetime.strptime(arg, '%Y%m%d')\n elif opt in (\"-e\", \"--end\"):\n argEnd = datetime.strptime(arg, '%Y%m%d') \n elif opt in (\"-i\", \"--input\"):\n argInput = arg \n elif opt in (\"-r\", \"--dryrun\"):\n argDryRun = False \n \n if len(argDBConn) == 0 or len(argExchCode) == 0 or len(argPortfolio) == 0 or len(argInput) == 0:\n print_usage()\n exit(0)\n if argStart > argEnd:\n error(\"Start date must be less than End date\")\n print_usage()\n exit(0) \n \n dates = business_days(argStart, argEnd, argExchCode)\n for date in dates:\n info(\"Running payments for %s\" % (date.strftime('%Y-%m-%d')))\n stockloan_payments(date, argPortfolio, argDBConn, argExchCode, argInput, argDryRun)\n \nif 
__name__ == '__main__':\n main(sys.argv[1:])\n \n" ]
[ [ "pandas.read_csv", "pandas.DataFrame" ] ]
Precistat/CompreFace
[ "dc850c23d4cadc355f77cef08adbbd5f430c01b7" ]
[ "embedding-calculator/src/services/facescan/scanner/facescanner.py" ]
[ "# Copyright (c) 2020 the original author or authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express\n# or implied. See the License for the specific language governing\n# permissions and limitations under the License.\n\nfrom abc import ABC, abstractmethod\nfrom typing import List\n\nimport numpy as np\n\nfrom src.services.dto.bounding_box import BoundingBoxDTO\nfrom src.services.dto.plugin_result import FaceDTO, EmbeddingDTO\nfrom src.services.imgtools.types import Array3D\nfrom src.services.facescan.plugins.managers import plugin_manager\n\n\nclass FaceScanner(ABC):\n ID = None\n\n def __init__(self):\n assert self.ID\n\n def __new__(cls):\n if not hasattr(cls, 'instance'):\n cls.instance = super(FaceScanner, cls).__new__(cls)\n return cls.instance\n\n @abstractmethod\n def scan(self, img: Array3D, det_prob_threshold: float = None) -> List[FaceDTO]:\n \"\"\" Find face bounding boxes and calculate embeddings\"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def find_faces(self, img: Array3D, det_prob_threshold: float = None) -> List[BoundingBoxDTO]:\n \"\"\" Find face bounding boxes, without calculating embeddings\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def difference_threshold(self) -> float:\n \"\"\" Difference threshold between two embeddings\"\"\"\n raise NotImplementedError\n\n\nclass ScannerWithPluggins(FaceScanner):\n \"\"\"\n Class for backward compatibility.\n The scanner only performs face detection and embedding calculation.\n \"\"\"\n ID = \"ScannerWithPlugins\"\n\n def scan(self, img: Array3D, det_prob_threshold: float = None):\n return plugin_manager.detector(img, det_prob_threshold,\n [plugin_manager.calculator])\n\n def find_faces(self, img: Array3D, det_prob_threshold: float = None) -> List[BoundingBoxDTO]:\n return plugin_manager.detector.find_faces(img, det_prob_threshold)\n\n @property\n def difference_threshold(self):\n return plugin_manager.calculator.ml_model.difference_threshold\n\n\nclass MockScanner(FaceScanner):\n ID = 'MockScanner'\n\n def scan(self, img: Array3D, det_prob_threshold: float = None) -> List[FaceDTO]:\n return [FaceDTO(box=BoundingBoxDTO(0, 0, 0, 0, 0),\n plugins_dto=[EmbeddingDTO(embedding=np.random.rand(1))],\n img=img, face_img=img)]\n\n def find_faces(self, img: Array3D, det_prob_threshold: float = None) -> List[BoundingBoxDTO]:\n return [BoundingBoxDTO(0, 0, 0, 0, 0)]\n" ]
[ [ "numpy.random.rand" ] ]
tuming1990/tf-pdnn
[ "4fd25bcbc6024a6bf0e0f99b51f579c75a3cfdf1" ]
[ "io_func/model_io.py" ]
[ "# Copyright 2013 Yajie Miao Carnegie Mellon University\n# 2015 Yun Wang Carnegie Mellon University\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED\n# WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,\n# MERCHANTABLITY OR NON-INFRINGEMENT.\n# See the Apache 2 License for the specific language governing permissions and\n# limitations under the License.\n\n# Various functions to write models from nets to files, and to read models from\n# files to nets\n\nimport numpy as np\nimport os\nimport sys\nimport cPickle\n\nfrom StringIO import StringIO\nimport json\n\nimport theano\nimport theano.tensor as T\n\nfrom datetime import datetime\n\nfrom io_func import smart_open\n\n# print log to standard output\ndef log(string):\n sys.stderr.write('[' + str(datetime.now()) + '] ' + str(string) + '\\n')\n\n# convert an array to a string\ndef array_2_string(array):\n str_out = StringIO()\n np.savetxt(str_out, array)\n return str_out.getvalue()\n\n# convert a string to an array\n#def string_2_array(string):\n# str_in = StringIO(string)\n# return np.loadtxt(str_in)\n\ndef string_2_array(string):\n str_in = StringIO(string)\n array_tmp = np.loadtxt(str_in)\n if len(array_tmp.shape) == 0:\n return np.array([array_tmp])\n return array_tmp\n\ndef _nnet2file(layers, set_layer_num = -1, filename='nnet.out', start_layer = 0, input_factor = 0.0, factor=[]):\n n_layers = len(layers)\n nnet_dict = {}\n if set_layer_num == -1:\n set_layer_num = n_layers\n\n for i in range(start_layer, set_layer_num):\n layer = layers[i]\n dict_a = 'W' + str(i)\n dropout_factor = 0.0\n if i == 0:\n dropout_factor = input_factor\n if i > 0 and len(factor) > 0:\n dropout_factor = factor[i-1]\n\n if layer.type == 'fc':\n nnet_dict[dict_a] = array_2_string((1.0 - dropout_factor) * layer.W.get_value())\n elif layer.type == 'conv':\n filter_shape = layer.filter_shape\n for next_X in xrange(filter_shape[0]):\n for this_X in xrange(filter_shape[1]):\n new_dict_a = dict_a + ' ' + str(next_X) + ' ' + str(this_X)\n nnet_dict[new_dict_a] = array_2_string((1.0-dropout_factor) * (layer.W.get_value())[next_X, this_X])\n\n dict_a = 'b' + str(i)\n nnet_dict[dict_a] = array_2_string(layer.b.get_value())\n\n with smart_open(filename, 'wb') as fp:\n json.dump(nnet_dict, fp, indent=2, sort_keys = True)\n fp.flush()\n\n\n# save the config classes; since we are using pickle to serialize the whole class, it's better to set the\n# data reading and learning rate interfaces to None.\ndef _cfg2file(cfg, filename='cfg.out'):\n cfg.lrate = None\n cfg.train_sets = None; cfg.train_xy = None; cfg.train_x = None; cfg.train_y = None\n cfg.valid_sets = None; cfg.valid_xy = None; cfg.valid_x = None; cfg.valid_y = None\n cfg.activation = None # saving the rectifier function causes errors; thus we don't save the activation function\n # the activation function is initialized from the activation text (\"sigmoid\") when the network\n # configuration is loaded\n with smart_open(filename, \"wb\") as output:\n cPickle.dump(cfg, output, cPickle.HIGHEST_PROTOCOL)\n\ndef _file2nnet(layers, set_layer_num = -1, filename='nnet.in', factor=1.0):\n n_layers = len(layers)\n nnet_dict = {}\n if set_layer_num == -1:\n set_layer_num = 
n_layers\n\n with smart_open(filename, 'rb') as fp:\n nnet_dict = json.load(fp)\n for i in xrange(set_layer_num):\n dict_a = 'W' + str(i)\n layer = layers[i]\n if layer.type == 'fc':\n mat_shape = layer.W.get_value().shape\n layer.W.set_value(factor * np.asarray(string_2_array(nnet_dict[dict_a]), dtype=theano.config.floatX).reshape(mat_shape))\n elif layer.type == 'conv':\n filter_shape = layer.filter_shape\n W_array = layer.W.get_value()\n for next_X in xrange(filter_shape[0]):\n for this_X in xrange(filter_shape[1]):\n new_dict_a = dict_a + ' ' + str(next_X) + ' ' + str(this_X)\n mat_shape = W_array[next_X, this_X, :, :].shape\n W_array[next_X, this_X, :, :] = factor * np.asarray(string_2_array(nnet_dict[new_dict_a]), dtype=theano.config.floatX).reshape(mat_shape)\n layer.W.set_value(W_array)\n dict_a = 'b' + str(i)\n layer.b.set_value(np.asarray(string_2_array(nnet_dict[dict_a]), dtype=theano.config.floatX))\n\ndef _cnn2file(conv_layers, filename='nnet.out', input_factor = 1.0, factor=[]):\n n_layers = len(conv_layers)\n nnet_dict = {}\n for i in xrange(n_layers):\n conv_layer = conv_layers[i]\n filter_shape = conv_layer.filter_shape\n\n dropout_factor = 0.0\n if i == 0:\n dropout_factor = input_factor\n if i > 0 and len(factor) > 0:\n dropout_factor = factor[i-1]\n\n for next_X in xrange(filter_shape[0]):\n for this_X in xrange(filter_shape[1]):\n dict_a = 'W' + str(i) + ' ' + str(next_X) + ' ' + str(this_X)\n nnet_dict[dict_a] = array_2_string(dropout_factor * (conv_layer.W.get_value())[next_X, this_X])\n\n dict_a = 'b' + str(i)\n nnet_dict[dict_a] = array_2_string(conv_layer.b.get_value())\n\n with smart_open(filename, 'wb') as fp:\n json.dump(nnet_dict, fp, indent=2, sort_keys = True)\n fp.flush()\n\ndef _file2cnn(conv_layers, filename='nnet.in', factor=1.0):\n n_layers = len(conv_layers)\n nnet_dict = {}\n\n with smart_open(filename, 'rb') as fp:\n nnet_dict = json.load(fp)\n for i in xrange(n_layers):\n conv_layer = conv_layers[i]\n filter_shape = conv_layer.filter_shape\n W_array = conv_layer.W.get_value()\n\n for next_X in xrange(filter_shape[0]):\n for this_X in xrange(filter_shape[1]):\n dict_a = 'W' + str(i) + ' ' + str(next_X) + ' ' + str(this_X)\n W_array[next_X, this_X, :, :] = factor * np.asarray(string_2_array(nnet_dict[dict_a]))\n\n conv_layer.W.set_value(W_array)\n\n dict_a = 'b' + str(i)\n conv_layer.b.set_value(np.asarray(string_2_array(nnet_dict[dict_a]), dtype=theano.config.floatX))\n" ]
[ [ "numpy.savetxt", "numpy.array", "numpy.loadtxt" ] ]
tanay-gangey/CacheReplacementOptimizer
[ "bb8179e0b86fc75c853fde6ac46cd97bed4fd22c" ]
[ "src/models/mainLSTM.py" ]
[ "import numpy as np\nimport pandas as pd\nfrom lstm import LSTMParamInit, LSTMNet, LossLayer\nfrom sklearn.preprocessing import minmax_scale\n\n\n\ndef split(data, size):\n sequences, y = list(), list()\n for i in range(len(data)):\n end = i + size\n if end >= len(data):\n break\n sequences.append(np.array(data[i:end],dtype=np.float64))\n y.append(data[end])\n return sequences, y\n\ndef makedata(n):\n df = pd.read_csv(\"../../../../blkIO.txt\", sep=' ',header = None)\n df.columns = ['timestamp','pid','pname','blockNo', 'blockSize', 'readOrWrite', 'bdMajor', 'bdMinor', 'hash']\n df = df.drop(['pid', 'pname', 'blockSize', 'bdMajor', 'bdMinor', 'hash'], axis=1)\n readsAndWrites=df['blockNo'].tolist()\n ogmin = min(readsAndWrites)\n ogmax = max(readsAndWrites)\n readsAndWrites = minmax_scale(readsAndWrites,feature_range=(0,256))\n x, y = split(readsAndWrites[:int(0.05*len(readsAndWrites))], n)\n print(df.head())\n return x,y, ogmin, ogmax\n\ndef outlist(predlist,y_list):\n y_pred = list()\n for i in range(len(y_list)):\n y_pred.append(predlist[i].state.h[0])\n return y_pred\n \ndef mapback(predlist,omin,omax):\n y_pred = minmax_scale(predlist,feature_range=(omin,omax))\n return y_pred\n\n\ndef main():\n # learns to repeat simple sequence from random inputs\n np.random.seed(13)\n \n # parameters for input data dimension and lstm cell count\n mem_cell_ct = 100\n x_dim = 1000\n lstm_param = LSTMParamInit(mem_cell_ct, x_dim)\n lstm_net = LSTMNet(lstm_param)\n print(\"Before creating data\")\n input_val_arr,y_list, og_min, og_max = makedata(x_dim)\n print(\"After creating data\",len(input_val_arr),len(y_list),len(input_val_arr[0]))\n for epoch in range(1):\n print(\"epoch\", \"%2s\" % str(epoch), end=\": \")\n for index in range(len(y_list)):\n lstm_net.x_list_add(input_val_arr[index])\n\n \n loss = lstm_net.ylist(y_list, LossLayer)\n print(\"loss:\", \"%.3e\" % loss)\n lstm_param.update(lr=1)\n lstm_net.x_list_clear()\n y_pred1 = outlist(lstm_net.nodelist,y_list)\n y_pred = mapback(y_pred1,og_min,og_max)\n y_actual = mapback(y_list,og_min,og_max)\n #print(\"y_pred = [\" +\n # \", \".join([\"% 2.5f\" % lstm_net.nodelist[ind].state.h[0] for ind in range(len(y_list))]) +\n # \"]\", end=\", \")\n print(\"y_pred=\",y_pred[0:10])\n print(\"y_actual= \",y_actual[0:10])\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.array", "pandas.read_csv", "sklearn.preprocessing.minmax_scale", "numpy.random.seed" ] ]
volkale/bab
[ "265e2387fa1e55db9c77088d6c20140880f1da44" ]
[ "bab/tests/test_mcmc.py" ]
[ "import numpy as np\nfrom bab.mcmc import get_mcmc\n\n\ndef test_mcmc_random_seed(stan_model, two_group_sample_data):\n y1, y2 = two_group_sample_data\n\n mcmc1 = get_mcmc(stan_model, y1, y2, rand_seed=1).extract()\n mcmc2 = get_mcmc(stan_model, y1, y2, rand_seed=1).extract()\n\n np.testing.assert_equal(mcmc1, mcmc2)\n\n\ndef test_mcmc(stan_model, two_group_sample_data):\n y1, y2 = two_group_sample_data\n\n mcmc = get_mcmc(stan_model, y1, y2, rand_seed=1)\n\n row_ind_1 = list(mcmc.summary()['summary_rownames']).index('mu[1]')\n row_ind_2 = list(mcmc.summary()['summary_rownames']).index('mu[2]')\n col_ind_m = list(mcmc.summary()['summary_colnames']).index('mean')\n col_ind_rh = list(mcmc.summary()['summary_colnames']).index('Rhat')\n\n assert np.isclose(mcmc.summary()['summary'][row_ind_1, col_ind_m], np.mean(y1), atol=0.1)\n assert np.isclose(mcmc.summary()['summary'][row_ind_2, col_ind_m], np.mean(y2), atol=0.1)\n\n assert np.isclose(mcmc.summary()['summary'][:, col_ind_rh], 1.0, atol=0.1).all()\n\n\ndef test_mcmc_aggregation(stan_model, two_group_sample_data):\n y1, y2 = two_group_sample_data\n\n w1 = w2 = 6 * [2]\n mcmc_short = get_mcmc(stan_model, y1, y2, w1=w1, w2=w2, rand_seed=1)\n mcmc_long = get_mcmc(stan_model, 2 * y1, 2 * y2, rand_seed=1)\n\n for parameter in mcmc_short.extract().keys():\n assert np.isclose(mcmc_short.extract()[parameter], mcmc_long.extract()[parameter]).all()\n" ]
[ [ "numpy.testing.assert_equal", "numpy.mean" ] ]
domcatalano/architectures
[ "93c6fc575f851141720663b478008f2d1543b2e3" ]
[ "emb-parallel/basic.py" ]
[ "## import things\nimport ray\nimport pandas as pd\nfrom prophet import Prophet\n\n## data pre-processing\ndf = pd.read_csv('./yellow_tripdata_2021-01.csv')\ndf[\"tpep_pickup_datetime\"] = pd.to_datetime(df[\"tpep_pickup_datetime\"] ).dt.date.astype(\"datetime64\")\ndf= df[[\"tpep_pickup_datetime\", \"VendorID\", \"PULocationID\"]]\ndf = df.rename(columns={\"tpep_pickup_datetime\":\"ds\", \"VendorID\":\"y\"})\ndf = df.groupby([df[\"ds\"], df[\"PULocationID\"]]).count().reset_index()\nloc_list = df[\"PULocationID\"].unique()\n\n## vanilla impl without ray\nresult = {}\nfor i in loc_list:\n m = Prophet()\n m.fit(df[df[\"PULocationID\"]==i])\n result[i]=m \n\n\n\n## ray connection\nray.init(\"anyscale://ci_cd\", log_to_driver=False, runtime_env={\"pip\":[\"prophet\"],\"excludes\":[\"yellow*\"]})\n@ray.remote\ndef fit_prophet(i):\n m = Prophet()\n m.fit(df[df[\"PULocationID\"]==i])\n return m\n\n## Fire Hose Approach -- fire as fast as you can and wait for the result\nresult = []\nfor i in loc_list:\n result.append(fit_prophet.remote(i))\nray.get(result)\n\n\n## back pressure to limit the # of tasks in flight\nresult = []\nmax_tasks = 10 # specifying the max number of results\nfor i in loc_list:\n if len(result) > max_tasks:\n # calculating how many results should be available\n num_ready = len(result)-max_tasks\n # wait for num_returns to be equal to num_ready, ensuring the amount of task in flight is checked\n ray.wait(result, num_returns=num_ready)\n result.append(fit_prophet.remote(i))\nray.get(result)\n\n\n\n \n" ]
[ [ "pandas.read_csv", "pandas.to_datetime" ] ]
dhaase-de/dh-python-dh
[ "40b04407e5f67ec261f559263718ec2b2588dabb" ]
[ "dh/network/__init__.py" ]
[ "\"\"\"\nTools for network communication.\n\"\"\"\n\nimport abc\nimport io\nimport json\nimport socket\nimport struct\nimport sys\nimport time\nimport zlib\n\nimport dh.ejson\nimport dh.utils\n\n# NumPy is only needed for some parts and is optional\ntry:\n import numpy as np\nexcept ImportError as e:\n _NUMPY_ERROR = e\nelse:\n _NUMPY_ERROR = None\n\n\n###\n#%% socket message types\n###\n\n\nclass SocketMessageType(abc.ABC):\n \"\"\"\n Base class providing `send()` and `recv()` methods for sending and\n receiving (higher-level) messages via the socket `socket`.\n \"\"\"\n\n @abc.abstractmethod\n def send(self, socket, x):\n pass\n\n @abc.abstractmethod\n def recv(self, socket):\n pass\n\n\nclass ByteSocketMessageType(SocketMessageType):\n \"\"\"\n Class providing methods for sending and receiving byte *messages* of up to\n 4 GiB in size via a given socket.\n\n Each message has a fixed-length (four byte) header, specifying the length\n of the message content. Thus, calls to `send()` and `recv()` always\n ensure that the entire message is being sent/received.\n\n If `compress` is `True`, messages are compressed before sending and\n decompressed after receiving. This reduces the network load but costs more\n time. The value for `compress` must be the same for both the server and the\n client.\n \"\"\"\n\n def __init__(self, compress=False):\n self._compress = compress\n\n def _recvn(self, socket, byteCount):\n \"\"\"\n Receive and return a fixed number of `byteCount` bytes from the socket.\n \"\"\"\n b = io.BytesIO()\n while True:\n currentByteCount = b.getbuffer().nbytes\n if currentByteCount >= byteCount:\n break\n packet = socket.recv(byteCount - currentByteCount)\n if len(packet) == 0:\n return None\n b.write(packet)\n return b.getvalue()\n\n def send(self, socket, b):\n if self._compress:\n b = zlib.compress(b)\n header = struct.pack(\">I\", int(len(b)))\n socket.sendall(header + b)\n\n def recv(self, socket):\n header = self._recvn(socket, 4)\n if header is None:\n return None\n length = struct.unpack(\">I\", header)[0]\n b = self._recvn(socket, length)\n if self._compress:\n b = zlib.decompress(b)\n return b\n\n\nclass NumpySocketMessageType(ByteSocketMessageType):\n \"\"\"\n Class providing `send()` and `recv()` methods for sending and receiving\n NumPy ndarray objects via the given socket.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n if _NUMPY_ERROR is not None:\n raise _NUMPY_ERROR\n super().__init__(*args, **kwargs)\n\n def send(self, socket, x):\n b = io.BytesIO()\n np.save(file=b, arr=x, allow_pickle=False, fix_imports=False)\n super().send(socket, b.getvalue())\n\n def recv(self, socket):\n b = io.BytesIO(super().recv(socket))\n return np.load(file=b, allow_pickle=False, fix_imports=False)\n\n\nclass JsonSocketMessageType(ByteSocketMessageType):\n \"\"\"\n Class providing `send()` and `recv()` methods for sending and receiving\n JSON-serializable objects via the given socket.\n \"\"\"\n\n def send(self, socket, x):\n j = json.dumps(x, ensure_ascii=True)\n b = bytes(j, \"ascii\")\n super().send(socket, b)\n\n def recv(self, socket):\n b = super().recv(socket)\n j = b.decode(\"ascii\")\n x = json.loads(j)\n return x\n\n\nclass ExtendedJsonSocketMessageType(ByteSocketMessageType):\n \"\"\"\n Class providing `send()` and `recv()` methods for sending and receiving\n JSON-serializable (with extended range of supported types, see\n `dh.ejson`) objects via the given socket.\n\n .. 
seealso:: `dh.ejson`.\n \"\"\"\n\n def send(self, socket, x):\n j = dh.ejson.dumps(x)\n b = bytes(j, \"ascii\")\n super().send(socket, b)\n\n def recv(self, socket):\n b = super().recv(socket)\n j = b.decode(\"ascii\")\n x = dh.ejson.loads(j)\n return x\n\n\n###\n#%% extended socket with support for multiple message types\n###\n\n\nclass MessageSocket():\n \"\"\"\n This is a wrapper class for `socket.socket` which supports the methods\n `msend()` and `mrecv()`, which send/receive entire (higher-level) messages.\n\n For both methods, the `messageType` argument must be an instance of the\n class `SocketMessageType`.\n\n Note: in this context, 'message' means a high-level, user-defined object,\n not the 'message' used in the context of `socket.socket.recvmsg` and\n `socket.socket.sendmsg`.\n \"\"\"\n\n def __init__(self, socket):\n self._socket = socket\n\n def msend(self, messageType, x):\n messageType.send(self._socket, x)\n\n def mrecv(self, messageType):\n return messageType.recv(self._socket)\n\n\n###\n#%% socket servers/clients\n###\n\n\nclass SocketServer(abc.ABC):\n \"\"\"\n Simple socket server which accepts connections on the specified `host`\n and `port` and communicates with the client as specified in\n `communicate()`.\n\n See http://stackoverflow.com/a/19742674/1913780 for an explanation of\n `nodelay`.\n \"\"\"\n\n def __init__(self, host=\"\", port=7214, backlog=5, nodelay=True):\n print(\"Creating socket...\")\n self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n if nodelay:\n self._socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)\n\n print(\"Binding socket to {}:{}...\".format(host if len(host) > 0 else \"*\", port))\n self._socket.bind((host, port))\n self._backlog = backlog\n self._nodelay = nodelay\n\n def _print(self, text):\n print(\"[{}] {}\".format(dh.utils.dtstr(compact=False), text))\n\n def run(self):\n self._socket.listen(self._backlog)\n while True:\n self._print(\"Waiting for connection...\")\n sys.stdout.flush()\n (connectionSocket, connectionAddress) = self._socket.accept()\n self._print(\"Accepted connection from {}:{}\".format(connectionAddress[0], connectionAddress[1]))\n t0 = time.time()\n try:\n self.communicate(MessageSocket(connectionSocket))\n connectionSocket.close()\n except Exception as e:\n self._print(\"** {}: {}\".format(type(e).__name__, e))\n self._print(\"Finished request from {}:{} after {} ms\".format(connectionAddress[0], connectionAddress[1], dh.utils.around((time.time() - t0) * 1000.0)))\n\n @abc.abstractmethod\n def communicate(self, socket):\n \"\"\"\n Implements the entire communication happening for one connection with a\n client via high-level socket messages (see `SocketMessageType`).\n\n Counterpart of `SocketClient.communicate`. See specific client/server\n implementations for examples.\n \"\"\"\n pass\n\n\nclass SocketClient(abc.ABC):\n \"\"\"\n Simple socket client which connects to the server on the specified `host`\n and `port` each time `query()` is called. 
The communication with the server\n is specified in `communicate()`.\n\n See http://stackoverflow.com/a/19742674/1913780 for an explanation of\n `nodelay`.\n \"\"\"\n\n def __init__(self, host, port=7214, nodelay=True):\n self._host = host\n self._port = port\n self._nodelay = nodelay\n\n def query(self, *args, **kwargs):\n # establish connection with the server\n self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n if self._nodelay:\n self._socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)\n self._socket.connect((self._host, self._port))\n\n # actual communication, keep result\n result = self.communicate(MessageSocket(self._socket), *args, **kwargs)\n\n # close connection\n self._socket.shutdown(socket.SHUT_RDWR)\n self._socket.close()\n\n return result\n\n @abc.abstractmethod\n def communicate(self, socket, *args, **kwargs):\n \"\"\"\n Implements the entire communication happening for one connection with a\n server via high-level socket messages (see `SocketMessageType`).\n\n Counterpart of `SocketServer.communicate`. See specific client/server\n implementations for examples.\n \"\"\"\n pass\n\n\nclass ImageProcessingServer(SocketServer):\n \"\"\"\n Special case of `SocketServer` which accepts a NumPy array and JSON-encoded\n parameters and returns a NumPy array. The counterpart is the\n `ImageProcessingClient` class.\n\n To specify the processing behavior, sub-class this class and implement\n the static method `process(data, params)`.\n \"\"\"\n\n def communicate(self, socket):\n # receive input image and parameters\n data = socket.mrecv(NumpySocketMessageType())\n params = socket.mrecv(JsonSocketMessageType())\n\n # process\n try:\n result = self.process(data=data, params=params)\n except Exception as e:\n self._print(\"** {}: {}\".format(type(e).__name__, e))\n result = np.zeros(shape=(0, 0), dtype=\"uint8\")\n\n # send result image\n socket.msend(NumpySocketMessageType(), result)\n\n @staticmethod\n @abc.abstractmethod\n def process(data, params):\n \"\"\"\n This function specifies the processing behavior of this server and must\n be implemeted by the user.\n \"\"\"\n pass\n\n\nclass ImageProcessingClient(SocketClient):\n \"\"\"\n Special case of `SocketClient` which sends a NumPy array and JSON-encoded\n parameters and receives a NumPy array. The counterpart is the\n `ImageProcessingServer` class.\n\n The processing behavior is specified by sub-classing\n `ImageProcessingServer` and implementing the static method\n `process(data, params)`.\n \"\"\"\n\n def communicate(self, socket, data, params):\n # send input image and parameters\n socket.msend(NumpySocketMessageType(), data)\n socket.msend(JsonSocketMessageType(), params)\n\n # receive result image\n return socket.mrecv(NumpySocketMessageType())\n\n def process(self, data, params):\n \"\"\"\n Just another name for the `query` method (to better show the connection\n to the server's `process` method).\n \"\"\"\n return self.query(data=data, params=params)\n\n\nclass ImageProcessingServer2(SocketServer):\n \"\"\"\n Special case of `SocketServer` which accepts a NumPy array and JSON-encoded\n parameters and returns a NumPy array plus a JSON-encodable object. 
The\n counterpart is the `ImageProcessingClient2` class.\n\n To specify the processing behavior, sub-class this class and implement\n the static method `process(data, params)`.\n \"\"\"\n\n def communicate(self, socket):\n # receive input image and parameters\n data = socket.mrecv(NumpySocketMessageType())\n params = socket.mrecv(JsonSocketMessageType())\n\n # process\n try:\n (result, info) = self.process(data=data, params=params)\n except Exception as e:\n self._print(\"** {}: {}\".format(type(e).__name__, e))\n result = np.zeros(shape=(0, 0), dtype=\"uint8\")\n info = None\n\n # send result image and info\n socket.msend(NumpySocketMessageType(), result)\n socket.msend(JsonSocketMessageType(), info)\n\n @staticmethod\n @abc.abstractmethod\n def process(data, params):\n \"\"\"\n This function specifies the processing behavior of this server and must\n be implemeted by the user.\n \"\"\"\n pass\n\n\nclass ImageProcessingClient2(SocketClient):\n \"\"\"\n Special case of `SocketClient` which sends a NumPy array and JSON-encoded\n parameters and receives a NumPy array and a JSON-encoded object. The\n counterpart is the `ImageProcessingServer2` class.\n\n The processing behavior is specified by sub-classing\n `ImageProcessingServer` and implementing the static method\n `process(data, params)`.\n \"\"\"\n\n def communicate(self, socket, data, params):\n # send input image and parameters\n socket.msend(NumpySocketMessageType(), data)\n socket.msend(JsonSocketMessageType(), params)\n\n # receive result image\n result = socket.mrecv(NumpySocketMessageType())\n info = socket.mrecv(JsonSocketMessageType())\n return (result, info)\n\n def process(self, data, params):\n \"\"\"\n Just another name for the `query` method (to better show the connection\n to the server's `process` method).\n \"\"\"\n return self.query(data=data, params=params)\n" ]
[ [ "numpy.load", "numpy.zeros", "numpy.save" ] ]
nisheeth-golakiya/hybrid-sac
[ "025ee9a387ef27bcc7a300780a4911b74199ee48" ]
[ "utils.py" ]
[ "import torch\nimport numpy as np\n\ndef to_gym_action(action_c, action_d, flat_actions=True):\n # assuming both are torch tensors\n if flat_actions:\n ac = action_c.tolist()[0]\n else:\n ac = action_c.unsqueeze(-1).tolist()[0]\n ad = action_d.squeeze().item()\n return [ad, ac]\n\ndef gym_to_buffer(action, flat_actions=True):\n ad = action[0]\n if flat_actions:\n ac = np.hstack(action[1:])\n else:\n ac = action[1]\n return [ad] + np.array(ac).flatten().tolist()\n\ndef to_torch_action(actions, device):\n ad = torch.Tensor(actions[:, 0]).int().to(device)\n ac = torch.Tensor(actions[:, 1:]).to(device)\n return ac, ad\n" ]
[ [ "numpy.hstack", "numpy.array", "torch.Tensor" ] ]
itzdan/Azure-Sentinel-Notebooks
[ "8798b27d8c721ad51cd48e376a24d43e59564ee9" ]
[ "nbdemo/mp_data.py" ]
[ "# -------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n# --------------------------------------------------------------------------\n\"\"\"Demo QueryProvider.\"\"\"\nfrom functools import partial\nfrom pathlib import Path\nimport pickle\nfrom typing import List, Dict, Union, Any, Iterable\nfrom time import sleep\n\nimport pandas as pd\nimport yaml\n\nfrom msticpy.data.data_providers import AttribHolder\nfrom msticpy.data import QueryProvider\n\n\nclass _DataDriver():\n \"\"\"Demo data provider.\"\"\"\n def __init__(self):\n \"\"\"Initialize demo_provider.\"\"\"\n self.connected = False\n self.loaded = True\n self.connection_str = \"\"\n\n def connect(self, connection_str=\"default\", **kwargs):\n \"\"\"Connect to data source.\"\"\"\n del kwargs\n self.connected = True\n self.connection_str = connection_str\n print(\"Connected.\")\n\n\nclass QueryProviderDemo(QueryProvider):\n \"\"\"Query provider for demo data.\"\"\"\n\n _DATA_DEFS = {\n \"SecurityAlert\": {\n \"list_alerts\": \"data/alerts_list.pkl\",\n },\n \"WindowsSecurity\": {\n \"get_process_tree\": \"data/process_tree.pkl\",\n \"list_host_processes\": \"data/processes_on_host.pkl\",\n \"list_host_logons\": \".data/host_logons.pkl\",\n \"list_host_logon_failures\": \"data/failedLogons.pkl\",\n \"list_host_events\": \"data/all_events_df.pkl\",\n },\n \"Network\": {\n \"list_azure_network_flows_by_ip\": \"data/az_net_comms_df.pkl\",\n \"list_azure_network_flows_by_host\": \"data/az_net_comms_df.pkl\",\n },\n }\n\n # pylint: disable=super-init-not-called\n def __init__(self, **kwargs):\n \"\"\"Initialize Demo query provider.\"\"\"\n self._environment = kwargs.get('data_environment', 'default environment')\n print(f\"{self.__class__.__name__} initialized for {self._environment}\")\n data_src_file = kwargs.get('data_src_file')\n if not data_src_file:\n data_srcs = self._DATA_DEFS\n # raise ValueError(\"no query definition file name\")\n else:\n with open(data_src_file, \"r\") as src_file:\n data_srcs = yaml.safe_load(src_file)\n self._query_store = {}\n self._query_provider = _DataDriver()\n self.all_queries = AttribHolder()\n self._add_demo_query_functions(data_srcs)\n\n def _add_demo_query_functions(self, data_defs: Dict[str, Dict[str, str]]):\n for family, queries in data_defs.items():\n if not hasattr(self, family):\n setattr(self, family, AttribHolder())\n query_family = getattr(self, family)\n\n for query_name, file_name in queries.items():\n\n # Create the partial function\n query_func = partial(\n self._execute_query, data_family=family, query_name=query_name,\n data_file=file_name\n )\n\n setattr(query_family, query_name, query_func)\n setattr(self.all_queries, query_name, query_func)\n self._query_store[f\"{family}.{query_name}\"] = file_name\n\n def connect(self, connection_str: str = None, **kwargs):\n \"\"\"\n Connect to data source.\n\n Parameters\n ----------\n connection_str : str\n Connection string for the data source\n\n \"\"\"\n return self._query_provider.connect(connection_str=connection_str, **kwargs)\n\n @property\n def schema(self) -> Dict[str, Dict]:\n \"\"\"\n Return current data schema of connection.\n\n Returns\n -------\n Dict[str, Dict]\n Data schema of current connection.\n\n \"\"\"\n return {}\n\n @property\n def schema_tables(self) -> List[str]:\n \"\"\"\n Return list of tables in the data schema of the connection.\n\n 
Returns\n -------\n List[str]\n Tables in the of current connection.\n\n \"\"\"\n return []\n\n def import_query_file(self, query_file: str):\n \"\"\"\n Import a yaml data source definition.\n\n Parameters\n ----------\n query_file : str\n Path to the file to import\n\n \"\"\"\n raise NotImplementedError()\n\n def list_queries(self) -> List[str]:\n \"\"\"\n Return list of family.query in the store.\n\n Returns\n -------\n Iterable[str]\n List of queries\n\n \"\"\"\n return list(self._query_store.items())\n\n def query_help(self, query_name):\n \"\"\"Print help for query.\"\"\"\n print(f\"query_prov.{self._query_store[query_name]}(**kwargs)\")\n\n def exec_query(self, query: str) -> Union[pd.DataFrame, Any]:\n \"\"\"\n Execute simple query string.\n\n Parameters\n ----------\n query : str\n [description]\n\n Returns\n -------\n Union[pd.DataFrame, Any]\n Query results - a DataFrame if successful\n or a KqlResult if unsuccessful.\n\n \"\"\"\n raise NotImplementedError()\n\n def _execute_query(self, *args, **kwargs) -> Union[pd.DataFrame, Any]:\n if not self._query_provider.loaded:\n raise ValueError(\"Provider is not loaded.\")\n if not self._query_provider.connected:\n raise ValueError(\n \"No connection to a data source.\",\n \"Please call connect(connection_str) and retry.\",\n )\n sleep(1)\n query_name = kwargs.pop(\"query_name\")\n data_file = kwargs.pop(\"data_file\")\n return read_pd_df(data_file, query_name)\n\n\ndef read_pd_df(data_file, query_name):\n \"\"\"Read DataFrame from file.\"\"\"\n if not Path(data_file).is_file():\n raise FileNotFoundError(f\"Data file {data_file} for query {query_name} not found.\")\n\n if data_file.lower().endswith(\"csv\"):\n return pd.read_csv(data_file, infer_datetime_format=True, parse_dates=[\"TimeGenerated\"])\n return pd.read_pickle(data_file)\n\n\nclass TILookupDemo:\n \"\"\"TILookup demo class\"\"\"\n\n _DATA_DEFS = {\n \"ipv4\": \"data/ti_results_ipv4.pkl\",\n \"url\": \"data/ti_results_url.pkl\",\n }\n\n def lookup_ioc(self, ioc_type, **kwargs):\n \"\"\"Lookup single IoC.\"\"\"\n sleep(1)\n return read_pd_df(self._DATA_DEFS.get(ioc_type), ioc_type)\n\n @staticmethod\n def result_to_df(results):\n \"\"\"Convert IoC results to DataFrame.\"\"\"\n if isinstance(results, pd.DataFrame):\n return results\n\nclass GeoLiteLookupDemo:\n \"\"\"GeoLitLookup demo class.\"\"\"\n\n _DATA_DEFS = {\n \"ip_locs\": \"data/ip_locations.pkl\",\n }\n\n def lookup_ip(\n self,\n ip_address: str = None,\n ip_addr_list: Iterable = None,\n ip_entity: Any = None,\n ):\n \"\"\"Look up location.\"\"\"\n del ip_address, ip_addr_list, ip_entity\n with open(self._DATA_DEFS[\"ip_locs\"], \"rb\") as iploc_file:\n ip_locs = pickle.load(iploc_file)\n return str(ip_locs), ip_locs\n\n\n_ASN_DATA = pd.read_pickle(\"data/az_whois.df.pkl\")\n\n\ndef get_whois_info_demo(ip_addr, show_progress=False):\n \"\"\"Lookup Whois data from dataframe.\"\"\"\n sleep(0.02)\n if show_progress:\n print(\".\", end=\"\")\n if \"ExtASN\" not in _ASN_DATA.columns:\n return \"Unknown\", {}\n match_row = _ASN_DATA[_ASN_DATA[\"AllExtIPs\"] == ip_addr]\n asn_text = match_row[\"ExtASN\"].unique()[0]\n if isinstance(asn_text, tuple):\n return asn_text[0], {}\n return asn_text, {}\n" ]
[ [ "pandas.read_pickle", "pandas.read_csv" ] ]
cbonilla20/great_expectations
[ "c5a587a3b1bc5d72d433950aaceb4d09f199690a" ]
[ "tests/expectations/test_expectation_arguments.py" ]
[ "import logging\nfrom typing import List\n\nimport pandas as pd\nimport pytest\n\nimport great_expectations.exceptions as ge_exceptions\nfrom great_expectations.core import (\n ExpectationConfiguration,\n ExpectationSuite,\n ExpectationSuiteValidationResult,\n ExpectationValidationResult,\n)\nfrom great_expectations.core.batch import RuntimeBatchRequest\nfrom great_expectations.data_context import BaseDataContext\nfrom great_expectations.data_context.types.base import (\n DataContextConfig,\n InMemoryStoreBackendDefaults,\n)\nfrom great_expectations.validator.validator import Validator\n\nlogger = logging.getLogger(__name__)\n\ntry:\n from pyspark.sql import DataFrame\nexcept ImportError:\n DataFrame = None\n\n logger.debug(\n \"Unable to load pyspark; install optional spark dependency for support.\"\n )\n\n\ndef build_in_memory_runtime_context():\n data_context_config: DataContextConfig = DataContextConfig(\n datasources={\n \"pandas_datasource\": {\n \"execution_engine\": {\n \"class_name\": \"PandasExecutionEngine\",\n \"module_name\": \"great_expectations.execution_engine\",\n },\n \"class_name\": \"Datasource\",\n \"module_name\": \"great_expectations.datasource\",\n \"data_connectors\": {\n \"runtime_data_connector\": {\n \"class_name\": \"RuntimeDataConnector\",\n \"batch_identifiers\": [\n \"id_key_0\",\n \"id_key_1\",\n ],\n }\n },\n },\n \"spark_datasource\": {\n \"execution_engine\": {\n \"class_name\": \"SparkDFExecutionEngine\",\n \"module_name\": \"great_expectations.execution_engine\",\n },\n \"class_name\": \"Datasource\",\n \"module_name\": \"great_expectations.datasource\",\n \"data_connectors\": {\n \"runtime_data_connector\": {\n \"class_name\": \"RuntimeDataConnector\",\n \"batch_identifiers\": [\n \"id_key_0\",\n \"id_key_1\",\n ],\n }\n },\n },\n },\n expectations_store_name=\"expectations_store\",\n validations_store_name=\"validations_store\",\n evaluation_parameter_store_name=\"evaluation_parameter_store\",\n checkpoint_store_name=\"checkpoint_store\",\n store_backend_defaults=InMemoryStoreBackendDefaults(),\n )\n\n context: BaseDataContext = BaseDataContext(project_config=data_context_config)\n\n return context\n\n\n@pytest.fixture\ndef in_memory_runtime_context():\n return build_in_memory_runtime_context()\n\n\n@pytest.fixture\ndef test_pandas_df():\n df: pd.DataFrame = pd.DataFrame(\n data=[[\"Scott\"], [\"Jeff\"], [\"Thomas\"], [\"Ann\"]], columns=[\"Name\"]\n )\n return df\n\n\n@pytest.fixture\ndef test_spark_df(test_pandas_df, spark_session):\n df: DataFrame = spark_session.createDataFrame(data=test_pandas_df)\n return df\n\n\ndef test_catch_exceptions_no_exceptions(in_memory_runtime_context, test_spark_df):\n catch_exceptions: bool = False # expect exceptions to be raised\n result_format: str = \"SUMMARY\"\n runtime_environment_arguments = {\n \"catch_exceptions\": catch_exceptions,\n \"result_format\": result_format,\n }\n\n suite: ExpectationSuite = in_memory_runtime_context.create_expectation_suite(\n \"test_suite\", overwrite_existing=True\n )\n\n expectation_configuration: ExpectationConfiguration\n\n expectation_meta: dict = {\"Notes\": \"Some notes\"}\n\n expectation_arguments_without_meta: dict\n\n expectation_arguments_column: dict = {\n \"include_config\": True,\n \"column\": \"Name\", # use correct column to avoid error\n }\n expectation_arguments_without_meta = dict(\n **runtime_environment_arguments, **expectation_arguments_column\n )\n expectation_configuration = ExpectationConfiguration(\n 
expectation_type=\"expect_column_values_to_not_be_null\",\n kwargs=expectation_arguments_without_meta,\n meta=expectation_meta,\n )\n suite.add_expectation(expectation_configuration=expectation_configuration)\n\n expectation_arguments_table: dict = {\n \"include_config\": True,\n \"value\": 4,\n }\n expectation_arguments_without_meta = dict(\n **runtime_environment_arguments, **expectation_arguments_table\n )\n expectation_configuration = ExpectationConfiguration(\n expectation_type=\"expect_table_row_count_to_equal\",\n kwargs=expectation_arguments_without_meta,\n meta=expectation_meta,\n )\n suite.add_expectation(expectation_configuration=expectation_configuration)\n\n runtime_batch_request = RuntimeBatchRequest(\n datasource_name=\"spark_datasource\",\n data_connector_name=\"runtime_data_connector\",\n data_asset_name=\"insert_your_data_asset_name_here\",\n runtime_parameters={\"batch_data\": test_spark_df},\n batch_identifiers={\n \"id_key_0\": \"id_value_0\",\n \"id_key_1\": \"id_value_1\",\n },\n )\n\n validator: Validator = in_memory_runtime_context.get_validator(\n batch_request=runtime_batch_request,\n expectation_suite=suite,\n )\n\n # Test calling \"validator.validate()\" explicitly.\n\n validator_validation: ExpectationSuiteValidationResult = validator.validate(\n **runtime_environment_arguments\n )\n results: List[ExpectationValidationResult] = validator_validation.results\n assert len(results) == 2\n\n result: ExpectationValidationResult\n\n for result in results:\n assert result.success\n assert (\n \"exception_traceback\" not in result.exception_info\n ) or not result.exception_info[\"exception_traceback\"]\n assert (\n \"exception_message\" not in result.exception_info\n ) or not result.exception_info[\"exception_message\"]\n\n # Test calling \"validator.expect_*\" through \"validator.validate_expectation()\".\n\n expectation_parameters: dict\n\n expectation_arguments_without_meta = dict(\n **runtime_environment_arguments, **expectation_arguments_column\n )\n expectation_parameters = dict(\n **expectation_arguments_without_meta, **expectation_meta\n )\n result = validator.expect_column_values_to_not_be_null(**expectation_parameters)\n assert result.success\n\n expectation_arguments_without_meta = dict(\n **runtime_environment_arguments, **expectation_arguments_table\n )\n expectation_parameters = dict(\n **expectation_arguments_without_meta, **expectation_meta\n )\n result = validator.expect_table_row_count_to_equal(**expectation_parameters)\n assert result.success\n\n\ndef test_catch_exceptions_exception_occurred_catch_exceptions_false(\n in_memory_runtime_context, test_spark_df\n):\n catch_exceptions: bool = False # expect exceptions to be raised\n result_format: str = \"SUMMARY\"\n runtime_environment_arguments = {\n \"catch_exceptions\": catch_exceptions,\n \"result_format\": result_format,\n }\n\n suite: ExpectationSuite = in_memory_runtime_context.create_expectation_suite(\n \"test_suite\", overwrite_existing=True\n )\n\n expectation_configuration: ExpectationConfiguration\n\n expectation_meta: dict = {\"Notes\": \"Some notes\"}\n\n expectation_arguments_without_meta: dict\n\n expectation_arguments_column: dict = {\n \"include_config\": True,\n \"column\": \"unknown_column\", # use intentionally incorrect column to force error in \"MetricProvider\" evaluations\n }\n expectation_arguments_without_meta = dict(\n **runtime_environment_arguments, **expectation_arguments_column\n )\n expectation_configuration = ExpectationConfiguration(\n 
expectation_type=\"expect_column_values_to_not_be_null\",\n kwargs=expectation_arguments_without_meta,\n meta=expectation_meta,\n )\n suite.add_expectation(expectation_configuration=expectation_configuration)\n\n expectation_arguments_table: dict = {\n \"include_config\": True,\n \"value\": 4,\n }\n expectation_arguments_without_meta = dict(\n **runtime_environment_arguments, **expectation_arguments_table\n )\n expectation_configuration = ExpectationConfiguration(\n expectation_type=\"expect_table_row_count_to_equal\",\n kwargs=expectation_arguments_without_meta,\n meta=expectation_meta,\n )\n suite.add_expectation(expectation_configuration=expectation_configuration)\n\n runtime_batch_request = RuntimeBatchRequest(\n datasource_name=\"spark_datasource\",\n data_connector_name=\"runtime_data_connector\",\n data_asset_name=\"insert_your_data_asset_name_here\",\n runtime_parameters={\"batch_data\": test_spark_df},\n batch_identifiers={\n \"id_key_0\": \"id_value_0\",\n \"id_key_1\": \"id_value_1\",\n },\n )\n\n validator: Validator = in_memory_runtime_context.get_validator(\n batch_request=runtime_batch_request,\n expectation_suite=suite,\n )\n\n expected_exception_message: str = (\n 'Error: The column \"unknown_column\" in BatchData does not exist.'\n )\n\n # Test calling \"validator.validate()\" explicitly.\n\n with pytest.raises(ge_exceptions.MetricResolutionError) as e:\n # noinspection PyUnusedLocal\n validator_validation: ExpectationSuiteValidationResult = validator.validate(\n **runtime_environment_arguments\n )\n assert e.value.message == expected_exception_message\n\n # Test calling \"validator.expect_*\" through \"validator.validate_expectation()\".\n\n expectation_parameters: dict\n\n expectation_arguments_without_meta = dict(\n **runtime_environment_arguments, **expectation_arguments_column\n )\n expectation_parameters = dict(\n **expectation_arguments_without_meta, **expectation_meta\n )\n\n with pytest.raises(ge_exceptions.MetricResolutionError) as e:\n # noinspection PyUnusedLocal\n result: ExpectationValidationResult = (\n validator.expect_column_values_to_not_be_null(**expectation_parameters)\n )\n assert e.value.message == expected_exception_message\n\n # Confirm that even though exceptions may occur in some expectations, other expectations can be validated properly.\n\n expectation_arguments_without_meta = dict(\n **runtime_environment_arguments, **expectation_arguments_table\n )\n expectation_parameters = dict(\n **expectation_arguments_without_meta, **expectation_meta\n )\n result: ExpectationValidationResult = validator.expect_table_row_count_to_equal(\n **expectation_parameters\n )\n assert result.success\n\n\ndef test_catch_exceptions_exception_occurred_catch_exceptions_true(\n in_memory_runtime_context, test_spark_df\n):\n catch_exceptions: bool = True # expect exceptions to be caught\n result_format: str = \"SUMMARY\"\n runtime_environment_arguments = {\n \"catch_exceptions\": catch_exceptions,\n \"result_format\": result_format,\n }\n\n suite: ExpectationSuite = in_memory_runtime_context.create_expectation_suite(\n \"test_suite\", overwrite_existing=True\n )\n\n expectation_configuration: ExpectationConfiguration\n\n expectation_meta: dict = {\"Notes\": \"Some notes\"}\n\n expectation_arguments_without_meta: dict\n\n expectation_arguments_column: dict = {\n \"include_config\": True,\n \"column\": \"unknown_column\", # use intentionally incorrect column to force error in \"MetricProvider\" evaluations\n }\n expectation_arguments_without_meta = dict(\n 
**runtime_environment_arguments, **expectation_arguments_column\n )\n expectation_configuration = ExpectationConfiguration(\n expectation_type=\"expect_column_values_to_not_be_null\",\n kwargs=expectation_arguments_without_meta,\n meta=expectation_meta,\n )\n suite.add_expectation(expectation_configuration=expectation_configuration)\n\n expectation_arguments_table: dict = {\n \"include_config\": True,\n \"value\": 4,\n }\n expectation_arguments_without_meta = dict(\n **runtime_environment_arguments, **expectation_arguments_table\n )\n expectation_configuration = ExpectationConfiguration(\n expectation_type=\"expect_table_row_count_to_equal\",\n kwargs=expectation_arguments_without_meta,\n meta=expectation_meta,\n )\n suite.add_expectation(expectation_configuration=expectation_configuration)\n\n runtime_batch_request = RuntimeBatchRequest(\n datasource_name=\"spark_datasource\",\n data_connector_name=\"runtime_data_connector\",\n data_asset_name=\"insert_your_data_asset_name_here\",\n runtime_parameters={\"batch_data\": test_spark_df},\n batch_identifiers={\n \"id_key_0\": \"id_value_0\",\n \"id_key_1\": \"id_value_1\",\n },\n )\n\n validator: Validator = in_memory_runtime_context.get_validator(\n batch_request=runtime_batch_request,\n expectation_suite=suite,\n )\n\n expected_exception_message: str = (\n 'Error: The column \"unknown_column\" in BatchData does not exist.'\n )\n\n # Test calling \"validator.validate()\" explicitly.\n\n validator_validation: ExpectationSuiteValidationResult = validator.validate(\n **runtime_environment_arguments\n )\n results: List[ExpectationValidationResult] = validator_validation.results\n assert len(results) == 2\n\n # Confirm that even though an exception occurred in one expectation, the other expectation is validated properly.\n\n results = sorted(\n results, key=lambda element: element.expectation_config[\"expectation_type\"]\n )\n\n result: ExpectationValidationResult\n\n result = results[0]\n assert (\n result.expectation_config[\"expectation_type\"]\n == \"expect_column_values_to_not_be_null\"\n )\n assert not result.success\n assert \"exception_traceback\" in result.exception_info\n assert \"exception_message\" in result.exception_info\n assert result.exception_info[\"exception_message\"] == expected_exception_message\n\n result = results[1]\n assert (\n result.expectation_config[\"expectation_type\"]\n == \"expect_table_row_count_to_equal\"\n )\n assert result.success\n assert (\n \"exception_traceback\" not in result.exception_info\n ) or not result.exception_info[\"exception_traceback\"]\n assert (\n \"exception_message\" not in result.exception_info\n ) or not result.exception_info[\"exception_message\"]\n\n # Test calling \"validator.expect_*\" through \"validator.validate_expectation()\".\n\n expectation_parameters: dict\n\n expectation_arguments_without_meta = dict(\n **runtime_environment_arguments, **expectation_arguments_column\n )\n expectation_parameters = dict(\n **expectation_arguments_without_meta, **expectation_meta\n )\n result = validator.expect_column_values_to_not_be_null(**expectation_parameters)\n assert not result.success\n assert \"exception_traceback\" in result.exception_info\n assert \"exception_message\" in result.exception_info\n assert result.exception_info[\"exception_message\"] == expected_exception_message\n\n # Confirm that even though exceptions may occur in some expectations, other expectations can be validated properly.\n\n expectation_arguments_without_meta = dict(\n **runtime_environment_arguments, 
**expectation_arguments_table\n )\n expectation_parameters = dict(\n **expectation_arguments_without_meta, **expectation_meta\n )\n result = validator.expect_table_row_count_to_equal(**expectation_parameters)\n assert result.success\n assert (\n \"exception_traceback\" not in result.exception_info\n ) or not result.exception_info[\"exception_traceback\"]\n assert (\n \"exception_message\" not in result.exception_info\n ) or not result.exception_info[\"exception_message\"]\n\n\ndef test_result_format_configured_no_set_default_override(\n in_memory_runtime_context, test_spark_df\n):\n catch_exceptions: bool = False # expect exceptions to be raised\n result_format: str = \"SUMMARY\"\n runtime_environment_arguments = {\n \"catch_exceptions\": catch_exceptions,\n \"result_format\": result_format,\n }\n\n suite: ExpectationSuite = in_memory_runtime_context.create_expectation_suite(\n \"test_suite\", overwrite_existing=True\n )\n\n expectation_configuration: ExpectationConfiguration\n\n expectation_meta: dict = {\"Notes\": \"Some notes\"}\n\n expectation_arguments_without_meta: dict\n\n expectation_arguments_column: dict = {\n \"include_config\": True,\n \"column\": \"Name\", # use correct column to avoid error\n }\n expectation_arguments_without_meta = dict(\n **runtime_environment_arguments, **expectation_arguments_column\n )\n expectation_configuration = ExpectationConfiguration(\n expectation_type=\"expect_column_values_to_not_be_null\",\n kwargs=expectation_arguments_without_meta,\n meta=expectation_meta,\n )\n suite.add_expectation(expectation_configuration=expectation_configuration)\n\n runtime_batch_request = RuntimeBatchRequest(\n datasource_name=\"spark_datasource\",\n data_connector_name=\"runtime_data_connector\",\n data_asset_name=\"insert_your_data_asset_name_here\",\n runtime_parameters={\"batch_data\": test_spark_df},\n batch_identifiers={\n \"id_key_0\": \"id_value_0\",\n \"id_key_1\": \"id_value_1\",\n },\n )\n\n validator: Validator = in_memory_runtime_context.get_validator(\n batch_request=runtime_batch_request,\n expectation_suite=suite,\n )\n\n # Test calling \"validator.validate()\" explicitly.\n\n validator_validation: ExpectationSuiteValidationResult = validator.validate(\n **runtime_environment_arguments\n )\n results: List[ExpectationValidationResult] = validator_validation.results\n assert len(results) == 1\n\n result: ExpectationValidationResult\n\n result = results[0]\n assert len(result.result.keys()) > 0\n\n # Test calling \"validator.expect_*\" through \"validator.validate_expectation()\".\n\n expectation_parameters: dict\n\n expectation_parameters = dict(\n **expectation_arguments_without_meta, **expectation_meta\n )\n result = validator.expect_column_values_to_not_be_null(**expectation_parameters)\n assert len(result.result.keys()) > 0\n\n\ndef test_result_format_configured_with_set_default_override(\n in_memory_runtime_context, test_spark_df\n):\n catch_exceptions: bool = False # expect exceptions to be raised\n result_format: str = \"SUMMARY\"\n runtime_environment_arguments = {\n \"catch_exceptions\": catch_exceptions,\n \"result_format\": result_format,\n }\n\n suite: ExpectationSuite = in_memory_runtime_context.create_expectation_suite(\n \"test_suite\", overwrite_existing=True\n )\n\n expectation_configuration: ExpectationConfiguration\n\n expectation_meta: dict = {\"Notes\": \"Some notes\"}\n\n expectation_arguments_without_meta: dict\n\n expectation_arguments_column: dict = {\n \"include_config\": True,\n \"column\": \"Name\", # use correct column to 
avoid error\n }\n expectation_arguments_without_meta = dict(\n **runtime_environment_arguments, **expectation_arguments_column\n )\n expectation_configuration = ExpectationConfiguration(\n expectation_type=\"expect_column_values_to_not_be_null\",\n kwargs=expectation_arguments_without_meta,\n meta=expectation_meta,\n )\n suite.add_expectation(expectation_configuration=expectation_configuration)\n\n runtime_batch_request = RuntimeBatchRequest(\n datasource_name=\"spark_datasource\",\n data_connector_name=\"runtime_data_connector\",\n data_asset_name=\"insert_your_data_asset_name_here\",\n runtime_parameters={\"batch_data\": test_spark_df},\n batch_identifiers={\n \"id_key_0\": \"id_value_0\",\n \"id_key_1\": \"id_value_1\",\n },\n )\n\n validator: Validator = in_memory_runtime_context.get_validator(\n batch_request=runtime_batch_request,\n expectation_suite=suite,\n )\n\n validator.set_default_expectation_argument(\"result_format\", \"BOOLEAN_ONLY\")\n\n # Test calling \"validator.validate()\" explicitly.\n\n validator_validation: ExpectationSuiteValidationResult = validator.validate()\n results: List[ExpectationValidationResult] = validator_validation.results\n assert len(results) == 1\n\n result: ExpectationValidationResult\n\n result = results[0]\n assert len(result.result.keys()) == 0\n\n # Test calling \"validator.expect_*\" through \"validator.validate_expectation()\".\n\n expectation_parameters: dict\n\n expectation_parameters = dict(**expectation_arguments_column, **expectation_meta)\n result = validator.expect_column_values_to_not_be_null(**expectation_parameters)\n assert len(result.result.keys()) == 0\n" ]
[ [ "pandas.DataFrame" ] ]
Digvijaykumar21/c
[ "33ac15c98120ff1375641503b99622f7244e2c94" ]
[ "chatbot.py" ]
[ "from tensorflow.keras.models import load_model, Model\r\nfrom tensorflow.keras.layers import Input, Concatenate\r\nimport tensorflow as tf\r\nimport os\r\nfrom tensorflow.python.keras.layers import Layer\r\nfrom tensorflow.python.keras import backend as K\r\nimport pickle\r\nimport numpy as np\r\nimport re\r\nfrom AttentionLayer import AttentionLayer\r\n\r\nwith open('dic.pkl', 'rb') as f:\r\n vocab = pickle.load(f)\r\nwith open('inv.pkl', 'rb') as f:\r\n inv_vocab = pickle.load(f)\r\n\r\n\r\ndef clean_text(txt):\r\n txt = txt.lower()\r\n txt = re.sub(r\"i'm\", \"i am\", txt)\r\n txt = re.sub(r\"he's\", \"he is\", txt)\r\n txt = re.sub(r\"she's\", \"she is\", txt)\r\n txt = re.sub(r\"that's\", \"that is\", txt)\r\n txt = re.sub(r\"what's\", \"what is\", txt)\r\n txt = re.sub(r\"where's\", \"where is\", txt)\r\n txt = re.sub(r\"\\'ll\", \" will\", txt)\r\n txt = re.sub(r\"\\'ve\", \" have\", txt)\r\n txt = re.sub(r\"\\'re\", \" are\", txt)\r\n txt = re.sub(r\"\\'d\", \" would\", txt)\r\n txt = re.sub(r\"won't\", \"will not\", txt)\r\n txt = re.sub(r\"can't\", \"can not\", txt)\r\n txt = re.sub(r\"[^\\w\\s]\", \"\", txt)\r\n return txt\r\n\r\n\r\n\r\nattn_layer = AttentionLayer()\r\n\r\nmodel = load_model('chatbot.h5', custom_objects={'AttentionLayer' : attn_layer})\r\n\r\n\r\n\r\nencoder_inputs = model.layers[0].input\r\nembed = model.layers[2]\r\nenc_embed = embed(encoder_inputs)\r\nenocoder_layer = model.layers[3]\r\n\r\nencoder_outputs, fstate_h, fstate_c, bstate_h, bstate_c = enocoder_layer(enc_embed)\r\n\r\nh = Concatenate()([fstate_h, bstate_h])\r\nc = Concatenate()([fstate_c, bstate_c])\r\nencoder_states = [h, c]\r\n\r\nenc_model = Model(encoder_inputs, \r\n [encoder_outputs,\r\n encoder_states])\r\n\r\n\r\nlatent_dim = 800\r\n\r\ndecoder_inputs = model.layers[1].input\r\ndecoder_lstm = model.layers[6]\r\ndecoder_dense = model.layers[9]\r\ndecoder_state_input_h = Input(shape=(latent_dim,), name='input_3')\r\ndecoder_state_input_c = Input(shape=(latent_dim,), name='input_4')\r\n\r\ndecoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]\r\n\r\ndec_embed = embed(decoder_inputs)\r\n\r\ndecoder_outputs, state_h, state_c = decoder_lstm(dec_embed, initial_state=decoder_states_inputs)\r\ndecoder_states = [state_h, state_c]\r\n\r\ndec_model = Model([decoder_inputs, decoder_states_inputs], [decoder_outputs] + decoder_states)\r\n\r\ndec_dense = model.layers[-1]\r\nattn_layer = model.layers[7]\r\n\r\nfrom keras.preprocessing.sequence import pad_sequences\r\nprint(\"##########################################\")\r\nprint(\"# start chatting ver. 
1.0 #\")\r\nprint(\"##########################################\")\r\n\r\n\r\nprepro1 = \"\"\r\nwhile prepro1 != 'q':\r\n \r\n prepro1 = input(\"you : \")\r\n try:\r\n prepro1 = clean_text(prepro1)\r\n prepro = [prepro1]\r\n \r\n txt = []\r\n for x in prepro:\r\n lst = []\r\n for y in x.split():\r\n try:\r\n lst.append(vocab[y])\r\n except:\r\n lst.append(vocab['<OUT>'])\r\n txt.append(lst)\r\n txt = pad_sequences(txt, 13, padding='post')\r\n\r\n\r\n ###\r\n enc_op, stat = enc_model.predict( txt )\r\n\r\n empty_target_seq = np.zeros( ( 1 , 1) )\r\n empty_target_seq[0, 0] = vocab['<SOS>']\r\n stop_condition = False\r\n decoded_translation = ''\r\n\r\n\r\n while not stop_condition :\r\n\r\n dec_outputs , h , c = dec_model.predict([ empty_target_seq ] + stat )\r\n\r\n ###\r\n ###########################\r\n attn_op, attn_state = attn_layer([enc_op, dec_outputs])\r\n decoder_concat_input = Concatenate(axis=-1)([dec_outputs, attn_op])\r\n decoder_concat_input = dec_dense(decoder_concat_input)\r\n ###########################\r\n\r\n sampled_word_index = np.argmax( decoder_concat_input[0, -1, :] )\r\n\r\n sampled_word = inv_vocab[sampled_word_index] + ' '\r\n\r\n if sampled_word != '<EOS> ':\r\n decoded_translation += sampled_word \r\n\r\n\r\n if sampled_word == '<EOS> ' or len(decoded_translation.split()) > 13:\r\n stop_condition = True\r\n\r\n empty_target_seq = np.zeros( ( 1 , 1 ) ) \r\n empty_target_seq[ 0 , 0 ] = sampled_word_index\r\n stat = [ h , c ] \r\n\r\n print(\"chatbot attention : \", decoded_translation )\r\n print(\"==============================================\")\r\n\r\n except:\r\n print(\"sorry didn't got you , please type again :( \")\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n" ]
[ [ "tensorflow.keras.layers.Concatenate", "tensorflow.keras.models.load_model", "tensorflow.keras.models.Model", "numpy.argmax", "numpy.zeros", "tensorflow.keras.layers.Input" ] ]
LakmalAsh/Emotion-recognition-part
[ "38ed02b7cd92f25c9ca55bf1599b35939e851339" ]
[ "train_emotion_classifier.py" ]
[ "\"\"\"\r\nDescription: Train emotion classification model\r\n\"\"\"\r\n\r\nfrom keras.callbacks import CSVLogger, ModelCheckpoint, EarlyStopping\r\nfrom keras.callbacks import ReduceLROnPlateau\r\nfrom keras.preprocessing.image import ImageDataGenerator\r\nfrom load_and_process import load_fer2013\r\nfrom load_and_process import preprocess_input\r\nfrom models.cnn import mini_XCEPTION\r\nfrom sklearn.model_selection import train_test_split\r\n\r\n# parameters\r\nbatch_size = 32\r\nnum_epochs = 10000\r\ninput_shape = (48, 48, 1)\r\nvalidation_split = .2\r\nverbose = 1\r\nnum_classes = 7\r\npatience = 50\r\nbase_path = 'models/'\r\n\r\n# data generator\r\ndata_generator = ImageDataGenerator(\r\n featurewise_center=False,\r\n featurewise_std_normalization=False,\r\n rotation_range=10,\r\n width_shift_range=0.1,\r\n height_shift_range=0.1,\r\n zoom_range=.1,\r\n horizontal_flip=True)\r\n\r\n# model parameters/compilation\r\nmodel = mini_XCEPTION(input_shape, num_classes)\r\nmodel.compile(optimizer='adam', loss='categorical_crossentropy',\r\n metrics=['accuracy'])\r\nmodel.summary()\r\n\r\n\r\n\r\n\r\n\r\n # callbacks\r\nlog_file_path = base_path + '_emotion_training.log'\r\ncsv_logger = CSVLogger(log_file_path, append=False)\r\nearly_stop = EarlyStopping('val_loss', patience=patience)\r\nreduce_lr = ReduceLROnPlateau('val_loss', factor=0.1,\r\n patience=int(patience/4), verbose=1)\r\ntrained_models_path = base_path + '_mini_XCEPTION'\r\nmodel_names = trained_models_path + '.{epoch:02d}-{val_acc:.2f}.hdf5'\r\nmodel_checkpoint = ModelCheckpoint(model_names, 'val_loss', verbose=1,\r\n save_best_only=True)\r\ncallbacks = [model_checkpoint, csv_logger, early_stop, reduce_lr]\r\n\r\n# loading dataset\r\nfaces, emotions = load_fer2013()\r\nfaces = preprocess_input(faces)\r\nnum_samples, num_classes = emotions.shape\r\nxtrain, xtest,ytrain,ytest = train_test_split(faces, emotions,test_size=0.2,shuffle=True)\r\nmodel.fit_generator(data_generator.flow(xtrain, ytrain,\r\n batch_size),\r\n steps_per_epoch=len(xtrain) / batch_size,\r\n epochs=num_epochs, verbose=1, callbacks=callbacks,\r\n validation_data=(xtest,ytest))\r\n" ]
[ [ "sklearn.model_selection.train_test_split" ] ]