repo_name: string (lengths 6–130)
hexsha: list
file_path: list
code: list
apis: list
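Each record in the rows below pairs a repository name with parallel lists of commit hashes, file paths, source-code strings, and the library APIs each file calls. As a minimal sketch of how records with this schema might be consumed, the snippet below assumes the dump has been exported as a JSON Lines file; the filename `code_api_dump.jsonl` and the export format are assumptions, while the field names come from the schema above.

import json

# Assumption: the dump is available as JSON Lines, one record per line,
# with the fields listed in the schema above.
DUMP_PATH = "code_api_dump.jsonl"  # hypothetical filename

with open(DUMP_PATH, encoding="utf-8") as fh:
    for line in fh:
        record = json.loads(line)
        repo = record["repo_name"]  # e.g. "FlorianWilhelm/mlstm4reco"
        # hexsha, file_path, code and apis are parallel lists, one entry per file.
        for sha, path, code, apis in zip(
            record["hexsha"], record["file_path"], record["code"], record["apis"]
        ):
            print(f"{repo} @ {sha[:8]} :: {path}")
            print(f"  lines of code: {len(code.splitlines())}")
            print(f"  APIs used:     {', '.join(apis)}")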
FlorianWilhelm/mlstm4reco
[ "023ff281c7cdb5aa8a9ae3ef08e0629276865424" ]
[ "experiments/run.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport sys\nimport os\nimport shutil\nimport pickle\nimport time\nimport argparse\n\nimport torch\nimport numpy as np\nfrom IPython.core import ultratb\nfrom spotlight.sequence.implicit import ImplicitSequenceModel\nfrom spotlight.cross_validation import user_based_train_test_split\nfrom spotlight.datasets.goodbooks import get_goodbooks_dataset\nfrom spotlight.datasets.amazon import get_amazon_dataset\nfrom spotlight.datasets.movielens import get_movielens_dataset\nfrom spotlight.evaluation import sequence_mrr_score\nfrom spotlight.torch_utils import set_seed\nimport hyperopt\nfrom hyperopt import Trials, hp, fmin, STATUS_OK, STATUS_FAIL\n\nfrom mlstm4reco.representations import mLSTMNet\n\n\nCUDA = torch.cuda.is_available()\n\n# Start IPython shell on exception (nice for debugging)\nsys.excepthook = ultratb.FormattedTB(mode='Verbose', color_scheme='Linux', call_pdb=1)\n\n# First element is always default\nDATASETS = ['1m', '10m', 'amazon', 'goodbooks']\nMODELS = ['mlstm', 'lstm']\n\n\ndef parse_args(args):\n parser = argparse.ArgumentParser(description='Run experiment for benchmarks.')\n parser.add_argument('dataset',\n type=str,\n choices=DATASETS,\n default=DATASETS[0],\n help='Name of the dataset or variant for Movielens')\n parser.add_argument('-n', '--num_trials',\n dest='num_trials',\n default=100,\n type=int,\n help='Number of trials to run')\n parser.add_argument('-m', '--model',\n dest='model',\n default=MODELS[0],\n choices=MODELS,\n type=str,\n help='Model for experiment')\n\n return parser.parse_args(args)\n\n\ndef hyperparameter_space(model_type):\n \"\"\"Define hyperopt hyperparameter space\n\n Args:\n model_type: Restrict to model if provided.\n\n Returns:\n hyperparameter object\n \"\"\"\n\n common_space = {\n 'batch_size': hp.quniform('batch_size', 64, 256, 16),\n 'learn_rate': hp.loguniform('learn_rate', -6, -3),\n 'l2': hp.loguniform('l2', -25, -9),\n 'n_iter': hp.quniform('n_iter', 20, 50, 5),\n 'loss': hp.choice('loss', ['adaptive_hinge']),\n 'embedding_dim': hp.quniform('embedding_dim', 16, 128, 8),\n }\n\n models = [\n {\n 'type': 'lstm',\n 'representation': 'lstm',\n **common_space\n },\n {\n 'type': 'mlstm',\n **common_space\n }\n ]\n\n space = [model for model in models if model['type'] == model_type]\n assert len(space) == 1\n\n return space[0]\n\n\ndef get_objective(train, valid, test, random_state=None):\n\n def objective(space):\n batch_size = int(space['batch_size'])\n learn_rate = space['learn_rate']\n loss = space['loss']\n n_iter = int(space['n_iter'])\n embedding_dim = int(space['embedding_dim'])\n l2 = space['l2']\n\n if space['type'] == 'mlstm':\n representation = mLSTMNet(\n train.num_items,\n embedding_dim=embedding_dim)\n model = ImplicitSequenceModel(\n loss=loss,\n batch_size=batch_size,\n representation=representation,\n learning_rate=learn_rate,\n n_iter=n_iter,\n l2=l2,\n use_cuda=CUDA,\n random_state=random_state)\n elif space['type'] == 'lstm':\n representation = space['representation']\n model = ImplicitSequenceModel(\n loss=loss,\n embedding_dim=embedding_dim,\n batch_size=batch_size,\n representation=representation,\n learning_rate=learn_rate,\n n_iter=n_iter,\n l2=l2,\n use_cuda=CUDA,\n random_state=random_state)\n else:\n raise ValueError('Unknown model type {}'.format(space.get('type', 'NA')))\n\n start = time.clock()\n try:\n model.fit(train, verbose=True)\n except ValueError:\n elapsed = time.clock() - start\n return {'loss': 0.0,\n 'status': STATUS_FAIL,\n 'validation_mrr': 0.0,\n 
'test_mrr': 0.0,\n 'elapsed': elapsed,\n 'hyper': space}\n elapsed = time.clock() - start\n print(model)\n\n validation_mrr = sequence_mrr_score(\n model,\n valid,\n exclude_preceding=True\n ).mean()\n test_mrr = sequence_mrr_score(\n model,\n test,\n exclude_preceding=True\n ).mean()\n\n print('MRR {} {}'.format(validation_mrr, test_mrr))\n\n if np.isnan(validation_mrr):\n status = STATUS_FAIL\n else:\n status = STATUS_OK\n\n return {'loss': -validation_mrr,\n 'status': status,\n 'validation_mrr': validation_mrr,\n 'test_mrr': test_mrr,\n 'elapsed': elapsed,\n 'hyper': space}\n return objective\n\n\ndef optimize(objective, space, trials_fname=None, max_evals=5):\n\n if trials_fname is not None and os.path.exists(trials_fname):\n with open(trials_fname, 'rb') as trials_file:\n trials = pickle.load(trials_file)\n else:\n trials = Trials()\n\n fmin(objective,\n space=space,\n algo=hyperopt.tpe.suggest,\n trials=trials,\n max_evals=max_evals)\n\n if trials_fname is not None:\n temporary = '{}.temp'.format(trials_fname)\n with open(temporary, 'wb') as trials_file:\n pickle.dump(trials, trials_file)\n shutil.move(temporary, trials_fname)\n\n return trials\n\n\ndef summarize_trials(trials):\n results = trials.trials\n model_type = results[0]['result']['hyper']['type']\n\n results = sorted(results, key=lambda x: -x['result']['validation_mrr'])\n\n if results:\n print('Best {}: {}'.format(model_type, results[0]['result']))\n\n results = sorted(results, key=lambda x: -x['result']['test_mrr'])\n\n if results:\n print('Best test {}: {}'.format(model_type, results[0]['result']))\n\n\ndef main(args):\n status = 'available' if CUDA else 'not available'\n print(\"CUDA is {}!\".format(status))\n args = parse_args(args)\n\n # Fix random_state\n seed = 72\n set_seed(seed)\n random_state = np.random.RandomState(seed)\n\n max_sequence_length = 100\n min_sequence_length = 20\n step_size = max_sequence_length\n\n if args.dataset == 'amazon':\n max_sequence_length = 50\n min_sequence_length = 5\n step_size = max_sequence_length\n dataset = get_amazon_dataset()\n elif args.dataset == 'goodbooks':\n dataset = get_goodbooks_dataset()\n else:\n dataset = get_movielens_dataset(args.dataset.upper())\n\n args.variant = args.dataset\n train, rest = user_based_train_test_split(\n dataset,\n test_percentage=0.2,\n random_state=random_state)\n test, valid = user_based_train_test_split(\n rest,\n test_percentage=0.5,\n random_state=random_state)\n train = train.to_sequence(\n max_sequence_length=max_sequence_length,\n min_sequence_length=min_sequence_length,\n step_size=step_size)\n test = test.to_sequence(\n max_sequence_length=max_sequence_length,\n min_sequence_length=min_sequence_length,\n step_size=step_size)\n valid = valid.to_sequence(\n max_sequence_length=max_sequence_length,\n min_sequence_length=min_sequence_length,\n step_size=step_size)\n\n print('model: {}, data: {}'.format(args.model, train))\n\n fname = 'experiment_{}_{}.pickle'.format(args.model, args.dataset)\n objective = get_objective(train, valid, test, random_state)\n space = hyperparameter_space(args.model)\n\n for iteration in range(args.num_trials):\n print('Iteration {}'.format(iteration))\n trials = optimize(objective,\n space,\n trials_fname=fname,\n max_evals=iteration + 1)\n\n summarize_trials(trials)\n\n\nif __name__ == '__main__':\n main(sys.argv[1:])\n" ]
[ [ "numpy.isnan", "numpy.random.RandomState", "torch.cuda.is_available" ] ]
lukaswenzl/High-Redshift-Quasars-with-Random-Forests
[ "edf67a0129277cfa17392a0808498404d8a26c6a" ]
[ "result_analysis/kstar_sample.py" ]
[ "import numpy as np\nimport pandas as pd\n\ndata = pd.read_csv(\"../data/results/full14.csv\")\nkstars = data.query(\"pred_class == 'K'\")\n\nkstars = kstars.sample(n=1000000)\nkstars.to_csv(\"../data/analysis/kstars_1mio.csv\")\n" ]
[ [ "pandas.read_csv" ] ]
ShGanesh/Simple_Python_Calculator_with_GUI
[ "f81c0a96c96c51e6ac9bda38f7d9d9b3879481e1" ]
[ "graphing_calci.py" ]
[ "import matplotlib.pyplot as plt\nimport numpy as np\nimport tkinter as tk\nfrom tkinter import *\nimport re\n\nreplacements = {\n 'sin' : 'np.sin',\n 'cos' : 'np.cos',\n 'exp': 'np.exp',\n 'sqrt': 'np.sqrt',\n '^': '**',\n}\n\nallowed_words = [\n 'x',\n 'sin',\n 'cos',\n 'sqrt',\n 'exp',\n]\n\n\ndef string2func(string): \t\t\t\t\t\t\t#evaluates the string and returns a function of x\n for word in re.findall('[a-zA-Z_]+', string):\t# find all words and check if all are allowed:\n if word not in allowed_words:\n raise ValueError(\n '\"{}\" is forbidden to use in math expression'.format(word)\n )\n\n for old, new in replacements.items():\n string = string.replace(old, new)\n\n def func(x):\n return eval(string)\n\n return func\n\n\ndef printGraph(e):\n\tplt.clf()\n\tfunc = string2func(e)\n\ta = float(0)\n\tb = float(100)\n\tx = np.linspace(a, b, 250)\n\n\tplt.plot(x, func(x))\n\tplt.xlim(a, b)\n\tplt.show()\n\n\ndef printInput():\n\tip=eqn.get()\n\teqn.set=(\"\")\n\tprintGraph(ip) \n\n\nwindow=Tk()\neqn=tk.StringVar()\nlbl=Label(window, text=\"Type your Equation\", fg='red', font=(\"Helvetica\", 16))\nlbl.place(x=50, y=40)\nlbl2=Label(window, text=\"y = \", fg='black', font=(\"Helvetica\", 12))\nlbl2.place(x=27, y=85)\ntxtfld=Entry(window, text=\"This is Entry Widget\", textvariable=eqn, bd=6)\ntxtfld.place(x=60, y=80)\nbtn=Button(window, text=\"Plot\", fg='blue', command=printInput)\nbtn.pack()\nbtn.place(x=120, y=120)\n\nwindow.title('GRAPHING CALCULATOR')\nwindow.geometry(\"300x180+10+10\")\nwindow.mainloop()\n\n\n" ]
[ [ "matplotlib.pyplot.xlim", "matplotlib.pyplot.clf", "matplotlib.pyplot.show", "numpy.linspace" ] ]
charles-cowart/microsetta-private-api
[ "96d3c09c8e30e00c87ce82c5e7d16ba4eb93ce74" ]
[ "microsetta_private_api/repo/metadata_repo/_transforms.py" ]
[ "from ._constants import MISSING_VALUE, UNSPECIFIED\nfrom functools import reduce\nfrom operator import or_\nimport pandas as pd\nimport numpy as np\n\n\nWEIGHT_KG = 'weight_kg'\nWEIGHT_UNITS = 'weight_units'\nHEIGHT_CM = 'height_cm'\nHEIGHT_UNITS = 'height_units'\nBIRTH_YEAR = 'birth_year'\nBIRTH_MONTH = 'birth_month'\nCOLLECTION_TIMESTAMP = 'collection_timestamp'\nBMI_ = 'bmi'\nAGE_YEARS = 'age_years'\nAGE_CAT = 'age_cat'\nBMI_CAT = 'bmi_cat'\nINCHES = 'inches'\nCENTIMETERS = 'centimeters'\nPOUNDS = 'pounds'\nKILOGRAMS = 'kilograms'\nALCOHOL_CONSUMPTION = 'alcohol_consumption'\nALCOHOL_FREQUENCY = 'alcohol_frequency'\nSEX = 'sex'\nGENDER = 'gender'\nANONYMIZED_NAME = 'anonymized_name'\nSAMPLE_NAME = 'sample_name'\n\n\nclass Transformer:\n REQUIRED_COLUMNS = None\n COLUMN_NAME = None\n EXISTING_UNITS_COL_UPDATE = None\n\n @classmethod\n def satisfies_requirements(cls, df):\n return cls.REQUIRED_COLUMNS.issubset(set(df.columns))\n\n @classmethod\n def apply(cls, df):\n return cls._transform(df).fillna(UNSPECIFIED)\n\n @classmethod\n def _transform(cls, df):\n raise NotImplementedError\n\n @staticmethod\n def not_null_map(*args):\n # args are expected to be pd.Series instances\n\n # reduce(or_, ...) acts as an elementwise logical OR\n\n # ~(...) performs an elementwise logical NOT\n return ~(reduce(or_, map(pd.isnull, args)))\n\n @classmethod\n def basis(cls, index):\n return pd.Series([None] * len(index), index=index,\n name=cls.COLUMN_NAME)\n\n\nclass BMI(Transformer):\n REQUIRED_COLUMNS = frozenset([WEIGHT_KG, HEIGHT_CM])\n COLUMN_NAME = BMI_\n\n @classmethod\n def _transform(cls, df):\n # weight in kilograms / (height in meters)^2\n weight = pd.to_numeric(df[WEIGHT_KG], errors='coerce')\n height = pd.to_numeric(df[HEIGHT_CM], errors='coerce')\n height /= 100 # covert to meters\n height *= height #\n\n not_null_map = cls.not_null_map(weight, height)\n\n series = cls.basis(df.index)\n series.loc[not_null_map] = \\\n (weight.loc[not_null_map] / height.loc[not_null_map]).round(1)\n\n return series\n\n\nclass AgeYears(Transformer):\n REQUIRED_COLUMNS = frozenset([BIRTH_YEAR, BIRTH_MONTH,\n COLLECTION_TIMESTAMP])\n COLUMN_NAME = AGE_YEARS\n\n @classmethod\n def _transform(cls, df):\n def make_month_year(row):\n # TODO: this function can probably be greatly reduced\n # if only applied to positions which assessed to be not null\n # prior\n mo = row[BIRTH_MONTH]\n yr = row[BIRTH_YEAR]\n if pd.isnull(mo) or pd.isnull(yr):\n return '-'\n else:\n return '%s-%s' % (mo, yr)\n\n birth_month_year = df.apply(make_month_year, axis=1)\n birth_month_year = pd.to_datetime(birth_month_year, errors='coerce',\n format='%B-%Y')\n collection_timestamp = pd.to_datetime(df[COLLECTION_TIMESTAMP],\n errors='coerce')\n\n not_null_map = cls.not_null_map(birth_month_year, collection_timestamp)\n\n series = cls.basis(df.index)\n\n collection_timestamp = collection_timestamp[not_null_map]\n birth_month_year = birth_month_year[not_null_map]\n\n # compute timedelta64 types, and express as year\n td = collection_timestamp - birth_month_year\n td_as_year = (td / np.timedelta64(1, 'Y')).round(1)\n series.loc[not_null_map] = td_as_year\n\n return series\n\n\nclass AgeCat(Transformer):\n REQUIRED_COLUMNS = frozenset([AGE_YEARS, ])\n COLUMN_NAME = AGE_CAT\n\n @classmethod\n def _transform(cls, df):\n # these bounds are based on the values previously used for metadata\n # pulldown in labadmin\n bounds = [('baby', 0, 3),\n ('child', 3, 13),\n ('teen', 13, 20),\n ('20s', 20, 30),\n ('30s', 30, 40),\n ('40s', 40, 50),\n ('50s', 
50, 60),\n ('60s', 60, 70),\n ('70+', 70, 123)]\n\n age_years = pd.to_numeric(df[AGE_YEARS], errors='coerce')\n age_cat = cls.basis(df.index)\n\n for label, lower, upper in bounds:\n # this bounds checking is consistent with that used for metadata\n # pulldown in labadmin where the lowerbound is inclusive and\n # upperbound is exclusive.\n positions = (age_years >= lower) & (age_years < upper)\n age_cat.loc[positions] = label\n\n return age_cat\n\n\nclass BMICat(Transformer):\n REQUIRED_COLUMNS = frozenset([BMI_, ])\n COLUMN_NAME = BMI_CAT\n\n @classmethod\n def _transform(cls, df):\n # these bounds are based on the values previously used for metadata\n # pulldown in labadmin\n bounds = [('Underweight', 8, 18.5),\n ('Normal', 18.5, 25),\n ('Overweight', 25, 30),\n ('Obese', 30, 80)]\n bmi = pd.to_numeric(df[BMI_], errors='coerce')\n bmi_cat = cls.basis(df.index)\n\n for label, lower, upper in bounds:\n # this bounds checking is consistent with that used for metadata\n # pulldown in labadmin where the lowerbound is inclusive and\n # upperbound is exclusive.\n positions = (bmi >= lower) & (bmi < upper)\n bmi_cat.loc[positions] = label\n\n return bmi_cat\n\n\nclass AlcoholConsumption(Transformer):\n REQUIRED_COLUMNS = frozenset([ALCOHOL_FREQUENCY, ])\n COLUMN_NAME = ALCOHOL_CONSUMPTION\n\n @classmethod\n def _transform(cls, df):\n mapping = {'Rarely (a few times/month)': 'Yes',\n 'Occasionally (1-2 times/week)': 'Yes',\n 'Regularly (3-5 times/week)': 'Yes',\n 'Daily': 'Yes',\n 'Never': 'No',\n 'Unspecified': UNSPECIFIED,\n MISSING_VALUE: MISSING_VALUE}\n\n # using value_counts() here as it drops NA by default whereas\n # unique() retains NA values.\n observed_values = set(df[ALCOHOL_FREQUENCY].value_counts().index)\n if not observed_values.issubset(mapping):\n raise KeyError(\"Unexpected values present in column %s: %s\" %\n (ALCOHOL_FREQUENCY, observed_values - set(mapping)))\n\n series = df[ALCOHOL_FREQUENCY].replace(mapping, inplace=False)\n series.name = cls.COLUMN_NAME\n return series\n\n\nclass Sex(Transformer):\n # The existing pulldown code cast entries of GENDER to lowercase, and\n # stored them within the SEX variable. 
Adding here for consistency\n # with existing metadata in Qiita.\n REQUIRED_COLUMNS = frozenset([GENDER, ])\n COLUMN_NAME = SEX\n\n @classmethod\n def _transform(cls, df):\n mapping = {'Female': 'female',\n 'Male': 'male',\n 'Other': 'other',\n\n # Lower case is not ideal here, however that's what is\n # presently in Qiita\n 'Unspecified': 'unspecified',\n MISSING_VALUE: MISSING_VALUE}\n\n observed_values = set(df[GENDER].value_counts().index)\n if not observed_values.issubset(mapping):\n raise KeyError(\"Unexpected values present in column %s: %s\" %\n (GENDER, observed_values - set(mapping)))\n\n series = df[GENDER].replace(mapping, inplace=False)\n series.name = cls.COLUMN_NAME\n return series\n\n\ndef _normalizer(df, focus_col, units_col, units_value, factor):\n # get our columns\n focus = pd.to_numeric(df[focus_col], errors='coerce')\n units = df[units_col]\n\n # operate on a copy as to retain non-unit focus values (e.g., values\n # already expressed as centimeters\n result = focus.copy()\n\n # anything negative is weird so kill it\n result[result < 0] = None\n\n # figure out what positions are safe to operate on\n not_null_map = Transformer.not_null_map(result, units)\n\n # take entries like where either focus or units are null and kill them\n result.loc[not_null_map[~not_null_map].index] = None\n\n # reduce to only those safe to operate on\n focus_not_null = result[not_null_map]\n units_not_null = units[not_null_map]\n focus_adj = focus_not_null.loc[units_not_null == units_value]\n\n # adjust the indices that need adjustment\n result.loc[focus_adj.index] = focus_adj * factor\n\n return result\n\n\nclass _Normalize(Transformer):\n FOCUS_COL = None\n FOCUS_UNITS = None\n UNITS_COL = None\n FACTOR = None\n\n @classmethod\n def _transform(cls, df):\n return _normalizer(df, cls.FOCUS_COL, cls.UNITS_COL, cls.FOCUS_UNITS,\n cls.FACTOR)\n\n\nclass NormalizeHeight(_Normalize):\n REQUIRED_COLUMNS = frozenset([HEIGHT_UNITS, HEIGHT_CM])\n COLUMN_NAME = HEIGHT_CM\n\n # after normalization to centimeters, we need to also update the\n # height_units column to reflect these values are now centimeters\n EXISTING_UNITS_COL_UPDATE = (HEIGHT_UNITS, CENTIMETERS)\n FOCUS_COL = HEIGHT_CM\n UNITS_COL = HEIGHT_UNITS\n FOCUS_UNITS = INCHES\n FACTOR = 2.54\n\n\nclass NormalizeWeight(_Normalize):\n REQUIRED_COLUMNS = frozenset([WEIGHT_UNITS, WEIGHT_KG])\n COLUMN_NAME = WEIGHT_KG\n\n # after normalization to kilograms, we need to also update the\n # weight_units column to reflect these values are now in kg\n EXISTING_UNITS_COL_UPDATE = (WEIGHT_UNITS, KILOGRAMS)\n FOCUS_COL = WEIGHT_KG\n FOCUS_UNITS = POUNDS\n UNITS_COL = WEIGHT_UNITS\n FACTOR = (1 / 2.20462)\n\n\n# transforms are order dependent as some entries (e.g., BMICat) depend\n# on the presence of a BMI column\nHUMAN_TRANSFORMS = (AgeYears, AgeCat, NormalizeWeight, NormalizeHeight,\n BMI, BMICat, AlcoholConsumption, Sex)\n\n\ndef apply_transforms(df, transforms):\n for transform in transforms:\n if transform.satisfies_requirements(df):\n # note: not using df.apply here as casts are needed on a\n # case-by-case basis, and pandas is much more efficient\n # casting whole columns\n subset = df[transform.REQUIRED_COLUMNS]\n series = transform.apply(subset)\n\n # it is almost certainly the case that the input dataframe is\n # str already, so let's fall back. And these will be serialized\n # anyway w/o type information.\n series = series.astype(str)\n\n # NOTE: this operation can either change AN EXISTING column in the\n # DataFrame or ADD a column. 
Both are valid. Operations such as\n # the creation of \"age_years\" will CREATE a new column, whereas\n # the normalization of \"height_cm\" will MODIFY an existing one.\n df[transform.COLUMN_NAME] = series\n\n # update a an existing column if the transform needs to. An example\n # is with height, where once values are normalized to centimeters\n # within the height_cm column, we need to then also modify the\n # height_units column for all non-null height_cm entries as they\n # are assured to be centimeters.\n if transform.EXISTING_UNITS_COL_UPDATE is not None:\n column, value = transform.EXISTING_UNITS_COL_UPDATE\n df.loc[~df[transform.COLUMN_NAME].isnull(), column] = value\n\n return df\n" ]
[ [ "numpy.timedelta64", "pandas.to_datetime", "pandas.to_numeric", "pandas.isnull" ] ]
FablabHome/PCMS_home_robotics
[ "21202fb73811edfcbdfde204ba33fb8bd4360d4b", "21202fb73811edfcbdfde204ba33fb8bd4360d4b" ]
[ "RCJ_pcms_base/scripts/tests/PersonFollower/DrawWaypoint.py", "RCJ_pcms_base/scripts/GenderDetectionNode.py" ]
[ "import cv2 as cv\nimport numpy as np\nimport rospy\nfrom home_robot_msgs.msg import PFWaypoints\n\n\ndef callback(wpts: PFWaypoints):\n global waypoints\n waypoints = wpts.waypoints\n\n\nrospy.init_node('draw_waypoint')\nrate = rospy.Rate(30)\n\nwaypoints = []\ncm2pixel_ratio = 1 / 10\n\nrospy.Subscriber(\n '/waypoint_recorder/waypoints',\n PFWaypoints,\n callback,\n queue_size=1\n)\n\nwhile not rospy.is_shutdown():\n board = np.zeros((480, 640, 3))\n for waypoint in waypoints:\n x = waypoint.x\n y = waypoint.y\n z = waypoint.z\n\n wpt_x = int(x * cm2pixel_ratio + 320)\n wpt_y = int(z * cm2pixel_ratio + 240)\n\n cv.circle(board, (wpt_x, wpt_y), 5, (32, 255, 0), -1)\n\n cv.imshow('map', board)\n key = cv.waitKey(1) & 0xFF\n if key in [27, ord('q')]:\n break\n\n rate.sleep()\n\ncv.destroyAllWindows()\n", "#!/usr/bin/env python3\n\"\"\"\nMIT License\n\nCopyright (c) 2020 rootadminWalker\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n\"\"\"\n\nimport cv2 as cv\nimport numpy as np\nimport rospy\nfrom cv_bridge import CvBridge\nfrom home_robot_msgs.msg import ObjectBoxes\n\n\nclass GenderDetectionNode:\n def __init__(self, age_gender: cv.dnn_Net):\n self.ag_recognizer = age_gender\n\n self.result_pub = rospy.Publisher(\n '~details',\n ObjectBoxes,\n queue_size=1\n )\n\n rospy.Subscriber(\n '/FD/faces',\n ObjectBoxes,\n self.callback,\n queue_size=1\n )\n\n self.bridge = CvBridge()\n self.detected_faces = None\n self.main()\n\n def callback(self, faces: ObjectBoxes):\n self.detected_faces = faces\n\n def main(self):\n while not rospy.is_shutdown():\n faces = self.detected_faces\n if faces is None:\n continue\n\n frame = self.bridge.compressed_imgmsg_to_cv2(faces.source_img)\n for face in faces.boxes:\n face_roi = self.bridge.compressed_imgmsg_to_cv2(face.source_img)\n blob = cv.dnn.blobFromImage(\n face_roi,\n size=(62, 62),\n scalefactor=1.0,\n mean=(0, 0, 0),\n swapRB=False,\n crop=False\n )\n self.ag_recognizer.setInput(blob)\n p_age, p_gender = self.ag_recognizer.forward(['age_conv3', 'prob'])\n gender = np.argmax(p_gender)\n age = int(p_age[0][0][0][0] * 100)\n\n face.label = f'{gender}:{age}'\n\n # gender_txt = 'Male' if gender == 1 else 'Female'\n # color = (255, 100, 32) if gender == 1 else (32, 0, 255)\n #\n # age_txt = f'Looks like {age}'\n # cv.putText(frame, gender_txt, (face.x1, face.y1 - 20), cv.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)\n # cv.putText(frame, age_txt, (face.x1, face.y1 - 10), cv.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)\n # cv.rectangle(frame, (face.x1, face.y1), (face.x2, face.y2), color, 
2)\n\n # cv.imshow('frame', frame)\n # key = cv.waitKey(1) & 0xFF\n # if key in [27, ord('q')]:\n # break\n self.result_pub.publish(faces)\n\n\nif __name__ == '__main__':\n rospy.init_node('GAD')\n bin_path = rospy.get_param('~bin_path')\n xml_path = rospy.get_param('~xml_path')\n gender_age = cv.dnn.readNet(bin_path, xml_path)\n node = GenderDetectionNode(gender_age)\n" ]
[ [ "numpy.zeros" ], [ "numpy.argmax" ] ]
vladvelici/dbxlogger
[ "75fc34d5fe7479727c37eb22e80cdd4e5917bb6b" ]
[ "examples/torchbearer/cifar10.py" ]
[ "import argparse\n\nimport torch\nimport torch.nn as nn\nimport torchvision\nimport torch.optim as optim\nfrom torchvision import transforms\n\nimport torchbearer\nfrom torchbearer import Trial\nfrom torchbearer.cv_utils import DatasetValidationSplitter\n\nimport dbxlogger as dbx\nfrom dbxlogger.integrations.torchbearer_print import DbxCallbackPrint\nfrom dbxlogger.integrations.torchbearer import DbxCallback\n\n\nclass SimpleModel(nn.Module):\n def __init__(self):\n super(SimpleModel, self).__init__()\n self.convs = nn.Sequential(\n nn.Conv2d(3, 16, stride=2, kernel_size=3),\n nn.BatchNorm2d(16),\n nn.ReLU(),\n nn.Conv2d(16, 32, stride=2, kernel_size=3),\n nn.BatchNorm2d(32),\n nn.ReLU(),\n nn.Conv2d(32, 64, stride=2, kernel_size=3),\n nn.BatchNorm2d(64),\n nn.ReLU()\n )\n\n self.classifier = nn.Linear(576, 10)\n\n def forward(self, x):\n x = self.convs(x)\n x = x.view(-1, 576)\n return self.classifier(x)\n\n\ndef datasets(batch_size, data_path):\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n\n dataset = torchvision.datasets.CIFAR10(root=data_path, train=True, download=True,\n transform=transforms.Compose([transforms.ToTensor(), normalize]))\n\n dataset.targets = dataset.targets[:96]\n dataset.data = dataset.data[:96, :, :, :]\n\n splitter = DatasetValidationSplitter(len(dataset), 0.1)\n trainset = splitter.get_train_dataset(dataset)\n valset = splitter.get_val_dataset(dataset)\n\n traingen = torch.utils.data.DataLoader(trainset, pin_memory=True, batch_size=batch_size, shuffle=True, num_workers=10)\n valgen = torch.utils.data.DataLoader(valset, pin_memory=True, batch_size=batch_size, shuffle=True, num_workers=10)\n\n\n testset = torchvision.datasets.CIFAR10(root=data_path, train=False, download=True,\n transform=transforms.Compose([transforms.ToTensor(), normalize]))\n testgen = torch.utils.data.DataLoader(testset, pin_memory=True, batch_size=batch_size, shuffle=False, num_workers=10)\n\n return traingen, valgen, testgen\n\ndef main():\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-bs\", type=int, help=\"batch size\", default=128)\n parser.add_argument(\"-lr\", type=float, help=\"learning rate\", default=0.001)\n parser.add_argument(\"-epochs\", type=int, help=\"num epochs\", default=5)\n parser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training')\n parser.add_argument('--seed', type=int, default=1, metavar='S',\n help='random seed (default: 1)')\n parser.add_argument(\"--data-path\", type=str, default=\"./data\")\n\n dbx.add_arguments_to(parser)\n args = parser.parse_args()\n\n model = SimpleModel()\n traingen, valgen, testgen = datasets(args.bs, args.data_path)\n\n device = 'cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu'\n optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr)\n loss = nn.CrossEntropyLoss()\n\n exp = dbx.exp_from_args(\n args,\n kind=\"torchbearer-cifar\",\n args_to_ignore=[\"no_cuda\", \"data_path\"],\n env=False)\n exp.save()\n exp.print_info()\n\n callbacks = [DbxCallback(exp)]\n\n trial = Trial(model, optimizer, loss, metrics=['acc', 'loss'], callbacks=callbacks).to(device)\n trial.with_generators(train_generator=traingen, val_generator=valgen, test_generator=testgen)\n history = trial.run(epochs=args.epochs, verbose=1)\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "torch.nn.CrossEntropyLoss", "torch.nn.Conv2d", "torch.utils.data.DataLoader", "torch.nn.Linear", "torch.cuda.is_available", "torch.nn.BatchNorm2d", "torch.nn.ReLU" ] ]
Daffan/APPLR
[ "ceb394cd337b4b4ccb3915d05d1f42fd317c5c8e" ]
[ "continuous/net.py" ]
[ "import torch\nfrom torch import nn\nfrom tianshou.data import to_torch\nimport numpy as np\n\nclass Net(nn.Module):\n def __init__(self, num_layers=2, state_shape=728, action_shape=0, concat=False, hidden_layer_size=512, device=\"cpu\"):\n super().__init__()\n self.device = device\n state_shape = 728\n self.block1 = nn.Sequential(\n nn.Conv1d(in_channels=1, out_channels=64, kernel_size=7, stride=3),\n nn.ReLU(), nn.BatchNorm1d(64), nn.MaxPool1d(kernel_size = 3)\n )\n self.block2 = nn.Sequential(\n nn.Conv1d(in_channels=64, out_channels=64, kernel_size=3, stride=1),\n nn.ReLU(), nn.BatchNorm1d(64), \n nn.Conv1d(in_channels=64, out_channels=64, kernel_size=3, stride=1),\n nn.BatchNorm1d(64)\n )\n self.block3 = nn.Sequential(\n nn.Conv1d(in_channels=64, out_channels=64, kernel_size=3, stride=1),\n nn.ReLU(), nn.BatchNorm1d(64), \n nn.Conv1d(in_channels=64, out_channels=64, kernel_size=3, stride=1),\n nn.BatchNorm1d(64)\n )\n\n feature_shape = 967 + np.prod(action_shape)\n layers = [feature_shape] + [hidden_layer_size]*num_layers\n self.last_linear = []\n for i, o in zip(layers[:-1], layers[1:]):\n print(i, o)\n self.last_linear.append(nn.Linear(i, o))\n self.last_linear.append(nn.ReLU(inplace=True))\n self.last_linear = nn.Sequential(*self.last_linear)\n\n def forward(self, obs, state=None, info={}):\n obs = to_torch(obs, device=self.device, dtype=torch.float32)\n obs = obs.reshape(obs.size(0), -1)\n\n batch_size = obs.shape[0]\n laser = obs.view(batch_size, 1, -1)[:,:,:721]\n params = obs.view(batch_size, -1)[:,721:]\n\n embedding1 = self.block1(laser)\n embedding2 = self.block2(embedding1)\n \n y = torch.cat((embedding1, embedding2), dim = 2)\n y = nn.ReLU()(y)\n # embedding3 = self.block3(y)\n\n # y = torch.cat((embedding2, embedding3), dim = 2)\n y = nn.ReLU()(y)\n y = nn.AvgPool1d(10)(y)\n y = y.view(batch_size, -1)\n\n feature = torch.cat((y, params), dim = 1)\n feature = self.last_linear(feature)\n \n return feature, state\n\n\n\n\n" ]
[ [ "torch.nn.Sequential", "torch.nn.BatchNorm1d", "torch.cat", "torch.nn.MaxPool1d", "torch.nn.Linear", "numpy.prod", "torch.nn.Conv1d", "torch.nn.ReLU", "torch.nn.AvgPool1d" ] ]
18150167970/knowledge-distillation
[ "90989b6b4a696645841b892d9a0e0019d186d642" ]
[ "model/faster_rcnn.py" ]
[ "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nimport torch as t\nimport numpy as np\nimport cupy as cp\nfrom utils import array_tool as at\nfrom model.utils.bbox_tools import loc2bbox\nfrom model.utils.nms import non_maximum_suppression\n\nfrom torch import nn\nfrom data.dataset import preprocess\nfrom torch.nn import functional as F\nfrom utils.config import opt\n\n\ndef nograd(f):\n def new_f(*args, **kwargs):\n with t.no_grad():\n return f(*args, **kwargs)\n return new_f\n\n\nclass FasterRCNN(nn.Module):\n \"\"\"Base class for Faster R-CNN.\n\n This is a base class for Faster R-CNN links supporting object detection\n API [#]_. The following three stages constitute Faster R-CNN.\n\n 1. **Feature extraction**: Images are taken and their \\\n feature maps are calculated.\n 2. **Region Proposal Networks**: Given the feature maps calculated in \\\n the previous stage, produce set of RoIs around objects.\n 3. **Localization and Classification Heads**: Using feature maps that \\\n belong to the proposed RoIs, classify the categories of the objects \\\n in the RoIs and improve localizations.\n\n Each stage is carried out by one of the callable\n :class:`torch.nn.Module` objects :obj:`feature`, :obj:`rpn` and :obj:`head`.\n\n There are two functions :meth:`predict` and :meth:`__call__` to conduct\n object detection.\n :meth:`predict` takes images and returns bounding boxes that are converted\n to image coordinates. This will be useful for a scenario when\n Faster R-CNN is treated as a black box function, for instance.\n :meth:`__call__` is provided for a scnerario when intermediate outputs\n are needed, for instance, for training and debugging.\n\n Links that support obejct detection API have method :meth:`predict` with\n the same interface. Please refer to :meth:`predict` for\n further details.\n\n .. [#] Shaoqing Ren, Kaiming He, Ross Girshick, Jian Sun. \\\n Faster R-CNN: Towards Real-Time Object Detection with \\\n Region Proposal Networks. NIPS 2015.\n\n Args:\n extractor (nn.Module): A module that takes a BCHW image\n array and returns feature maps.\n rpn (nn.Module): A module that has the same interface as\n :class:`model.region_proposal_network.RegionProposalNetwork`.\n Please refer to the documentation found there.\n head (nn.Module): A module that takes\n a BCHW variable, RoIs and batch indices for RoIs. 
This returns class\n dependent localization paramters and class scores.\n loc_normalize_mean (tuple of four floats): Mean values of\n localization estimates.\n loc_normalize_std (tupler of four floats): Standard deviation\n of localization estimates.\n\n \"\"\"\n\n def __init__(self, extractor, rpn, head,\n loc_normalize_mean=(0., 0., 0., 0.),\n loc_normalize_std=(0.1, 0.1, 0.2, 0.2)\n ):\n super(FasterRCNN, self).__init__()\n self.extractor = extractor\n self.rpn = rpn\n self.head = head\n\n # mean and std\n self.loc_normalize_mean = loc_normalize_mean\n self.loc_normalize_std = loc_normalize_std\n self.use_preset('evaluate')\n # self.conv = nn.Conv2d(512, 1, 1)\n\n @property\n def n_class(self):\n # Total number of classes including the background.\n return self.head.n_class\n\n def forward(self, x, scale=1.):\n \"\"\"Forward Faster R-CNN.\n\n Scaling paramter :obj:`scale` is used by RPN to determine the\n threshold to select small objects, which are going to be\n rejected irrespective of their confidence scores.\n\n Here are notations used.\n\n * :math:`N` is the number of batch size\n * :math:`R'` is the total number of RoIs produced across batches. \\\n Given :math:`R_i` proposed RoIs from the :math:`i` th image, \\\n :math:`R' = \\\\sum _{i=1} ^ N R_i`.\n * :math:`L` is the number of classes excluding the background.\n\n Classes are ordered by the background, the first class, ..., and\n the :math:`L` th class.\n\n Args:\n x (autograd.Variable): 4D image variable.\n scale (float): Amount of scaling applied to the raw image\n during preprocessing.\n\n Returns:\n Variable, Variable, array, array:\n Returns tuple of four values listed below.\n\n * **roi_cls_locs**: Offsets and scalings for the proposed RoIs. \\\n Its shape is :math:`(R', (L + 1) \\\\times 4)`.\n * **roi_scores**: Class predictions for the proposed RoIs. \\\n Its shape is :math:`(R', L + 1)`.\n * **rois**: RoIs proposed by RPN. Its shape is \\\n :math:`(R', 4)`.\n * **roi_indices**: Batch indices of RoIs. Its shape is \\\n :math:`(R',)`.\n\n \"\"\"\n img_size = x.shape[2:]\n\n h = self.extractor(x)\n rpn_locs, rpn_scores, rois, roi_indices, anchor = \\\n self.rpn(h, img_size, scale)\n roi_cls_locs, roi_scores = self.head(\n h, rois, roi_indices)\n return roi_cls_locs, roi_scores, rois, roi_indices\n\n def use_preset(self, preset):\n \"\"\"Use the given preset during prediction.\n\n This method changes values of :obj:`self.nms_thresh` and\n :obj:`self.score_thresh`. 
These values are a threshold value\n used for non maximum suppression and a threshold value\n to discard low confidence proposals in :meth:`predict`,\n respectively.\n\n If the attributes need to be changed to something\n other than the values provided in the presets, please modify\n them by directly accessing the public attributes.\n\n Args:\n preset ({'visualize', 'evaluate'): A string to determine the\n preset to use.\n\n \"\"\"\n if preset == 'visualize':\n self.nms_thresh = 0.3\n self.score_thresh = 0.7\n elif preset == 'evaluate':\n self.nms_thresh = 0.3\n self.score_thresh = opt.predict_socre\n else:\n raise ValueError('preset must be visualize or evaluate')\n\n def _suppress(self, raw_cls_bbox, raw_prob):\n bbox = list()\n label = list()\n score = list()\n # skip cls_id = 0 because it is the background class\n for l in range(1, self.n_class):\n cls_bbox_l = raw_cls_bbox.reshape((-1, self.n_class, 4))[:, l, :]\n prob_l = raw_prob[:, l]\n mask = prob_l > self.score_thresh\n cls_bbox_l = cls_bbox_l[mask]\n prob_l = prob_l[mask]\n # print cp.array(cls_bbox_l).shape\n keep = non_maximum_suppression(\n cp.array(cls_bbox_l), self.nms_thresh, prob_l)\n keep = cp.asnumpy(keep)\n bbox.append(cls_bbox_l[keep])\n # The labels are in [0, self.n_class - 2].\n label.append((l - 1) * np.ones((len(keep),)))\n score.append(prob_l[keep])\n bbox = np.concatenate(bbox, axis=0).astype(np.float32)\n label = np.concatenate(label, axis=0).astype(np.int32)\n score = np.concatenate(score, axis=0).astype(np.float32)\n return bbox, label, score\n\n @nograd\n def predict(self, imgs, sizes=None, visualize=False):\n \"\"\"Detect objects from images.\n\n This method predicts objects for each image.\n\n Args:\n imgs (iterable of numpy.ndarray): Arrays holding images.\n All images are in CHW and RGB format\n and the range of their value is :math:`[0, 255]`.\n\n Returns:\n tuple of lists:\n This method returns a tuple of three lists,\n :obj:`(bboxes, labels, scores)`.\n\n * **bboxes**: A list of float arrays of shape :math:`(R, 4)`, \\\n where :math:`R` is the number of bounding boxes in a image. \\\n Each bouding box is organized by \\\n :math:`(y_{min}, x_{min}, y_{max}, x_{max})` \\\n in the second axis.\n * **labels** : A list of integer arrays of shape :math:`(R,)`. \\\n Each value indicates the class of the bounding box. \\\n Values are in range :math:`[0, L - 1]`, where :math:`L` is the \\\n number of the foreground classes.\n * **scores** : A list of float arrays of shape :math:`(R,)`. \\\n Each value indicates how confident the prediction is.\n\n \"\"\"\n self.eval()\n if visualize:\n self.use_preset('visualize')\n prepared_imgs = list()\n sizes = list()\n for img in imgs:\n size = img.shape[1:]\n img = preprocess(at.tonumpy(img))\n prepared_imgs.append(img)\n sizes.append(size)\n else:\n prepared_imgs = imgs\n bboxes = list()\n labels = list()\n scores = list()\n feature = list()\n for img, size in zip(prepared_imgs, sizes):\n\n img = at.totensor(img[None]).float()\n scale = img.shape[3] / size[1]\n roi_cls_loc, roi_scores, rois, _ = self(img, scale=scale)\n # We are assuming that batch size is 1.\n roi_score = roi_scores.data\n\n roi_cls_loc = roi_cls_loc.data\n roi = at.totensor(rois) / scale\n\n # Convert predictions to bounding boxes in image coordinates.\n # Bounding boxes are scaled to the scale of the input images.\n mean = t.Tensor(self.loc_normalize_mean).cuda(). \\\n repeat(self.n_class)[None]\n std = t.Tensor(self.loc_normalize_std).cuda(). 
\\\n repeat(self.n_class)[None]\n\n roi_cls_loc = (roi_cls_loc * std + mean)\n roi_cls_loc = roi_cls_loc.view(-1, self.n_class, 4)\n roi = roi.view(-1, 1, 4).expand_as(roi_cls_loc)\n cls_bbox = loc2bbox(at.tonumpy(roi).reshape((-1, 4)),\n at.tonumpy(roi_cls_loc).reshape((-1, 4)))\n cls_bbox = at.totensor(cls_bbox)\n cls_bbox = cls_bbox.view(-1, self.n_class * 4)\n # clip bounding box\n cls_bbox[:, 0::2] = (cls_bbox[:, 0::2]).clamp(min=0, max=size[0])\n cls_bbox[:, 1::2] = (cls_bbox[:, 1::2]).clamp(min=0, max=size[1])\n\n prob = at.tonumpy(F.softmax(at.totensor(roi_score), dim=1))\n\n raw_cls_bbox = at.tonumpy(cls_bbox)\n raw_prob = at.tonumpy(prob)\n\n bbox, label, score = self._suppress(raw_cls_bbox, raw_prob)\n bboxes.append(bbox)\n labels.append(label)\n scores.append(score)\n h = self.extractor(img)\n # h = self.conv(h)\n feature.append(h)\n self.use_preset('evaluate')\n self.train()\n return bboxes, labels, scores, h\n\n # 梯度下降规则\n def get_optimizer(self):\n \"\"\"\n return optimizer, It could be overwriten if you want to specify\n special optimizer\n \"\"\"\n lr = opt.lr\n params = []\n for key, value in dict(self.named_parameters()).items():\n if value.requires_grad:\n if 'bias' in key:\n params += [{'params': [value],\n 'lr': lr * 2, 'weight_decay': 0}]\n else:\n params += [{'params': [value], 'lr': lr,\n 'weight_decay': opt.weight_decay}]\n if opt.use_adam:\n self.optimizer = t.optim.Adam(params)\n else:\n self.optimizer = t.optim.SGD(params, momentum=0.9)\n return self.optimizer\n\n def scale_lr(self, decay=0.1):\n for param_group in self.optimizer.param_groups:\n param_group['lr'] *= decay\n return self.optimizer\n" ]
[ [ "torch.optim.Adam", "torch.Tensor", "numpy.concatenate", "torch.no_grad", "torch.optim.SGD" ] ]
gustavo1046/WebCrawler---Python
[ "9180cc111629e9bda3ddcb309b2ff94a77822425" ]
[ "mining.py" ]
[ "\r\nimport requests\r\nfrom bs4 import BeautifulSoup as bs\r\nimport pandas as pd\r\nfrom time import sleep\r\nfrom selenium import webdriver\r\n\r\nurl = 'https://www.usquidditch.org/teams'\r\n\r\n\r\n#open selenium - use your own info for PATH\r\nPATH = (r\"C:\\\\Users\\\\Gustavo\\\\Desktop\\\\scoutfy\\\\Team USA\\\\chromedriver.exe\")\r\n\r\ndriver = webdriver.Chrome(PATH)\r\ndriver.get(url)\r\ndriver.implicitly_wait(3)\r\n\r\n\r\n#wait for page loading\r\nsleep(2)\r\n#html got with selenium\r\nselenium_get = driver.page_source\r\n\r\nsoup = bs(selenium_get, 'html.parser')\r\n\r\nlink=[]\r\n\r\n#close selenium\r\ndriver.close() \r\n\r\nfor a in soup.findAll('a'):\r\n link.append(a['href'])\r\n\r\n#get every team website\r\nlink=link[115:346]\r\n#get every team's info from the main page\r\ninfo = soup.find('tbody').findAll('tr')\r\n\r\ndef find_acronym(s):\r\n #return only letters between ()\r\n return s[s.find(\"(\")+1:s.find(\")\")]\r\n\r\ndef find_class(soup):\r\n list =[]\r\n for el in range(230):\r\n element = (soup.find(\"table\", attrs = {\"class\" : \"teams-display\"}))\r\n list.append(element.find(\"tr\", attrs ={\"id\" : \"t\"+str(el)}).find_all(\"td\")[2].get_text())\r\n# find_class(soup)\r\n\r\ndef find_socialmedia(soup):\r\n refs = []\r\n listsocial = []\r\n acuts = (soup.find_all(\"a\"))\r\n for el in acuts:\r\n refs.append(el.get(\"href\"))\r\n listsocial.append(refs)\r\n return listsocial\r\n# find_socialmedia(soup)\r\n\r\n\r\ndef main():\r\n # this list will store every dictionary of each club\r\n my_list = []\r\n\r\n for i in range(len(info)):\r\n # This dict will store the information of each club\r\n my_dict = {}\r\n \r\n my_dict['Name'] = info[i].findAll('td')[0].text\r\n my_dict['City'] = info[i].findAll('td')[1].text.split(',')[0].strip()\r\n my_dict['State'] = info[i].findAll('td')[1].text.split(',')[1].strip()\r\n\r\n #here it's oppened every team's new page with another info\r\n \r\n url = 'https://www.usquidditch.org' + str(link[i])\r\n \r\n agent = {\"User-Agent\":'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36'}\r\n req = requests.get(url, headers=agent)\r\n soup = bs(req.content, features=\"lxml\") \r\n \r\n my_dict['Acronym'] = find_acronym(soup.find('h2').text)\r\n my_dict['social_media'] = find_socialmedia(soup.find(\"ul\", attrs ={\"class\" : \"team-social-media-links\"}))\r\n\r\n #this are all the constants that must be added\r\n my_dict['Category'] = 'Non Recognized'\r\n my_dict['Sport'] = 'Quidditch'\r\n my_dict['Country'] = 'United States of America'\r\n my_dict['Affilated'] = 'US Quidditch'\r\n my_dict['Recorded By'] = 'Gustavo Florindo Oliveira'\r\n my_dict['Recorded Date'] = '27/02/2022'\r\n\r\n my_list.append(my_dict)\r\n\r\n return my_list\r\n\r\nres = main()\r\n\r\ndf = pd.DataFrame(data=res)\r\ndf.to_excel('US_Quidditch.xlsx', index=False)" ]
[ [ "pandas.DataFrame" ] ]
Wangbaiyue007/nmt-multi30k-pytorch
[ "3b1724ef3a049a6c55cfb21217508195b8b653e6" ]
[ "utils.py" ]
[ "import copy\nimport torch\nfrom collections import Counter\nfrom torchtext.vocab import Vocab\nfrom torch.nn.utils import clip_grad_norm_\nfrom torch.nn.utils.rnn import pad_sequence\n\ndef train(model, train_iter, optimizer, loss_fn, device):\n # global steps\n model.train()\n losses = 0\n for (src, tgt) in train_iter:\n src = src.to(device)\n tgt = tgt.to(device)\n\n tgt_input = tgt[:-1, :]\n logits = model(src, tgt_input)\n tgt_out = tgt[1:, :]\n\n optimizer.zero_grad()\n \n loss = loss_fn(logits.reshape(-1, logits.shape[-1]), tgt_out.reshape(-1))\n loss.backward()\n\n clip_grad_norm_(model.parameters(), max_norm=1)\n \n optimizer.step()\n # steps += 1\n# for param_group in optimizer.param_groups:\n# param_group['lr'] = lrate(steps)\n \n losses += loss.item()\n return losses / len(train_iter)\n\ndef evaluate(model, val_iter, loss_fn, device):\n model.eval()\n losses = 0\n for (src, tgt) in val_iter:\n src = src.to(device)\n tgt = tgt.to(device)\n\n tgt_input = tgt[:-1, :]\n logits = model(src, tgt_input)\n tgt_out = tgt[1:, :]\n\n loss = loss_fn(logits.reshape(-1, logits.shape[-1]), tgt_out.reshape(-1))\n losses += loss.item()\n\n return losses / len(val_iter)\n\ndef build_vocab(vocab_pth, tokenizer, min_freq=1):\n count = Counter()\n with open(vocab_pth, mode='r', encoding=\"utf8\") as f:\n texts = f.readlines()\n for text in texts:\n count.update(tokenizer(text.lower().rstrip(\"\\n\")))\n\n return Vocab(count, min_freq=min_freq, specials=['<unk>', '<pad>', '<bos>', '<eos>'])\n \ndef data_process(filepaths, src_vocab, tgt_vocab, src_tokenizer, tgt_tokenizer):\n raw_de_iter = iter(open(filepaths[0], encoding=\"utf8\"))\n raw_en_iter = iter(open(filepaths[1], encoding=\"utf8\"))\n data = []\n for (raw_de, raw_en) in zip(raw_de_iter, raw_en_iter):\n de_tensor = torch.tensor([src_vocab[token] for token in src_tokenizer(raw_de.lower().rstrip(\"\\n\"))], dtype=torch.long)\n en_tensor = torch.tensor([tgt_vocab[token] for token in tgt_tokenizer(raw_en.lower().rstrip(\"\\n\"))], dtype=torch.long)\n data.append((de_tensor, en_tensor))\n\n return data\n\ndef get_collate_fn(PAD_IDX,BOS_IDX,EOS_IDX):\n def generate_batch(data_batch):\n de_batch, en_batch = [], []\n for (de_item, en_item) in data_batch:\n de_batch.append(torch.cat([torch.tensor([BOS_IDX]), de_item, torch.tensor([EOS_IDX])], dim=0))\n en_batch.append(torch.cat([torch.tensor([BOS_IDX]), en_item, torch.tensor([EOS_IDX])], dim=0))\n de_batch = pad_sequence(de_batch, padding_value=PAD_IDX)\n en_batch = pad_sequence(en_batch, padding_value=PAD_IDX)\n return de_batch, en_batch\n return generate_batch\n\ndef count_parameters(model):\n params = 0\n for param in model.parameters():\n if param.requires_grad:\n params += param.numel()\n\n return params\n\ndef generate_square_subsequent_mask(sz):\n mask = (torch.triu(torch.ones((sz, sz))) == 1).transpose(0, 1)\n mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))\n return mask\n\ndef greedy_decode(model, src, src_mask, max_len, start_symbol, end_symbol, device=('cuda' if torch.cuda.is_available() else 'cpu')):\n src = src.to(device)\n src_mask = src_mask.to(device)\n\n memory = model.encode(src, src_mask)\n ys = torch.ones(1, 1).fill_(start_symbol).type(torch.long).to(device)\n for _ in range(max_len - 1):\n memory = memory.to(device)\n # memory_mask = torch.zeros(ys.shape[0], memory.shape[0]).to(device).type(torch.bool)\n tgt_mask = (generate_square_subsequent_mask(ys.size(0), device).type(torch.bool)).to(device)\n out = model.decode(ys, memory, 
tgt_mask)\n out = out.transpose(0, 1)\n prob = model.generator(out[:, -1])\n _, next_word = torch.max(prob, dim=1)\n next_word = next_word.item()\n\n ys = torch.cat([ys, torch.ones(1, 1).type_as(src.data).fill_(next_word)], dim=0)\n if next_word == end_symbol:\n break\n\n return ys\n\ndef translate(model, src, src_vocab, tgt_vocab, src_tokenizer, BOS_IDX, EOS_IDX, device):\n model.eval()\n tokens = [BOS_IDX] + [src_vocab.stoi[tok] for tok in src_tokenizer(src)] + [EOS_IDX]\n num_tokens = len(tokens)\n src = (torch.LongTensor(tokens).reshape(num_tokens, 1))\n src_mask = (torch.zeros(num_tokens, num_tokens)).type(torch.bool)\n tgt_tokens = greedy_decode(model,\n src,\n src_mask,\n max_len=num_tokens + 5,\n start_symbol=BOS_IDX,\n end_symbol=EOS_IDX,\n device=device).flatten()\n \n return \" \".join([tgt_vocab.itos[tok] for tok in tgt_tokens]).replace(\"<bos>\", \"\").replace(\"<eos>\", \"\")\n" ]
[ [ "torch.LongTensor", "torch.ones", "torch.max", "torch.zeros", "torch.nn.utils.rnn.pad_sequence", "torch.tensor", "torch.cuda.is_available" ] ]
razofz/scarf-1
[ "2c97ed6d9433ff95154ce8b26f382139a708207d" ]
[ "scarf/knn_utils.py" ]
[ "\"\"\"\nUtility functions for running the KNN algorithm.\n\"\"\"\nimport numpy as np\nfrom .writers import create_zarr_dataset\nfrom .ann import AnnStream\nfrom .utils import tqdmbar\nimport pandas as pd\nfrom scipy.sparse import csr_matrix, coo_matrix\nfrom typing import List\nfrom numba import jit\n\n\n__all__ = [\"self_query_knn\", \"smoothen_dists\", \"export_knn_to_mtx\", \"merge_graphs\"]\n\n\ndef self_query_knn(ann_obj: AnnStream, store, chunk_size: int, nthreads: int) -> float:\n \"\"\"\n Constructs KNN graph.\n\n Args:\n ann_obj ():\n store ():\n chunk_size ():\n nthreads (): Number of threads to use.\n\n Returns:\n None\n \"\"\"\n from threadpoolctl import threadpool_limits\n\n n_cells, n_neighbors = ann_obj.nCells, ann_obj.k\n z_knn = create_zarr_dataset(\n store, \"indices\", (chunk_size,), \"u8\", (n_cells, n_neighbors)\n )\n z_dist = create_zarr_dataset(\n store, \"distances\", (chunk_size,), \"f8\", (n_cells, n_neighbors)\n )\n nsample_start = 0\n tnm = 0 # Number of missed recall\n with threadpool_limits(limits=nthreads):\n for i in ann_obj.iter_blocks(msg=\"Saving KNN graph\"):\n nsample_end = nsample_start + i.shape[0]\n ki, kv, nm = ann_obj.transform_ann(\n ann_obj.reducer(i),\n k=n_neighbors,\n self_indices=np.arange(nsample_start, nsample_end),\n )\n z_knn[nsample_start:nsample_end, :] = ki\n z_dist[nsample_start:nsample_end, :] = kv\n nsample_start = nsample_end\n tnm += nm\n recall = ann_obj.data.shape[0] - tnm\n recall = 100 * recall / ann_obj.data.shape[0]\n return recall\n\n\ndef _is_umap_version_new():\n import umap\n from packaging import version\n\n if version.parse(umap.__version__) >= version.parse(\"0.5.0\"):\n return True\n else:\n return False\n\n\ndef smoothen_dists(store, z_idx, z_dist, lc: float, bw: float, chunk_size: int):\n \"\"\"\n Smoothens KNN distances.\n\n Args:\n store ():\n z_idx ():\n z_dist ():\n lc ():\n bw ():\n chunk_size ():\n\n Returns:\n None\n\n \"\"\"\n from umap.umap_ import smooth_knn_dist, compute_membership_strengths\n\n umap_is_latest = _is_umap_version_new()\n\n n_cells, n_neighbors = z_idx.shape\n zge = create_zarr_dataset(\n store, f\"edges\", (chunk_size,), (\"u8\", \"u8\"), (n_cells * n_neighbors, 2)\n )\n zgw = create_zarr_dataset(\n store, f\"weights\", (chunk_size,), \"f8\", (n_cells * n_neighbors,)\n )\n last_row = 0\n val_counts = 0\n null_idx = []\n global_min = 1\n for i in tqdmbar(range(0, n_cells, chunk_size), desc=\"Smoothening KNN distances\"):\n if i + chunk_size > n_cells:\n ki, kv = z_idx[i:n_cells, :], z_dist[i:n_cells, :]\n else:\n ki, kv = z_idx[i : i + chunk_size, :], z_dist[i : i + chunk_size, :]\n kv = kv.astype(np.float32, order=\"C\")\n sigmas, rhos = smooth_knn_dist(\n kv, k=n_neighbors, local_connectivity=lc, bandwidth=bw\n )\n if umap_is_latest:\n rows, cols, vals, _ = compute_membership_strengths(ki, kv, sigmas, rhos)\n else:\n rows, cols, vals = compute_membership_strengths(ki, kv, sigmas, rhos)\n rows = rows + last_row\n start = val_counts\n end = val_counts + len(rows)\n last_row = rows[-1] + 1\n val_counts += len(rows)\n zge[start:end, 0] = rows\n zge[start:end, 1] = cols\n zgw[start:end] = vals\n\n # Fixing edges with 0 weights\n # We are doing these steps here to have minimum operations outside\n # the scope of a progress bar\n nidx = vals == 0\n if nidx.sum() > 0:\n min_val = vals[~nidx].min()\n if min_val < global_min:\n global_min = min_val\n null_idx.extend(nidx)\n\n # The whole zarr array needs to copied, modified and written back.\n # Or is this assumption wrong?\n w = zgw[:]\n 
w[null_idx] = global_min\n zgw[:] = w\n return None\n\n\ndef export_knn_to_mtx(mtx: str, csr_graph, batch_size: int = 1000) -> None:\n \"\"\"\n Exports KNN matrix in Matrix Market format.\n\n Args:\n mtx:\n csr_graph:\n batch_size:\n\n Returns:\n None\n\n \"\"\"\n n_cells = csr_graph.shape[0]\n with open(mtx, \"w\") as h:\n h.write(\"%%MatrixMarket matrix coordinate real general\\n% Generated by Scarf\\n\")\n h.write(f\"{n_cells} {n_cells} {csr_graph.nnz}\\n\")\n s = 0\n for e in tqdmbar(\n range(batch_size, n_cells + batch_size, batch_size),\n desc=\"Saving KNN matrix in MTX format\",\n ):\n if e > n_cells:\n e = n_cells\n sg = csr_graph[s:e].tocoo()\n df = pd.DataFrame({\"row\": sg.row + s + 1, \"col\": sg.col + 1, \"d\": sg.data})\n df.to_csv(h, sep=\" \", header=False, index=False, mode=\"a\")\n s = e\n if s != n_cells:\n raise ValueError(\n \"ERROR: Internal loop count error in export_knn_to_mtx. Please report this bug\"\n )\n return None\n\n\n@jit(nopython=True)\ndef calc_snn(indices: np.ndarray) -> np.ndarray:\n \"\"\"\n Calculates shared nearest neighbour between each node and its neighbour.\n\n Args:\n indices: KNN graph indices\n\n Returns: A numpy matrix of shape (n_cells, n neighbours)\n\n \"\"\"\n ncells, nk = indices.shape\n snn = np.zeros((ncells, nk))\n for i in range(ncells):\n for j in range(nk):\n k = indices[i][j]\n snn[i][j] = len(set(indices[i]).intersection(set(indices[k])))\n return snn / (nk - 1)\n\n\ndef weight_sort_indices(\n i: np.ndarray, w: np.ndarray, wn: np.ndarray, n: int\n) -> (np.ndarray, np.ndarray):\n \"\"\"\n Sort the array i and w based on values of wn. Only keep the top n values.\n\n Args:\n i: A 1D array of indices\n w: A 1D array of weights\n wn: A 1D array of weights. These weights are used for sorting\n n: Number of neighbours to retain.\n\n Returns: A tuple of two 1D arrays representing sorted and filtered\n indices and their corresponding weights\n\n \"\"\"\n\n idx = np.argsort(wn)[::-1]\n i = i[idx]\n w = w[idx]\n # Removing duplicate neighbours\n _, idx = np.unique(i, return_index=True)\n idx = sorted(idx)\n return i[idx][:n], w[idx][:n]\n\n\ndef merge_graphs(csr_mats: List[csr_matrix]) -> coo_matrix:\n \"\"\"\n Merge multiple graphs of same size and shape such that the merged graph have the same size and shape.\n Edge values are sorted based on their weight and the shared neighbours.\n\n Args:\n csr_mats: A list of two or more CSR matrices representing the graphs to be merged.\n\n Returns: A merged graph in CSR matrix form.\n The merged graph has same number of edges as each graph\n\n \"\"\"\n try:\n assert len(set([x.shape for x in csr_mats])) == 1\n except AssertionError:\n raise ValueError(\"ERROR: All graphs do not have the same shape.\")\n try:\n assert len(set([x.size for x in csr_mats])) == 1\n except AssertionError:\n raise ValueError(\"ERROR: All graphs do not have the same number of edges\")\n\n nk = csr_mats[0][0].indices.shape[0]\n snns = []\n for mat in tqdmbar(csr_mats, desc=\"Identifying SNNs in graphs\"):\n snns.append(\n calc_snn(mat.indices.reshape((mat.shape[0], mat[0].indices.shape[0])))\n )\n row, data = [], []\n for i in tqdmbar(range(csr_mats[0].shape[0]), desc=\"Merging graph edges\"):\n mi = np.hstack([mat[i].indices for mat in csr_mats])\n mwn = np.hstack([mat[i].data + snns[n][i] for n, mat in enumerate(csr_mats)])\n mw = np.hstack([mat[i].data for mat in csr_mats])\n mi, mw = weight_sort_indices(mi, mw, mwn, nk)\n row.extend(mi)\n data.extend(mw)\n s = csr_mats[0].shape\n col = np.repeat(range(s[0]), nk)\n return 
coo_matrix((data, (row, col)), shape=s)\n" ]
[ [ "numpy.hstack", "scipy.sparse.coo_matrix", "numpy.unique", "numpy.arange", "pandas.DataFrame", "numpy.argsort", "numpy.zeros" ] ]
cbrnr/gsoc2021
[ "b0663b74445681ad4e7d1bb0937488be1aa37b4b" ]
[ "prototypes/originals/plot_pyqtgraph.py" ]
[ "\"\"\"\nThis is a prototype of a Raw-Plot based on pyqtgraph.\nIt was originally created by Clemens Brunner (https://github.com/cbrnr).\n\"\"\"\nimport sys\nimport numpy as np\nfrom PyQt5.QtWidgets import QApplication\nimport pyqtgraph as pg\nfrom pyqtgraph import PlotCurveItem\nimport mne\n\n\nclass RawCurveItem(PlotCurveItem):\n def __init__(self, raw=None, *args, **kwargs):\n self._raw = raw\n self.limit = 1000 # maximum number of samples to be plotted\n super().__init__(*args, **kwargs)\n\n @property\n def raw(self):\n return self._raw\n\n @raw.setter\n def raw(self, value):\n self._raw = value\n self.viewRangeChanged()\n\n def viewRangeChanged(self):\n viewbox = self.getViewBox()\n if viewbox is None:\n return\n xrange = viewbox.viewRange()[0]\n start = max(0, int(xrange[0]))\n stop = min(len(self.raw), int(xrange[1] + 1))\n\n ds = int((stop - start) / self.limit) + 1\n print(ds)\n if ds == 1:\n visible = self.raw[start:stop]\n scale = 1\n else:\n # Here convert data into a down-sampled array suitable for visualizing.\n # Must do this piecewise to limit memory usage.\n samples = 1 + ((stop-start) // ds)\n visible = np.zeros(samples*2, dtype=self.raw.dtype)\n sourcePtr = start\n targetPtr = 0\n\n # read data in chunks of ~1M samples\n chunkSize = (1000000//ds) * ds\n while sourcePtr < stop-1:\n chunk = self.raw[sourcePtr:min(stop,sourcePtr+chunkSize)]\n sourcePtr += len(chunk)\n\n # reshape chunk to be integral multiple of ds\n chunk = chunk[:(len(chunk)//ds) * ds].reshape(len(chunk)//ds, ds)\n\n # compute max and min\n chunkMax = chunk.max(axis=1)\n chunkMin = chunk.min(axis=1)\n\n # interleave min and max into plot data to preserve envelope shape\n visible[targetPtr:targetPtr+chunk.shape[0]*2:2] = chunkMin\n visible[1+targetPtr:1+targetPtr+chunk.shape[0]*2:2] = chunkMax\n targetPtr += chunk.shape[0]*2\n\n visible = visible[:targetPtr]\n scale = ds * 0.5\n self.setData(visible)\n self.setPos(start, 0)\n self.resetTransform()\n self.scale(scale, 1)\n\n\n# load data\nfrom mne.datasets import sample\ndata_path = sample.data_path()\nraw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'\n\nraw = mne.io.read_raw_fif(raw_fname, preload=True)\nraw.filter(1, None)\ndata = raw.get_data()\n\n# create window with empty layout\napp = QApplication(sys.argv)\npg.setConfigOptions(background=\"w\", foreground=\"k\", antialias=True)\nwin = pg.GraphicsView()\nwin.resize(1000, 800)\nlayout = pg.GraphicsLayout()\nlayout.setSpacing(0)\nwin.setCentralItem(layout)\nwin.show()\n\nplots = []\nfor ch in range(100): # add plots in rows\n p = layout.addPlot(row=ch, col=0, enableMenu=False)\n p.setXRange(0, 2000)\n p.setLimits(xMin=0, xMax=data.shape[1])\n curve = RawCurveItem(data[ch] * 1e6)\n curve.setPen(\"k\")\n p.addItem(curve)\n plots.append(p)\nfor ch in range(100 - 1): # link axes\n plots[ch].setXLink(plots[ch + 1])\n plots[ch].setYLink(plots[ch + 1])\n plots[ch].getAxis(\"bottom\").setStyle(showValues=False)\n plots[ch].getAxis(\"bottom\").hide()\n\nlayout.setSpacing(0)\n\nsys.exit(app.exec())\n" ]
[ [ "numpy.zeros" ] ]
jbussemaker/pyCycle
[ "656a14fc89cd397fd00913c0ac7465a5f7e5cb95" ]
[ "pycycle/elements/heat_exchanger.py" ]
[ "import numpy as np\nimport openmdao.api as om\n\nfrom pycycle.thermo.thermo import Thermo\n\nfrom pycycle.thermo.cea.species_data import janaf\nfrom pycycle.constants import AIR_ELEMENTS\nfrom pycycle.flow_in import FlowIn\nfrom pycycle.passthrough import PassThrough\n\n\nclass Areas(om.ExplicitComponent):\n \"\"\"\n Compute the area of the heat exchanger\n \"\"\"\n def setup(self):\n self.add_input('length_hex', units='ft', desc='length of the heat exchanger')\n self.add_input('radius_hex', units='ft', desc='radius of a heat exchanger tube')\n self.add_input('number_hex', desc='number of heat exchanger tubes')\n\n self.add_output('area_hex_external', units='ft**2', desc='external area of the heat exchanger')\n self.add_output('area_hex_internal', units='ft**2', desc='internal area of the heat exchanger')\n\n self.declare_partials('area_hex_external', '*')\n self.declare_partials('area_hex_internal', '*')\n self.set_check_partial_options('*', method='cs')\n\n def compute(self, inputs, outputs):\n outputs['area_hex_external'] = 2*np.pi*inputs['radius_hex']*inputs['length_hex']*inputs['number_hex']\n outputs['area_hex_internal'] = np.pi*inputs['radius_hex']**2*inputs['number_hex']\n\n def compute_partials(self, inputs, J):\n J['area_hex_external', 'length_hex'] = 2*np.pi*inputs['radius_hex']*inputs['number_hex']\n J['area_hex_external', 'radius_hex'] = 2*np.pi*inputs['length_hex']*inputs['number_hex']\n J['area_hex_external', 'number_hex'] = 2*np.pi*inputs['radius_hex']*inputs['length_hex']\n J['area_hex_internal', 'radius_hex'] = 2*np.pi*inputs['radius_hex']*inputs['number_hex']\n J['area_hex_internal', 'number_hex'] = np.pi*inputs['radius_hex']**2\n\n\nclass Coeff(om.ExplicitComponent):\n \"\"\"\n Compute the coefficients of the fluid and coolant\n \"\"\"\n def setup(self):\n self.add_input('W_fluid', units='lbm/s', desc='fluid mass flow')\n self.add_input('Cp_fluid', units='Btu/(lbm*degR)', desc='heat capacity at constant pressure of fluid')\n self.add_input('W_cool', units='lbm/s', desc='coolant mass flow')\n self.add_input('Cp_cool', units='Btu/(lbm*degR)', desc='heat capacity at constant pressure of coolant')\n\n self.add_output('C_max', units='Btu/(s*degR)', desc='minimum heat coefficient')\n self.add_output('C_min', units='Btu/(s*degR)', desc='maximum heat coefficient')\n self.add_output('C_r', desc='ratio heat coefficients')\n\n self.declare_partials('C_min', '*')\n self.declare_partials('C_max', '*')\n self.declare_partials('C_r', '*')\n self.set_check_partial_options('*', method='cs')\n\n def compute(self, inputs, outputs):\n if inputs['W_fluid']*inputs['Cp_fluid'] > inputs['W_cool']*inputs['Cp_cool']:\n outputs['C_max'] = inputs['W_fluid']*inputs['Cp_fluid']\n outputs['C_min'] = inputs['W_cool']*inputs['Cp_cool']\n outputs['C_r'] = inputs['W_cool']*inputs['Cp_cool']/(inputs['W_fluid']*inputs['Cp_fluid'])\n else:\n outputs['C_min'] = inputs['W_fluid']*inputs['Cp_fluid']\n outputs['C_max'] = inputs['W_cool']*inputs['Cp_cool']\n outputs['C_r'] = inputs['W_fluid']*inputs['Cp_fluid']/(inputs['W_cool']*inputs['Cp_cool'])\n\n def compute_partials(self, inputs, J):\n if inputs['W_fluid']*inputs['Cp_fluid'] > inputs['W_cool']*inputs['Cp_cool']:\n J['C_max', 'W_fluid'] = inputs['Cp_fluid']\n J['C_max', 'Cp_fluid'] = inputs['W_fluid']\n J['C_min', 'W_cool'] = inputs['Cp_cool']\n J['C_min', 'Cp_cool'] = inputs['W_cool']\n J['C_r', 'W_fluid'] = -inputs['W_cool']*inputs['Cp_cool']/(inputs['W_fluid']**2*inputs['Cp_fluid'])\n J['C_r', 'Cp_fluid'] = 
-inputs['W_cool']*inputs['Cp_cool']/(inputs['W_fluid']*inputs['Cp_fluid']**2)\n J['C_r', 'W_cool'] = inputs['Cp_cool']/(inputs['W_fluid']*inputs['Cp_fluid'])\n J['C_r', 'Cp_cool'] = inputs['W_cool']/(inputs['W_fluid']*inputs['Cp_fluid'])\n else:\n J['C_min', 'W_fluid'] = inputs['Cp_fluid']\n J['C_min', 'Cp_fluid'] = inputs['W_fluid']\n J['C_max', 'W_cool'] = inputs['Cp_cool']\n J['C_max', 'Cp_cool'] = inputs['W_cool']\n J['C_r', 'W_fluid'] = inputs['Cp_fluid']/(inputs['W_cool']*inputs['Cp_cool'])\n J['C_r', 'Cp_fluid'] = inputs['W_fluid']/(inputs['W_cool']*inputs['Cp_cool'])\n J['C_r', 'W_cool'] = -inputs['W_fluid']*inputs['Cp_fluid']/(inputs['W_cool']**2*inputs['Cp_cool'])\n J['C_r', 'Cp_cool'] = -inputs['W_fluid']*inputs['Cp_fluid']/(inputs['W_cool']*inputs['Cp_cool']**2)\n\n\nclass NTUCalc(om.ExplicitComponent):\n \"\"\"\n Compute the NTU\n \"\"\"\n def setup(self):\n self.add_input('area_hex', units='ft**2', desc='area of the heat exchanger')\n self.add_input('h_overall', units='Btu/(s*degR*ft**2)', desc='overall heat transfer coefficient')\n self.add_input('C_min', units='Btu/(s*degR)', desc='minimum heat coefficient')\n\n self.add_output('NTU', desc='NTU result')\n\n self.declare_partials('NTU', '*')\n self.set_check_partial_options('*', method='cs')\n\n def compute(self, inputs, outputs):\n outputs['NTU'] = inputs['area_hex']*inputs['h_overall']/inputs['C_min']\n\n def compute_partials(self, inputs, J):\n J['NTU', 'area_hex'] = inputs['h_overall']/inputs['C_min']\n J['NTU', 'h_overall'] = inputs['area_hex']/inputs['C_min']\n J['NTU', 'C_min'] = -inputs['area_hex']*inputs['h_overall']/inputs['C_min']**2\n\n\nclass EffCalc(om.ExplicitComponent):\n \"\"\"\n Calculate the effectiveness\n \"\"\"\n def setup(self):\n self.add_input('W_fluid', units='lbm/s', desc='fluid mass flow')\n self.add_input('Cp_fluid', units='Btu/(lbm*degR)', desc='heat capacity at constant pressure of fluid')\n self.add_input('W_cool', units='lbm/s', desc='coolant mass flow')\n self.add_input('Cp_cool', units='Btu/(lbm*degR)', desc='heat capacity at constant pressure of coolant')\n\n self.add_input('C_r', desc='ratio heat coefficients')\n self.add_input('NTU', desc='NTU result')\n\n self.add_output('eff', desc='effectiveness result')\n\n self.declare_partials('eff', '*')\n self.set_check_partial_options('*', method='cs')\n\n def compute(self, inputs, outputs):\n if inputs['W_fluid']*inputs['Cp_fluid'] > inputs['W_cool']*inputs['Cp_cool']:\n outputs['eff'] = 1-np.exp(-inputs['C_r']**(-1)*(1-np.exp(-inputs['C_r']*inputs['NTU'])))\n else:\n outputs['eff'] = (1/inputs['C_r'])*(1-np.exp(-inputs['C_r']*(1-np.exp(-inputs['NTU']))))\n\n def compute_partials(self, inputs, J):\n if inputs['W_fluid']*inputs['Cp_fluid'] > inputs['W_cool']*inputs['Cp_cool']:\n J['eff', 'C_r'] = -np.exp(-inputs['C_r']**(-1)*(1-np.exp(-inputs['C_r']*inputs['NTU'])))* \\\n (1-np.exp(-inputs['C_r']*inputs['NTU'])/inputs['C_r']**2-inputs['NTU']*np.exp(-inputs['C_r']*inputs['NTU'])/inputs['C_r'])\n J['eff', 'NTU'] = np.exp(-inputs['C_r']**(-1)*(1-np.exp(-inputs['C_r']*inputs['NTU']))-inputs['C_r']*inputs['NTU'])\n else:\n J['eff', 'C_r'] = -(1/inputs['C_r'])*(1-np.exp(-inputs['C_r']*(1-np.exp(-inputs['NTU']))))/inputs['C_r']**2 - \\\n (np.exp(-inputs['NTU'])-1)*np.exp(-inputs['C_r']*(1-np.exp(-inputs['NTU'])))/inputs['C_r']\n J['eff', 'NTU'] = np.exp(-inputs['C_r']*(1-np.exp(-inputs['NTU']))-inputs['NTU'])\n\n\nclass Qactual(om.ExplicitComponent):\n \"\"\"\n Compute actual heat transfer rate\n \"\"\"\n def setup(self):\n self.add_input('eff', 
desc='effectiveness result')\n self.add_input('C_min', units='Btu/(s*degR)', desc='minimum heat coefficient')\n self.add_input('T_fluid_in', units='degR', desc='temperature of incoming fluid')\n self.add_input('T_cool_in', units='degR', desc='temperature of incoming coolant')\n\n self.add_output('q_actual', units='Btu/s', desc='actual heat transfer per mass flow rate')\n\n self.declare_partials('q_actual', '*')\n self.set_check_partial_options('*', method='cs')\n\n def compute(self, inputs, outputs):\n outputs['q_actual'] = inputs['eff']*inputs['C_min']*(inputs['T_fluid_in']-inputs['T_cool_in'])\n\n def compute_partials(self, inputs, J):\n J['q_actual', 'eff'] = inputs['C_min']*(inputs['T_fluid_in']-inputs['T_cool_in'])\n J['q_actual', 'C_min'] = inputs['eff']*(inputs['T_fluid_in']-inputs['T_cool_in'])\n J['q_actual', 'T_fluid_in'] = inputs['eff']*inputs['C_min']\n J['q_actual', 'T_cool_in'] = -inputs['eff']*inputs['C_min']\n\n\nclass TempChanges(om.ExplicitComponent):\n \"\"\"\n Compute temperature changes of fluid and coolant\n \"\"\"\n def setup(self):\n self.add_input('ht_in_fluid', units='Btu/lbm', desc='fluid incoming total enthalpy')\n self.add_input('ht_in_cool', units='Btu/lbm', desc='coolant incoming total enthalpy')\n self.add_input('q_actual', units='Btu/s', desc='heat ratio per mass flow rate')\n self.add_input('W_fluid', units='lbm/s', desc='fluid mass flow')\n self.add_input('W_cool', units='lbm/s', desc='coolant mass flow')\n\n self.add_output('ht_out_fluid', units='Btu/lbm', desc='fluid outgoing total enthalpy')\n self.add_output('ht_out_cool', units='Btu/lbm', desc='coolant outgoing total enthalpy')\n\n self.declare_partials('ht_out_fluid', '*')\n self.declare_partials('ht_out_cool', '*')\n self.set_check_partial_options('*', method='cs')\n\n def compute(self, inputs, outputs):\n outputs['ht_out_fluid'] = inputs['ht_in_fluid']-inputs['q_actual']/inputs['W_fluid']\n outputs['ht_out_cool'] = inputs['ht_in_cool']+inputs['q_actual']/inputs['W_cool']\n\n def compute_partials(self, inputs, J):\n J['ht_out_fluid', 'ht_in_fluid'] = 1.\n J['ht_out_fluid', 'q_actual'] = -1./inputs['W_fluid']\n J['ht_out_fluid', 'W_fluid'] = inputs['q_actual']/inputs['W_fluid']**2\n J['ht_out_cool', 'ht_in_cool'] = 1.\n J['ht_out_cool', 'q_actual'] = 1./inputs['W_cool']\n J['ht_out_cool', 'W_cool'] = -inputs['q_actual']/inputs['W_cool']**2\n\n\nclass PressureLossFlow(om.ExplicitComponent):\n \"\"\"\n Calculates pressure loss of the 2 flows\n Equations taken from Compact Heat Exchangers (Kays & London, 2018)\n \"\"\"\n def setup(self):\n self.add_input('W', units='lbm/s', desc='mass flow')\n self.add_input('rho_in', units='lbm/ft**3', desc='inlet density')\n self.add_input('ff', desc='friction factor')\n self.add_input('area_hex', units='ft**2', desc='area of the heat exchanger')\n self.add_input('area_flow', units='ft**2', desc='minimum flow cross sectional area')\n\n self.add_output('p_loss', units='lbf/ft**2', desc='pressure loss of flow')\n\n self.declare_partials('p_loss', '*')\n self.set_check_partial_options('*', method='cs')\n\n def compute(self, inputs, outputs):\n outputs['p_loss'] = inputs['W']**2*inputs['ff']*inputs['area_hex']/(2*inputs['rho_in']*inputs['area_flow']**3)\n\n def compute_partials(self, inputs, J):\n J['p_loss', 'W'] = 2*inputs['W']*inputs['ff']*inputs['area_hex']/(2*inputs['rho_in']*inputs['area_flow']**3)\n J['p_loss', 'rho_in'] = -inputs['W']**2*inputs['ff']*inputs['area_hex']/(2*inputs['rho_in']**2*inputs['area_flow']**3)\n J['p_loss', 'ff'] = 
inputs['W']**2*inputs['area_hex']/(2*inputs['rho_in']*inputs['area_flow']**3)\n J['p_loss', 'area_hex'] = inputs['W']**2*inputs['ff']/(2*inputs['rho_in']*inputs['area_flow']**3)\n J['p_loss', 'area_flow'] = -3*inputs['W']**2*inputs['ff']*inputs['area_hex']/(2*inputs['rho_in']*inputs['area_flow']**4)\n\n\nclass PressureLoss(om.ExplicitComponent):\n \"\"\"\n Calculates pressure loss across the heat exchanger.\n \"\"\"\n def setup(self):\n self.add_input('dPqP_fluid', units='lbf/ft**2', val=0.03,\n desc='pressure differential as a fraction of incoming pressure')\n self.add_input('dPqP_cool', units='lbf/ft**2', val=0.12,\n desc='pressure differential as a fraction of incoming pressure')\n self.add_input('Pt_in_fluid', units='lbf/ft**2', desc='fluid inlet total pressure')\n self.add_input('Pt_in_cool', units='lbf/ft**2', desc='coolant inlet total pressure')\n\n self.add_output('Pt_out_fluid', units='lbf/ft**2', desc='fluid exit total pressure', lower=1e-3)\n self.add_output('Pt_out_cool', units='lbf/ft**2', desc='coolant exit total pressure', lower=1e-3)\n\n self.declare_partials('Pt_out_fluid', '*')\n self.declare_partials('Pt_out_cool', '*')\n self.set_check_partial_options('*', method='cs')\n\n def compute(self, inputs, outputs):\n outputs['Pt_out_fluid'] = inputs['Pt_in_fluid'] - inputs['dPqP_fluid']\n outputs['Pt_out_cool'] = inputs['Pt_in_cool'] - inputs['dPqP_cool']\n\n def compute_partials(self, inputs, J):\n J['Pt_out_fluid', 'dPqP_fluid'] = -1.0\n J['Pt_out_fluid', 'Pt_in_fluid'] = 1.0\n J['Pt_out_cool', 'dPqP_cool'] = -1.0\n J['Pt_out_cool', 'Pt_in_cool'] = 1.0\n\n\nclass HeatExchanger(om.Group):\n \"\"\"\n Calculates the outlet stations temperatures of the heat exchanger\n\n --------------\n Flow Stations\n --------------\n Fl_fluid_I\n Fl_fluid_O\n Fl_cool_I\n Fl_cool_O\n\n -------------\n Design\n -------------\n inputs\n --------\n Fl_fluid_I\n Fl_cool_I\n\n outputs\n --------\n Fl_fluid_O\n Fl_cool_O\n -------------\n Off-Design\n -------------\n inputs\n --------\n\n\n implicit states\n ---------------\n\n\n \"\"\"\n\n def initialize(self):\n\n self.options.declare('thermo_data', default=janaf,\n desc='thmodynamic data set', recordable=False)\n self.options.declare('Fl_I1_elements', default=AIR_ELEMENTS,\n desc='set of elements present in the fluid')\n self.options.declare('Fl_I2_elements', default=AIR_ELEMENTS,\n desc='set of elements present in the coolant')\n self.options.declare('statics', default=True,\n desc='If True, calculate static properties.')\n self.options.declare('design', default=True,\n desc='Switch between on-design and off-design calculation.')\n self.options.declare('designed_stream', default=1, values=(1, 2),\n desc='control for which stream has its area varied to match static pressure (1 means, you vary Fl_I1)')\n self.options.declare('internal_solver', default=True,\n desc='If True, a newton solver is used inside the mixer to converge the impulse balance')\n\n def setup(self):\n thermo_data = self.options['thermo_data']\n fluid_elements = self.options['Fl_I1_elements']\n coolant_elements = self.options['Fl_I2_elements']\n statics = self.options['statics']\n design = self.options['design']\n\n in_flow = FlowIn(fl_name='Fl_I1')\n self.add_subsystem('in_flow_fluid', in_flow, promotes=['Fl_I1:tot:*', 'Fl_I1:stat:*']) # 1 = fluid\n\n in_flow = FlowIn(fl_name='Fl_I2')\n self.add_subsystem('in_flow_cool', in_flow, promotes=['Fl_I2:tot:*', 'Fl_I2:stat:*']) # 2 = coolant\n\n # Calculate the heat exchanger area\n prom_in = ['length_hex', 'radius_hex', 
'number_hex']\n self.add_subsystem('areas', Areas(), promotes_inputs=prom_in)\n\n # Calculate the different coefficients\n prom_in = [('W_fluid', 'Fl_I1:stat:W'), ('Cp_fluid', 'Fl_I1:stat:Cp'), ('W_cool', 'Fl_I2:stat:W'), ('Cp_cool', 'Fl_I2:stat:Cp')]\n self.add_subsystem('coeff', Coeff(), promotes_inputs=prom_in)\n\n # Calculate the NTU\n prom_in = [('area_hex', 'area_hex_external'), 'h_overall', 'C_min']\n self.add_subsystem('ntu', NTUCalc(), promotes_inputs=prom_in)\n\n # Calculate the efficiency\n prom_in = [('W_fluid', 'Fl_I1:stat:W'), ('Cp_fluid', 'Fl_I1:stat:Cp'), ('W_cool', 'Fl_I2:stat:W'), ('Cp_cool', 'Fl_I2:stat:Cp'), 'C_r', 'NTU']\n self.add_subsystem('eff_calc', EffCalc(), promotes_inputs=prom_in)\n\n # Calculate actual heat transfer rate\n prom_in = ['eff', 'C_min', ('T_fluid_in', 'Fl_I1:stat:T'), ('T_cool_in', 'Fl_I2:stat:T')]\n self.add_subsystem('q_calc', Qactual(), promotes_inputs=prom_in)\n\n # Calculate fluid and coolant temperature changes\n prom_in = [('ht_in_fluid', 'Fl_I1:tot:h'), ('ht_in_cool', 'Fl_I2:tot:h'), 'q_actual', ('W_fluid', 'Fl_I1:stat:W'), ('W_cool', 'Fl_I2:stat:W')]\n self.add_subsystem('temp_changes', TempChanges(), promotes_inputs=prom_in)\n\n # Calculate the core pressure drop\n prom_in = [('W', 'Fl_I1:stat:W'), ('rho_in', 'Fl_I1:stat:rho'), ('ff', 'ff_core'), ('area_hex', 'area_hex_internal'), ('area_flow', 'Fl_I1:stat:area')]\n self.add_subsystem('p_loss_core', PressureLossFlow(), promotes_inputs=prom_in)\n\n # Calculate the bypass pressure drop\n prom_in = [('W', 'Fl_I2:stat:W'), ('rho_in', 'Fl_I2:stat:rho'), ('ff', 'ff_bypass'), ('area_hex', 'area_hex_external'), ('area_flow', 'Fl_I2:stat:area')]\n self.add_subsystem('p_loss_bypass', PressureLossFlow(), promotes_inputs=prom_in)\n\n # Calculate fluid and coolant pressure changes\n prom_in = [('Pt_in_fluid', 'Fl_I1:tot:P'), ('Pt_in_cool', 'Fl_I2:tot:P'), 'dPqP_fluid', 'dPqP_cool']\n self.add_subsystem('p_loss', PressureLoss(), promotes_inputs=prom_in)\n\n # Connect all calculations\n self.connect('areas.area_hex_external', 'area_hex_external')\n self.connect('areas.area_hex_internal', 'area_hex_internal')\n self.connect('coeff.C_min', 'C_min')\n self.connect('coeff.C_r', 'C_r')\n self.connect('ntu.NTU', 'NTU')\n self.connect('eff_calc.eff', 'eff')\n self.connect('q_calc.q_actual', 'q_actual')\n self.connect('p_loss_core.p_loss', 'dPqP_fluid')\n self.connect('p_loss_bypass.p_loss', 'dPqP_cool')\n\n # Total Calc\n real_flow_fluid = Thermo(mode='total_hP', fl_name='Fl_O1:tot',\n method='CEA',\n thermo_kwargs={'elements': fluid_elements,\n 'spec': thermo_data})\n prom_in = [('composition', 'Fl_I1:tot:composition')]\n self.add_subsystem('real_flow_fluid', real_flow_fluid, promotes_inputs=prom_in,\n promotes_outputs=['Fl_O1:*'])\n self.connect('temp_changes.ht_out_fluid', 'real_flow_fluid.h')\n self.connect('p_loss.Pt_out_fluid', 'real_flow_fluid.P')\n\n real_flow_cool = Thermo(mode='total_hP', fl_name='Fl_O2:tot',\n method='CEA',\n thermo_kwargs={'elements':coolant_elements,\n 'spec': thermo_data})\n prom_in = [('composition', 'Fl_I2:tot:composition')]\n self.add_subsystem('real_flow_cool', real_flow_cool, promotes_inputs=prom_in,\n promotes_outputs=['Fl_O2:*'])\n self.connect('temp_changes.ht_out_cool', 'real_flow_cool.h')\n self.connect('p_loss.Pt_out_cool', 'real_flow_cool.P')\n\n if statics:\n if design:\n # Calculate static properties\n out_stat_fluid = Thermo(mode='static_A', fl_name='Fl_O1:stat',\n method='CEA',\n thermo_kwargs={'elements': fluid_elements,\n 'spec': thermo_data})\n prom_in = 
[('composition', 'Fl_I1:tot:composition'),\n ('W', 'Fl_I1:stat:W'), ('area', 'Fl_I1:stat:area')]\n prom_out = ['Fl_O1:stat:*']\n self.add_subsystem('out_stat_fluid', out_stat_fluid, promotes_inputs=prom_in,\n promotes_outputs=prom_out)\n\n self.connect('Fl_O1:tot:S', 'out_stat_fluid.S')\n self.connect('Fl_O1:tot:h', 'out_stat_fluid.ht')\n self.connect('Fl_O1:tot:P', 'out_stat_fluid.guess:Pt')\n self.connect('Fl_O1:tot:gamma', 'out_stat_fluid.guess:gamt')\n\n out_stat_cool = Thermo(mode='static_A', fl_name='Fl_O2:stat',\n method='CEA',\n thermo_kwargs={'elements': coolant_elements,\n 'spec': thermo_data})\n prom_in = [('composition', 'Fl_I2:tot:composition'),\n ('W', 'Fl_I2:stat:W'), ('area', 'Fl_I2:stat:area')]\n prom_out = ['Fl_O2:stat:*']\n self.add_subsystem('out_stat_cool', out_stat_cool, promotes_inputs=prom_in,\n promotes_outputs=prom_out)\n\n self.connect('Fl_O2:tot:S', 'out_stat_cool.S')\n self.connect('Fl_O2:tot:h', 'out_stat_cool.ht')\n self.connect('Fl_O2:tot:P', 'out_stat_cool.guess:Pt')\n self.connect('Fl_O2:tot:gamma', 'out_stat_cool.guess:gamt')\n else:\n # Calculate static properties\n out_stat_fluid = Thermo(mode='static_A', fl_name='Fl_O1:stat',\n method='CEA',\n thermo_kwargs={'elements': fluid_elements,\n 'spec': thermo_data})\n prom_in = [('composition', 'Fl_I1:tot:composition'),\n ('W', 'Fl_I1:stat:W'), ('area', 'Fl_I1:stat:area')]\n prom_out = ['Fl_O1:stat:*']\n self.add_subsystem('out_stat_fluid', out_stat_fluid, promotes_inputs=prom_in,\n promotes_outputs=prom_out)\n\n self.connect('Fl_O1:tot:S', 'out_stat_fluid.S')\n self.connect('Fl_O1:tot:h', 'out_stat_fluid.ht')\n self.connect('Fl_O1:tot:P', 'out_stat_fluid.guess:Pt')\n self.connect('Fl_O1:tot:gamma', 'out_stat_fluid.guess:gamt')\n\n out_stat_cool = Thermo(mode='static_A', fl_name='Fl_O2:stat',\n method='CEA',\n thermo_kwargs={'elements': coolant_elements,\n 'spec': thermo_data})\n prom_in = [('composition', 'Fl_I2:tot:composition'),\n ('W', 'Fl_I2:stat:W'), ('area', 'Fl_I2:stat:area')]\n prom_out = ['Fl_O2:stat:*']\n self.add_subsystem('out_stat_cool', out_stat_cool, promotes_inputs=prom_in,\n promotes_outputs=prom_out)\n\n self.connect('Fl_O2:tot:S', 'out_stat_cool.S')\n self.connect('Fl_O2:tot:h', 'out_stat_cool.ht')\n self.connect('Fl_O2:tot:P', 'out_stat_cool.guess:Pt')\n self.connect('Fl_O2:tot:gamma', 'out_stat_cool.guess:gamt')\n else:\n self.add_subsystem('W_passthru_fluid', PassThrough('Fl_I1:stat:W', 'Fl_O1:stat:W', 1.0, units=\"lbm/s\"), promotes=['*'])\n self.add_subsystem('W_passthru_cool', PassThrough('Fl_I2:stat:W', 'Fl_O2:stat:W', 1.0, units=\"lbm/s\"), promotes=['*'])" ]
[ [ "numpy.exp" ] ]
lauriraudla/pOliver
[ "ef2a6cc7c04a2d6946854c15b084e75e64038625" ]
[ "LUT.py" ]
[ "import numpy as np\n\n# distances_LUT = np.array(\n# [0.5581873069080149, 0.673282329238654, 0.775489401649724, 0.880352084715722, 1.0049236875481926,\n# 1.1211567901331803, 1.2531766413769805, 1.3515307222188782, 1.437071400857621, 1.5519779219846181,\n# 1.728146900469168, 1.8134751192793028, 1.9285571729392947, 2.0473719519786098, 2.1612241891527417,\n# 2.2097560471350994, 2.2826633643130867, 2.4182262426644714, 2.541580164900719, 2.610065617127852,\n# 2.7475799442339346, 2.9416952821297375, 2.968958913665457, 3.087244192759196, 3.186368930339813,\n# 3.26907265531844, 3.3630390005204283, 3.4349546865983442]\n# )\n\ndistances_LUT = np.array(\n [0.55, 0.7, 0.83, 1.0, 1.14, 1.26, 1.37, 1.55, 1.7, 1.82, 1.89, 1.94, 2.08,\n 2.22, 2.39, 2.41, 2.48, 2.65, 2.69, 2.81, 2.93, 3.06, 3.23, 3.42]\n)\n\nspeeds_LUT = np.array(\n [45, 44, 47, 55, 58, 62, 63, 65, 72, 75, 78, 81, 82, 88, 91, 93, 96, 100, 102, 104, 107, 114, 119, 122])\n\n#688, 687, 687, 689, 691, 688, 717, 677, 680, 668, 686, 691, 689, 673, 680, 659, 651, 668, 666, 651, 679, 664, 668, 662, 668, 655, 673, 675\n\n\ndef get_thrower_speed(distance):\n # print(\"alustan kiiruse arvutamist\")\n best_match = speeds_LUT[np.argmin(np.absolute(distances_LUT-distance))]\n # print(\"kiirus arvutatud\")\n return best_match\n\n#distances_LUT = np.array(\n #[239, 147, 151, 108, 81, 79, 60, 60, 47, 37, 113, 146])\n#speeds_LUT = np.array(\n #[58, 67, 77, 77, 87, 87, 99, 100, 114, 126, 76, 68])\n" ]
[ [ "numpy.array", "numpy.absolute" ] ]
lukedottec/ai-gym
[ "1a94777c7d6d4c53871855758ea0c0b8d325f297" ]
[ "unity/p3_collab-compet/agent.py" ]
[ "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nfrom collections import namedtuple, deque\nimport copy\nimport random\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nimport torch.optim as optim\n\nfrom model import Actor, Critic\n\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n# Hyperparameters\nBUFFER_SIZE = int(1e5)\nBATCH_SIZE = 64\nGAMMA = 0.97\nTAU = 1e-3\nLR_ACTOR = 1e-4\nLR_CRITIC = 1e-4\nWEIGHT_DECAY = 0\nCHECKPOINT_FOLDER = './'\n\n\nclass OUNoise:\n \"\"\"Ornstein-Uhlenbeck process.\"\"\"\n def __init__(self, size, seed, mu=0., theta=0.15, sigma=0.2):\n \"\"\"Initialize parameters and noise process.\"\"\"\n self.mu = mu * np.ones(size)\n self.theta = theta\n self.sigma = sigma\n self.seed = random.seed(seed)\n self.size = size\n self.reset()\n def reset(self):\n \"\"\"Reset the internal state (= noise) to mean (mu).\"\"\"\n self.state = copy.copy(self.mu)\n def sample(self):\n \"\"\"Update internal state and return it as a noise sample.\"\"\"\n x = self.state\n # dx = self.theta * (self.mu - x) + self.sigma * np.array([random.random() for i in range(len(x))])\n dx = self.theta * (self.mu - x) + self.sigma * np.random.standard_normal(self.size)\n self.state = x + dx\n return self.state\n\n\nclass ReplayBuffer:\n \"\"\"Fixed-size buffer to store experience tuples.\"\"\"\n\n def __init__(self, action_size, buffer_size, batch_size, seed):\n \"\"\"Initialize a ReplayBuffer object.\n Params\n ======\n buffer_size (int): maximum size of buffer\n batch_size (int): size of each training batch\n \"\"\"\n self.action_size = action_size\n self.memory = deque(maxlen=int(buffer_size)) # internal memory (deque)\n self.batch_size = batch_size\n self.experience = namedtuple(\"Experience\", field_names=[\"state\", \"action\", \"reward\", \"next_state\", \"done\"])\n self.seed = random.seed(seed)\n def add(self, state, action, reward, next_state, done):\n \"\"\"Add a new experience to memory.\"\"\"\n e = self.experience(state, action, reward, next_state, done)\n self.memory.append(e)\n def sample(self):\n \"\"\"Randomly sample a batch of experiences from memory.\"\"\"\n experiences = random.sample(self.memory, k=self.batch_size)\n states = torch.from_numpy(np.vstack([e.state for e in experiences if e is not None])).float().to(device)\n actions = torch.from_numpy(np.vstack([e.action for e in experiences if e is not None])).float().to(device)\n rewards = torch.from_numpy(np.vstack([e.reward for e in experiences if e is not None])).float().to(device)\n next_states = torch.from_numpy(np.vstack([e.next_state for e in experiences if e is not None])).float().to(device)\n dones = torch.from_numpy(np.vstack([e.done for e in experiences if e is not None]).astype(np.uint8)).float().to(device)\n return (states, actions, rewards, next_states, dones)\n def __len__(self):\n \"\"\"Return the current size of internal memory.\"\"\"\n return len(self.memory)\n\n\nclass ActorCriticAgents():\n \"\"\"Interacts with and learns from the environment.\"\"\"\n def __init__(self, state_size, action_size, num_agents, random_seed):\n \"\"\"Initialize an Agent object.\n\n Params\n ======\n state_size (int): dimension of each state\n action_size (int): dimension of each action\n num_agents (int): number of agents\n random_seed (int): random seed\n \"\"\"\n self.state_size = state_size\n self.action_size = action_size\n self.num_agents = num_agents\n self.seed = random.seed(random_seed)\n self.BATCH_SIZE = BATCH_SIZE\n self.GAMMA = GAMMA\n self.TAU = TAU\n self.LR_ACTOR = 
LR_ACTOR\n self.LR_CRITIC = LR_CRITIC\n self.CRITIC_WEIGHT_DECAY = WEIGHT_DECAY\n # Actor Network (w/ Target Network)\n self.actor_local = Actor(state_size, action_size, random_seed).to(device)\n self.actor_target = Actor(state_size, action_size, random_seed).to(device)\n self.actor_optimizer = optim.Adam(self.actor_local.parameters(), lr=LR_ACTOR)\n # Critic Network (w/ Target Network)\n self.critic_local = Critic(state_size, action_size, random_seed).to(device)\n self.critic_target = Critic(state_size, action_size, random_seed).to(device)\n self.critic_optimizer = optim.Adam(self.critic_local.parameters(), lr=LR_CRITIC, weight_decay=self.CRITIC_WEIGHT_DECAY)\n # Noise process\n self.noise = OUNoise((num_agents, action_size), random_seed)\n # Replay memory\n self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, random_seed)\n def step(self, state, action, reward, next_state, done):\n \"\"\"Save experience in replay memory, and use random sample from buffer to learn.\"\"\"\n # Save experience / reward\n for i in range(self.num_agents):\n self.memory.add(state[i,:], action[i,:], reward[i], next_state[i,:], done[i])\n # Learn, if enough samples are available in memory\n if len(self.memory) > self.BATCH_SIZE:\n experiences = self.memory.sample()\n self.learn(experiences, self.GAMMA)\n def act(self, states, add_noise=True):\n \"\"\"Returns actions for given state as per current policy.\"\"\"\n states = torch.from_numpy(states).float().to(device)\n actions = np.zeros((self.num_agents, self.action_size))\n self.actor_local.eval()\n with torch.no_grad():\n for agent_num, state in enumerate(states):\n action = self.actor_local(state).cpu().data.numpy()\n actions[agent_num, :] = action\n self.actor_local.train()\n if add_noise:\n actions += self.noise.sample()\n return np.clip(actions, -1, 1)\n def reset(self):\n self.noise.reset()\n def learn(self, experiences, gamma):\n \"\"\"Update policy and value parameters using given batch of experience tuples.\n Q_targets = r + γ * critic_target(next_state, actor_target(next_state))\n where:\n actor_target(state) -> action\n critic_target(state, action) -> Q-value\n Params\n ======\n experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples\n gamma (float): discount factor\n \"\"\"\n states, actions, rewards, next_states, dones = experiences\n # ---------------------------- update critic ---------------------------- #\n # Get predicted next-state actions and Q values from target models\n actions_next = self.actor_target(next_states)\n Q_targets_next = self.critic_target(next_states, actions_next)\n # Compute Q targets for current states (y_i)\n Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))\n # Compute critic loss\n Q_expected = self.critic_local(states, actions)\n critic_loss = F.mse_loss(Q_expected, Q_targets)\n # Minimize the loss\n self.critic_optimizer.zero_grad()\n critic_loss.backward()\n self.critic_optimizer.step()\n # ---------------------------- update actor ---------------------------- #\n # Compute actor loss\n actions_pred = self.actor_local(states)\n actor_loss = -self.critic_local(states, actions_pred).mean()\n # Minimize the loss\n self.actor_optimizer.zero_grad()\n actor_loss.backward()\n self.actor_optimizer.step()\n # ----------------------- update target networks ----------------------- #\n self.soft_update(self.critic_local, self.critic_target, self.TAU)\n self.soft_update(self.actor_local, self.actor_target, self.TAU)\n def soft_update(self, local_model, target_model, tau):\n \"\"\"Soft update 
model parameters.\n θ_target = τ*θ_local + (1 - τ)*θ_target\n Params\n ======\n local_model: PyTorch model (weights will be copied from)\n target_model: PyTorch model (weights will be copied to)\n tau (float): interpolation parameter\n \"\"\"\n for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):\n target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)" ]
[ [ "numpy.clip", "numpy.random.standard_normal", "torch.from_numpy", "numpy.ones", "torch.nn.functional.mse_loss", "torch.no_grad", "torch.cuda.is_available", "numpy.zeros", "numpy.vstack" ] ]
ereide/pyga-camcal
[ "fd25748ddb11c5b05ef24a2deca2689e0d899875" ]
[ "tests/testbenchmark.py" ]
[ "import numpy as np\n\nfrom pygacal.common.cgatools import *\n\nfrom pygacal.rotation import minimizeError\nfrom pygacal.rotation.benchmark import *\nfrom pygacal.rotation.costfunction import *\nfrom pygacal.rotation.mapping import ( RotorLineMapping, BivectorLineMapping, LinePropertyBivectorMapping, \n BivectorPlaneMapping, PlanePropertyBivectorMapping, \n BivectorLogCostLineMapping, BivectorLineEstimationMapping, BivectorLineMultMapping, \n BivectorWeightedLineMapping, BivectorLogSumLineMapping, BivectorSumLogLineMapping, \n ExtendedBivectorMapping)\n\nfrom pygacal.geometry import perturbeObject\nfrom pygacal.geometry.lines import RotorLine2Line, createRandomLines\nfrom pygacal.geometry.planes import RotorPlane2Plane, createRandomPlanes\n\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\n\n\nclass TestBenchmark(unittest.TestCase):\n def testBenchmarkLinesMinimizeError(self):\n seed = 21\n sigma_T = 0.005\n sigma_R = 0.002\n N = 10\n\n line1, line2 = createRandomLines(2)\n R_real = RotorLine2Line(line1, line2)\n\n traininglinesets = createNoisyLineSet(R_real, sigma_R, sigma_T, N)\n validationlinesets = createNoisyLineSet(R_real, sigma_R, sigma_T, N)\n print(\"Training and validation sets created with sig_r = %f and sig_t = %f, N = %d\" % (sigma_R, sigma_T, N))\n\n map_list = [BivectorLineMapping]\n\n for map_obj in map_list:\n np.random.seed(seed)\n print(map_obj.name)\n benchmarkMinimizeError(R_real, traininglinesets, validationlinesets, fileout = None, mapping = map_obj)\n\n def testBenchmarkPlanesMinimizeError(self):\n seed = 234\n sigma_T = 0.5\n sigma_R = 0.2\n N = 50 \n\n plane1, plane2 = createRandomPlanes(2, scale = 20)\n R_real = RotorPlane2Plane(plane1, plane2)\n\n trainingsets = createNoisyPlaneSet(R_real, sigma_R, sigma_T, N)\n validationsets = createNoisyPlaneSet(R_real, sigma_R, sigma_T, N)\n print(\"Training and validation sets created with sig_r = %f and sig_t = %f, N = %d\" % (sigma_R, sigma_T, N))\n\n print(\"Training and validation sets created with sig_r = %f and sig_t = %f, N = %d\" % (sigma_R, sigma_T, N))\n\n map_list = [BivectorPlaneMapping, PlanePropertyBivectorMapping]\n\n plot = Plot3D()\n plane = validationsets[0][0]\n plot.addPlane(R_real * plane * ~R_real, color='r')\n\n for map_obj in map_list:\n np.random.seed(seed)\n print(map_obj.name)\n _, _, R_min = benchmarkMinimizeError(R_real, trainingsets, validationsets, fileout = None, mapping = map_obj, verificationfunction = planePointCostMetric)\n\n plot.addPlane(R_min * plane * ~R_min, color = map_obj.color)\n\n plot.show(False)\n \n def testBenchmarkMinimizeErrorPlot(self):\n fileprint = False\n\n sigma_R = 0.005\n sigma_T = 0.007\n N_training = 30\n N_plot = 4\n\n line1, line2 = createRandomLines(2)\n R_real = RotorLine2Line(line1, line2)\n\n traininglinesets = createNoisyLineSet(R_real, sigma_R, sigma_T, N_training)\n R_min, nit = minimizeError(traininglinesets, RotorLineMapping) \n\n validationlines = createRandomLines(N_plot)\n plot = Plot3D()\n\n for line in validationlines:\n line_real = R_real * line * ~R_real\n line_est = R_min * line * ~R_min\n\n plot.addLine(line_real)\n plot.addLine(line_est)\n \n if fileprint:\n timestring =time.strftime(\"%Y%m%d-%H%M%S\")\n figname = \"../benchmarkreports/plot_%s.png\" %timestring\n plot.save(figname)\n\n def testBenchmarkMinimizeErrorParameters(self):\n print(\"\\nWARNING: VERY SLOW\")\n\n translation_errors = [0.0001, 0.01]\n rotation_errors = [0.0001]\n N_list = [5, 10]\n\n benchmarkMinimizeErrorParameters(translation_errors, 
rotation_errors, N_list)\n\n def testPlotParameterDependencies(self):\n print(\"\\nWARNING: VERY SLOW\")\n\n sigma = np.array([0.0001, 0.01, 1])\n N_list = np.array([3, 9, 15])\n\n benchmarkParameterErrorPlot(N_list = N_list, sigma=sigma, show = False)\n\n\n def testLinePointError(self):\n print(\"\\nWARNING: SLOW\")\n #Testing how using the distance from certain points on a line as a good external metric of how well we are performing.\n\n np.random.seed(10)\n\n sigma_R = 0.05 \n sigma_T = 0.07\n N = 20\n\n line1, line2 = createRandomLines(2)\n R_real = RotorLine2Line(line1, line2)\n\n traininglinesets = createNoisyLineSet(R_real, sigma_R, sigma_T, N)\n\n #Test various minimazation algorithms \n R_rotor, nit = minimizeError(traininglinesets, RotorLineMapping)\n R_bivector, nit = minimizeError(traininglinesets, BivectorLineMapping)\n R_lineproduct, nit = minimizeError(traininglinesets, LinePropertyBivectorMapping)\n R_dummy = RotorLine2Line(traininglinesets[0][0], traininglinesets[0][1]) #comparing it to just taking the first one we find\n \n R_list = [R_rotor, R_bivector, R_lineproduct, R_dummy]\n\n costs = []\n\n N_val = 10\n N_points = 4\n\n for R in R_list:\n costs.append(linePointCostMetric(R, R_real, N_val = N_val))\n\n print(\"rotor_pointcost \", costs[0])\n print(\"bivector_pointcost \", costs[1])\n print(\"lineproduct_pointcost \", costs[2])\n print(\"dummy_pointcost \", costs[3])\n \n\n def testExtremeLineRotation(self):\n print(\"\\nRunning testExtremeLineRotation\")\n print(\"\")\n np.random.seed(1)\n #Test extreme values\n sigma_R = 0.1 \n sigma_T = 1\n N_train = 100\n N_val = 20\n line_scale = 1000\n translation_scale = 1000\n\n line1, line2 = createRandomLines(2)\n a = createRandomVector(scale = translation_scale)\n print(\"Translated lineA by \", a)\n\n b = createRandomVector(scale = translation_scale)\n print(\"Translated lineB by \", b)\n\n T_a = Translator(a)\n T_b = Translator(b)\n\n #Move them far away from the origin\n lineA = T_a * line1 * ~T_a \n lineB = T_b * line2 * ~T_b \n\n R_real = RotorLine2Line(lineA, lineB)\n\n traininglinesets = createNoisyLineSet(R_real, sigma_R, sigma_T, N_train, scale = line_scale)\n validationlinesets = createNoisyLineSet(R_real, sigma_R, sigma_T, N_val, scale = line_scale)\n\n mappingList = [BivectorLineMapping]\n\n x0 = BivectorLineMapping.inverserotorconversion(R_real) \n x0 += np.array([0.1, 0.2, -0.1, 1, -0.1, 0.21])\n\n plot = Plot3D()\n\n testline = validationlinesets[0][0]\n plot.addLine(R_real * testline * ~R_real, color='r')\n\n for mapping in mappingList:\n\n\n t0 = time.time()\n print(\"Running %s\" % mapping.name)\n\n #Test various minimazation algorithms \n R_min, nit = minimizeError(traininglinesets, mapping, x0 = x0)\n\n costfunction = mapping.costfunction\n\n #Finding the cost if we used the actual rotor used to generate the matrix\n realtrainingcost = costfunction(R_real, traininglinesets)\n print(\"Real training cost is %s\" % str(realtrainingcost)) \n\n realvalidationcost = costfunction(R_real, validationlinesets)\n print(\"Real validation cost is %s\" % str(realvalidationcost)) \n\n minimumtrainingcost = costfunction(R_min, traininglinesets)\n print(\"minimized training cost %f\" % minimumtrainingcost)\n\n minimumvalidationcost = costfunction(R_min, validationlinesets)\n print(\"minimized validation cost = %f\" % minimumvalidationcost)\n\n realpointcost = linePointCostMetric(R_real, R_min, 10)\n print(\"Averaged cost for points a: %f, a + m: %f, a + 10m: %f, a + 100m: %f, a + 1000m: %f\" % tuple(realpointcost))\n\n 
print(\"\")\n print(\"R_real= %s\" % str(R_real/np.sign(float(R_real(0)))))\n print(\"R_min = %s\" % str(R_min/np.sign(float(R_min(0)))))\n\n B_real = ga_log(R_real/np.sign(float(R_real(0))))\n B_min = ga_log(R_min/np.sign(float(R_min(0))))\n\n print(\"B_real= %s\" % str(B_real))\n print(\"B_min = %s\" % str(B_min))\n\n print(\"\")\n print(\"C(R(B_real - B_min)) = %f\" % rotorAbsCostFunction(ga_exp(B_min - B_real)))\n\n\n plot.addLine(R_min * testline * ~R_min, color = mapping.color)\n\n t_end = time.time()\n\n print(\"\")\n print(\"Running time for extracting best rotor for %d line pairs is %f s\" % (N_train, t_end - t0))\n print(\"\\n\\n\")\n plot.show(False)\n\n @unittest.skip(\"Incorrect\")\n def testExtendedLineRotation(self):\n print(\"\\nRunning testExtendedLineRotation\")\n print(\"\")\n np.random.seed(1)\n #Test extreme values\n sigma_R = 0.1 \n sigma_T = 1\n N_train = 100\n N_val = 20\n line_scale = 40\n\n mapping = ExtendedBivectorMapping\n\n B_real = (0.21381*e12) + (0.64143*e13) + (2.73*e14) + (2.73*e15) + (0.42762*e23) + (3.14*e24) + (3.14*e25) + (0.8*e45)\n R_real = ga_exp_complicated(B_real)\n\n print(R_real)\n\n traininglinesets = createNoisyLineSet(R_real, sigma_R, sigma_T, N_train, scale = line_scale)\n validationlinesets = createNoisyLineSet(R_real, sigma_R, sigma_T, N_val, scale = line_scale)\n\n\n plot = Plot3D()\n\n testline = validationlinesets[0][0]\n plot.addLine(R_real * testline * ~R_real, color='r')\n\n\n\n t0 = time.time()\n print(\"Running %s\" % mapping.name)\n\n #Test various minimazation algorithms \n R_min, nit = minimizeError(traininglinesets, mapping, x0 = None)\n\n costfunction = mapping.costfunction\n\n #Finding the cost if we used the actual rotor used to generate the matrix\n realtrainingcost = costfunction(R_real, traininglinesets)\n print(\"Real training cost is %s\" % str(realtrainingcost)) \n\n realvalidationcost = costfunction(R_real, validationlinesets)\n print(\"Real validation cost is %s\" % str(realvalidationcost)) \n\n minimumtrainingcost = costfunction(R_min, traininglinesets)\n print(\"minimized training cost %f\" % minimumtrainingcost)\n\n minimumvalidationcost = costfunction(R_min, validationlinesets)\n print(\"minimized validation cost = %f\" % minimumvalidationcost)\n\n realpointcost = linePointCostMetric(R_real, R_min, 10)\n print(\"Averaged cost for points a: %f, a + m: %f, a + 10m: %f, a + 100m: %f, a + 1000m: %f\" % tuple(realpointcost))\n\n print(\"\")\n print(\"R_real= %s\" % str(R_real/np.sign(float(R_real(0)))))\n print(\"R_min = %s\" % str(R_min/np.sign(float(R_min(0)))))\n\n B_real = ga_log(R_real/np.sign(float(R_real(0))))\n B_min = ga_log(R_min/np.sign(float(R_min(0))))\n\n print(\"B_real= %s\" % str(B_real))\n print(\"B_min = %s\" % str(B_min))\n\n print(\"\")\n print(\"C(R(B_real - B_min)) = %f\" % rotorAbsCostFunction(ga_exp(B_min - B_real)))\n\n\n plot.addLine(R_min * testline * ~R_min, color = mapping.color)\n\n t_end = time.time()\n\n print(\"\")\n print(\"Running time for extracting best rotor for %d line pairs is %f s\" % (N_train, t_end - t0))\n print(\"\\n\\n\")\n \n plot.show(False)\n\n def testWeigthingFunction(self):\n print(\"\\nRunning testWeigthingFunction\")\n print(\"\")\n #np.random.seed(5)\n #Test on some extreme values\n sigma_R = 0.01 \n sigma_T = 0.1\n N_train = 100\n N_val = 20\n line_scale = 10\n translation_scale = 100\n\n line1, line2 = createRandomLines(2)\n a = createRandomVector(scale = translation_scale)\n print(\"Translated lineA by \", a)\n\n b = createRandomVector(scale = 
translation_scale)\n print(\"Translated lineB by \", b)\n\n T_a = Translator(a)\n T_b = Translator(b)\n\n #Move them far away from the origin\n lineA = T_a * line1 * ~T_a \n lineB = T_b * line2 * ~T_b \n\n R_real = RotorLine2Line(lineA, lineB)\n\n #x0 = BivectorWeightedLineMapping.inverserotorconversion(R_real) \n\n #x0 += np.array([15, 21, -11, 0.1, -0.1, 0.21])\n #R_start = BivectorWeightedLineMapping.\n #R_start = BivectorWeightedLineMapping.rotorconversion(x0)\n\n traininglinesets = createNoisyLineSet(R_real, sigma_R, sigma_T, N_train, scale = line_scale)\n validationlinesets = createNoisyLineSet(R_real, sigma_R, sigma_T, N_val, scale = line_scale)\n\n mapping = BivectorWeightedLineMapping\n\n weightList = [1e-4 , 1e-2, 1, 1e2, 1e4]\n #mappingList = [BivectorLineMapping, LinePropertyBivectorMapping, BivectorLogCostLineMapping]\n\n plot = Plot3D()\n\n testline = validationlinesets[0][0]\n plot.addLine(R_real * testline * ~R_real, color='r')\n\n for weight in weightList:\n\n\n t0 = time.time()\n print(\"Running %s\" % mapping.name)\n print(\"Weight = %e\" %weight)\n\n #Test various minimazation algorithms \n mapping = BivectorWeightedLineMapping\n mapping.costfunction = sumWeightedLineSquaredErrorCost(weight)\n\n R_min, nit = minimizeError(traininglinesets, mapping, x0 = None)\n\n #mapping.costfunction = logSumWeightedLineSquaredErrorCost(weight)\n\n #Finding the cost if we used the actual rotor used to generate the matrix\n realtrainingcost = mapping.costfunction(R_real, traininglinesets)\n print(\"Real training cost is %s\" % str(realtrainingcost)) \n\n realvalidationcost = mapping.costfunction(R_real, validationlinesets)\n print(\"Real validation cost is %s\" % str(realvalidationcost)) \n\n minimumtrainingcost = mapping.costfunction(R_min, traininglinesets)\n print(\"minimized training cost %f\" % minimumtrainingcost)\n\n minimumvalidationcost = mapping.costfunction(R_min, validationlinesets)\n print(\"minimized validation cost = %f\" % minimumvalidationcost)\n\n realpointcost = linePointCostMetric(R_real, R_min, 10)\n print(\"Averaged cost for points a: %f, a + m: %f, a + 10m: %f, a + 100m: %f, a + 1000m: %f\" % tuple(realpointcost))\n\n print(\"\")\n print(\"R_real= %s\" % str(R_real/np.sign(float(R_real(0)))))\n print(\"R_min = %s\" % str(R_min/np.sign(float(R_min(0)))))\n #print(\"R_start = %s\" % str(R_start/np.sign(float(R_start(0)))))\n\n\n B_real = ga_log(R_real/np.sign(float(R_real(0))))\n B_min = ga_log(R_min/np.sign(float(R_min(0))))\n #B_start = ga_log(R_start/np.sign(float(R_start(0))))\n\n print(\"\")\n print(\"B_real= %s\" % str(B_real))\n print(\"B_min = %s\" % str(B_min))\n #print(\"B_start = %s\" % str(B_start))\n\n\n print(\"\")\n print(\"C(R(B_real - B_min)) = %f\" % rotorAbsCostFunction(ga_exp(B_min - B_real)))\n\n plot.addLine(R_min * testline * ~R_min, color = mapping.color)\n\n t_end = time.time()\n\n print(\"\")\n print(\"Running time for extracting best rotor for %d line pairs is %f s\" % (N_train, t_end - t0))\n print(\"\\n\\n\")\n\n\n plot.show(False)\n\n\n def testLineAveraging(self):\n seed = 1\n sigma_T = 0.005\n sigma_R = 0.002\n N = 100 \n\n mapping = BivectorLineMapping\n\n line_start, line_target = createRandomLines(2)\n R_real_min, N_int = minimizeError([(line_start, line_target)], mapping = BivectorLineMapping)\n R_real = RotorLine2Line(line_start, line_target)\n\n print(\"R_real \", R_real)\n print(\"R_real_min\", R_real_min)\n print(\"B_real_min\", ga_log(R_real_min))\n print(\"B_real \", ga_log(R_real))\n\n print(\"L_real_min\", R_real_min * 
line_start * ~R_real_min)\n\n\n traingingdata = [line_start, [perturbeObject(line_target, sigma_R, sigma_T) for _ in range(N)]]\n validationdata = [line_start, [perturbeObject(line_target, sigma_R, sigma_T) for _ in range(N)]]\n\n print(\"Training and validation sets created with sig_r = %f and sig_t = %f, N = %d\" % (sigma_R, sigma_T, N))\n\n map_list = [BivectorLineEstimationMapping]\n\n for map_obj in map_list:\n np.random.seed(seed)\n print(map_obj.name)\n realtrainingcost, minimumvalidationcost, R_min = benchmarkMinimizeError(R_real, traingingdata, validationdata, N = N, fileout = None, mapping = map_obj)\n print(\"L_real = \", line_target)\n print(\"L_min = \", R_min*line_start*~R_min)\n print(\"L_example= \", validationdata[0])\n\n\ndef plotCostFunctionEffect():\n print(\"\\nRunning plotCostFunctionEffect\")\n print(\"\")\n np.random.seed(1)\n\n\n N_train = 20\n\n line_scale = 10\n translation_scale = 10\n\n #Test extreme values\n sigma_R = 0.01 \n sigma_T = 1\n\n line1, line2 = createRandomLines(2)\n a = createRandomVector(scale = translation_scale)\n print(\"Translated lineA by \", a)\n\n b = createRandomVector(scale = translation_scale)\n print(\"Translated lineB by \", b)\n\n T_a = Translator(a)\n T_b = Translator(b)\n\n #Move them far away from the origin\n lineA = T_a * line1 * ~T_a \n lineB = T_b * line2 * ~T_b \n\n R_real = RotorLine2Line(lineA, lineB)\n\n linesets = createNoisyLineSet(R_real, sigma_R, sigma_T, N_train, scale = line_scale)\n\n mapping = BivectorWeightedLineMapping\n mapping.costfunction = sumWeightedLineSquaredErrorCost(1./translation_scale)\n\n x0 = mapping.inverserotorconversion(R_real) \n x_test = x0[0]\n y_test = x0[3]\n\n N_rot = 50\n N_tran = 50\n rot_range = 0.4\n translation_range = 10\n\n rotation = np.linspace(-rot_range, rot_range, N_rot)\n translation = np.linspace(-translation_range, translation_range, N_tran)\n\n ans = np.zeros((N_rot, N_tran))\n\n for i, rot in enumerate(rotation):\n for j, tran in enumerate(translation):\n x0[0] = x_test + tran\n x0[3] = y_test + rot\n ans[i, j] = np.log(mapping.costfunction(mapping.rotorconversion(x0), linesets))\n\n xv, yv = np.meshgrid(translation, rotation)\n\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n\n\n ax.set_xlabel(\"Translation error\")\n ax.set_ylabel(\"Rotation error\")\n ax.set_zlabel(\"log(objective function)\")\n\n ax.plot_wireframe(xv, yv, ans)\n plt.show() \n\nif __name__ == '__main__': \n unittest.main()\n #plotCostFunctionEffect()" ]
[ [ "numpy.random.seed", "numpy.linspace", "numpy.array", "numpy.meshgrid", "numpy.zeros", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
b06611016/tade_final
[ "0ba5d63a478580497a168d5814dc4c2b0b06f53b" ]
[ "model/metric.py" ]
[ "import torch\r\n\r\ndef accuracy(output, target, return_length=False):\r\n with torch.no_grad():\r\n pred = torch.argmax(output, dim=1)\r\n assert pred.shape[0] == len(target)\r\n correct = 0\r\n correct += torch.sum(pred == target).item()\r\n if return_length:\r\n return correct / len(target), len(target)\r\n else:\r\n return correct / len(target)\r\n \r\ndef top_k_acc(output, target, k=5, return_length=False):\r\n with torch.no_grad():\r\n pred = torch.topk(output, k, dim=1)[1]\r\n assert pred.shape[0] == len(target)\r\n correct = 0\r\n for i in range(k):\r\n correct += torch.sum(pred[:, i] == target).item()\r\n if return_length:\r\n return correct / len(target), len(target)\r\n else:\r\n return correct / len(target)\r\n" ]
[ [ "torch.topk", "torch.sum", "torch.no_grad", "torch.argmax" ] ]
Kiri23/facial_landmark-detection
[ "6377b0f99b5537abaf5a77252ebe2b18dbaff875" ]
[ "facial_landmark_detection.py" ]
[ "# pyimage search tutorial #http://www.pyimagesearch.com/2017/04/03/facial-landmarks-dlib-opencv-python/\n# import the necessary packages\nfrom imutils import face_utils\nimport numpy as np\nimport argparse\nimport imutils\nimport dlib\nimport cv2\n\ndef rect_to_bb(rect):\n\t# take a bounding predicted by dlib and convert it\n\t# to the format (x, y, w, h) as we would normally do\n\t# with OpenCV\n\tx = rect.left()\n\ty = rect.top()\n\tw = rect.right() - x\n\th = rect.bottom() - y\n\n\t# return a tuple of (x, y, w, h)\n\treturn (x, y, w, h)\n\ndef shape_to_np(shape, dtype=\"int\"):\n\t# initialize the list of (x, y)-coordinates\n\tcoords = np.zeros((68, 2), dtype=dtype)\n\n\t# loop over the 68 facial landmarks and convert them\n\t# to a 2-tuple of (x, y)-coordinates\n\tfor i in range(0, 68):\n\t\tcoords[i] = (shape.part(i).x, shape.part(i).y)\n\n\t# return the list of (x, y)-coordinates\n\treturn coords\n\n# construct the argument parser and parse the arguments\nap = argparse.ArgumentParser()\n# aqui estoy haciendo que cuando corra este programa puedo pasarle el path como argumento\nap.add_argument(\"-p\", \"--shape-predictor\", required=True,\n\thelp=\"path to facial landmark predictor\")\nap.add_argument(\"-i\", \"--image\", required=True,\n\thelp=\"path to input image\")\nargs = vars(ap.parse_args())\n\n# initialize dlib's face detector (HOG-based) and then create\n# the facial landmark predictor\n# ya esto es un modelo pre-trained para detectar lo que dice el nombre del metodo\ndetector = dlib.get_frontal_face_detector()\n# este es el file que descargue\npredictor = dlib.shape_predictor(args[\"shape_predictor\"])\n\n# load the input image especificada en el args parametro, resize it, and convert it to grayscale\nimage = cv2.imread(args[\"image\"])\n# resize it using this library made by the author https://github.com/jrosebr1/imutils\nimage = imutils.resize(image, width=500)\n#convert it to grayscale\ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n# detect faces in the grayscale image\n#Esta funcion es de dlib line 46 y es la que detecta en el trained modelo\n# le pasa la imagen como gray y el 1 significa la resolucion - pyramid layer\nrects = detector(gray, 1) # detector - detect las faces en la imagen\n\n# Given the (x, y)-coordinates of the faces in the image, we can now apply facial landmark detection to each of the face regions\n# loop over the face detections i guess que es el index y rect x,y de la cara\nfor (i, rect) in enumerate(rects):\n # determine the facial landmarks for the face region (del modelo que se le pasa\n # como parametro)\n\tshape = predictor(gray, rect)\n # convert the facial landmark (x, y)-coordinates to a NumPy array - metodo que esta definido arrriba\n\tshape = face_utils.shape_to_np(shape)\n\n # convert dlib's rectangle to a OpenCV-style bounding box\n\t# [i.e., (x, y, w, h)], then draw the face bounding box - metodo defnido arriba\n\t(x, y, w, h) = face_utils.rect_to_bb(rect)\n # opencv method to create rectangle\n\tcv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)\n\n # show the face number - opencv methodto put text on a image\n\tcv2.putText(image, \"Face #{}\".format(i + 1), (x - 10, y - 10),\n\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)\n\n\n # loop over the (x, y)-coordinates for the facial landmarks\n\t# and draw them on the image\n\tfor (x, y) in shape:\n # draw a circle on every landmark found opencv method\n\t\tcv2.circle(image, (x, y), 1, (0, 0, 255), -1)\n\n\n# show the output image with the face detections + facial 
landmarks\n# opencv method to show an image\ncv2.imshow(\"Output\", image)\ncv2.waitKey(0)\n" ]
[ [ "numpy.zeros" ] ]
TradeRES/toolbox-amiris-emlab
[ "11e6e7101bfbc0d71753e3892d4463c4955d2c34" ]
[ "scripts/combine_AMIRIS_results_ingrid.py" ]
[ "import sys\nimport os\nfrom pathlib import Path\nfrom glob import glob\n# from datetime import timedelta\nimport datetime as dt\nimport pandas as pd\nfrom fameio.scripts.convert_results import run as convert_results\nfrom fameio.source.cli import Config\nfrom fameio.source.time import FameTime, Constants\nimport math\n\nCONFIG = {\n Config.LOG_LEVEL: \"info\",\n Config.LOG_FILE: None,\n Config.AGENT_LIST: None,\n Config.OUTPUT: 'FameResults_converted',\n Config.SINGLE_AGENT_EXPORT: False,\n}\n\n\ndef process_file(filepath: str) -> pd.DataFrame:\n \"\"\"Process single AMIRIS csv file\"\"\"\n df = pd.read_csv(filepath, sep=';')\n object_class = Path(filepath).stem\n assert df.columns[1] == 'TimeStep'\n assert df.columns[0] == 'AgentId'\n # Convert times steps\n df['TimeStamp'] = df['TimeStep'].apply(roundup)\n df['TimeStamp'] = df['TimeStamp'].apply(convert_fame_time_step_to_datetime)\n df['ObjectClass'] = object_class\n return df.drop('TimeStep', axis=1).melt(['ObjectClass', 'AgentId', 'TimeStamp']).dropna()\n\n\ndef roundup(x):\n return round(x / 1000.0) * 1000\n\n\ndef convert_fame_time_step_to_datetime(fame_time_steps: int) -> str:\n \"\"\"Converts given `fame_time_steps` to corresponding real-world datetime string\"\"\"\n years_since_start_time = math.floor(fame_time_steps / Constants.STEPS_PER_YEAR)\n current_year = years_since_start_time + Constants.FIRST_YEAR\n beginning_of_year = dt.datetime(year=current_year, month=1, day=1, hour=0, minute=0, second=0)\n steps_in_current_year = (fame_time_steps - years_since_start_time * Constants.STEPS_PER_YEAR)\n seconds_in_current_year = steps_in_current_year / Constants.STEPS_PER_SECOND\n simulatedtime = beginning_of_year + dt.timedelta(seconds=seconds_in_current_year)\n timerounded = simulatedtime.replace(second=0, microsecond=0, minute=0, hour=simulatedtime.hour) + dt.timedelta(\n hours=simulatedtime.minute // 30)\n return timerounded.strftime('%Y-%m-%dT%H:%M:%S')\n\n\n# Get input file from cmd line arguments\n\nprint(\"PATH\")\nprint(os.getcwd())\nprint(\"PB\")\nprint(sys.argv[1])\ninput_pb_file = sys.argv[1]\n\n#parent = os.path.basename(os.getcwd())\n#complete = os.path.join(Path(os.getcwd()).parent, \"data\", input_pb_file)\n\n# Convert Proto Buffer file to csv's\nconvert_results(input_pb_file, CONFIG)\n# Combine csv files into one data frame\ncsv_files = glob(f'{CONFIG[Config.OUTPUT]}/*.csv')\ndata = pd.concat(map(process_file, csv_files))\ngrouped = data.groupby([\"ObjectClass\", \"variable\", \"AgentId\"]).sum()\n# Write results\n#data.to_csv('AMIRIS_combined.csv2', index=False)\ngrouped.to_csv('AMIRIS_combined.csv', index=True)\n" ]
[ [ "pandas.read_csv" ] ]
janbolle/neptune-client
[ "33b1876b361d9a7184f557d7bd6e016cb08bd59f" ]
[ "neptune/new/internal/utils/images.py" ]
[ "#\n# Copyright (c) 2020, Neptune Labs Sp. z o.o.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport base64\nimport io\nimport logging\nimport pickle\nimport warnings\nfrom io import StringIO, BytesIO\nfrom typing import Optional\n\nfrom packaging import version\nfrom pandas import DataFrame\n\nfrom neptune.new.exceptions import PlotlyIncompatibilityException\n\n_logger = logging.getLogger(__name__)\n\ntry:\n from numpy import ndarray as numpy_ndarray, array as numpy_array, uint8 as numpy_uint8\nexcept ImportError:\n numpy_ndarray = None\n numpy_array = None\n numpy_uint8 = None\n\ntry:\n from PIL.Image import Image as PILImage, fromarray as pilimage_fromarray\nexcept ImportError:\n PILImage = None\n\n def pilimage_fromarray():\n pass\n\n\nIMAGE_SIZE_LIMIT_MB = 15\n\n\ndef get_image_content(image) -> Optional[bytes]:\n content = _image_to_bytes(image)\n\n if len(content) > IMAGE_SIZE_LIMIT_MB * 1024 * 1024:\n _logger.warning('Your image is larger than %dMB. Neptune supports logging images smaller than %dMB. '\n 'Resize or increase compression of this image',\n IMAGE_SIZE_LIMIT_MB,\n IMAGE_SIZE_LIMIT_MB)\n return None\n\n return content\n\n\ndef get_html_content(chart) -> Optional[str]:\n content = _to_html(chart)\n\n if len(content) > IMAGE_SIZE_LIMIT_MB * 1024 * 1024:\n _logger.warning('Your file is larger than %dMB. '\n 'Neptune supports logging files in-memory objects smaller than %dMB. '\n 'Resize or increase compression of this object',\n IMAGE_SIZE_LIMIT_MB,\n IMAGE_SIZE_LIMIT_MB)\n return None\n\n return content\n\n\ndef get_pickle_content(obj) -> Optional[bytes]:\n content = _export_pickle(obj)\n\n if len(content) > IMAGE_SIZE_LIMIT_MB * 1024 * 1024:\n _logger.warning('Your file is larger than %dMB. '\n 'Neptune supports logging files in-memory objects smaller than %dMB. '\n 'Resize or increase compression of this object',\n IMAGE_SIZE_LIMIT_MB,\n IMAGE_SIZE_LIMIT_MB)\n return None\n\n return content\n\n\ndef _image_to_bytes(image) -> bytes:\n if image is None:\n raise ValueError(\"image is None\")\n\n elif is_numpy_array(image):\n return _get_numpy_as_image(image)\n\n elif is_pil_image(image):\n return _get_pil_image_data(image)\n\n elif is_matplotlib_figure(image):\n return _get_figure_image_data(image)\n\n elif _is_torch_tensor(image):\n return _get_numpy_as_image(image.detach().numpy())\n\n elif _is_tensorflow_tensor(image):\n return _get_numpy_as_image(image.numpy())\n\n raise TypeError(\"image is {}\".format(type(image)))\n\n\ndef _to_html(chart) -> str:\n if _is_matplotlib_pyplot(chart):\n chart = chart.gcf()\n\n if is_matplotlib_figure(chart):\n try:\n chart = _matplotlib_to_plotly(chart)\n return _export_plotly_figure(chart)\n except ImportError:\n print(\"Plotly not installed. Logging plot as an image.\")\n return _image_content_to_html(_get_figure_image_data(chart))\n except UserWarning:\n print(\"Couldn't convert Matplotlib plot to interactive Plotly plot. 
Logging plot as an image instead.\")\n return _image_content_to_html(_get_figure_image_data(chart))\n\n elif is_pandas_dataframe(chart):\n return _export_pandas_dataframe_to_html(chart)\n\n elif is_plotly_figure(chart):\n return _export_plotly_figure(chart)\n\n elif is_altair_chart(chart):\n return _export_altair_chart(chart)\n\n elif is_bokeh_figure(chart):\n return _export_bokeh_figure(chart)\n\n else:\n raise ValueError(\"Currently supported are matplotlib, plotly, altair, and bokeh figures\")\n\n\ndef _matplotlib_to_plotly(chart):\n # pylint: disable=import-outside-toplevel\n import plotly\n import matplotlib\n\n # When Plotly cannot accurately convert a matplotlib plot, it emits a warning.\n # Then we want to fallback on logging the plot as an image.\n #\n # E.g. when trying to convert a Seaborn confusion matrix or a hist2d, it emits a UserWarning with message\n # \"Dang! That path collection is out of this world. I totally don't know what to do with it yet!\n # Plotly can only import path collections linked to 'data' coordinates\"\n plotly_version = plotly.__version__\n matplotlib_version = matplotlib.__version__\n if version.parse(matplotlib_version) >= version.parse(\"3.3.0\"):\n raise PlotlyIncompatibilityException(matplotlib_version, plotly_version)\n\n with warnings.catch_warnings():\n warnings.filterwarnings(\n \"error\",\n category=UserWarning,\n message=\".*Plotly can only import path collections linked to 'data' coordinates.*\")\n chart = plotly.tools.mpl_to_plotly(chart)\n\n return chart\n\n\ndef _image_content_to_html(content: bytes) -> str:\n str_equivalent_image = base64.b64encode(content).decode()\n return \"<img src='data:image/png;base64,\" + str_equivalent_image + \"'/>\"\n\n\ndef _get_numpy_as_image(array):\n array *= 255\n shape = array.shape\n if len(shape) == 2:\n return _get_pil_image_data(pilimage_fromarray(array.astype(numpy_uint8)))\n if len(shape) == 3:\n if shape[2] == 1:\n array2d = numpy_array([[col[0] for col in row] for row in array])\n return _get_pil_image_data(pilimage_fromarray(array2d.astype(numpy_uint8)))\n if shape[2] in (3, 4):\n return _get_pil_image_data(pilimage_fromarray(array.astype(numpy_uint8)))\n raise ValueError(\"Incorrect size of numpy.ndarray. 
Should be 2-dimensional or\"\n \"3-dimensional with 3rd dimension of size 1, 3 or 4.\")\n\n\ndef _get_pil_image_data(image: PILImage) -> bytes:\n with io.BytesIO() as image_buffer:\n image.save(image_buffer, format='PNG')\n return image_buffer.getvalue()\n\n\ndef _get_figure_image_data(figure) -> bytes:\n with io.BytesIO() as image_buffer:\n figure.savefig(image_buffer, format='png', bbox_inches=\"tight\")\n return image_buffer.getvalue()\n\n\ndef _is_torch_tensor(image):\n return image.__class__.__module__.startswith('torch')\\\n and image.__class__.__name__ == 'Tensor'\\\n and hasattr(image, \"numpy\")\n\n\ndef _is_tensorflow_tensor(image):\n return image.__class__.__module__.startswith('tensorflow.')\\\n and 'Tensor' in image.__class__.__name__\\\n and hasattr(image, \"numpy\")\n\n\ndef _is_matplotlib_pyplot(chart):\n return chart.__class__.__module__.startswith('matplotlib.pyplot')\n\n\ndef is_numpy_array(image) -> bool:\n return numpy_ndarray is not None and isinstance(image, numpy_ndarray)\n\n\ndef is_pil_image(image) -> bool:\n return PILImage is not None and isinstance(image, PILImage)\n\n\ndef is_matplotlib_figure(image):\n return image.__class__.__module__.startswith('matplotlib.') and image.__class__.__name__ == 'Figure'\n\n\ndef is_plotly_figure(chart):\n return chart.__class__.__module__.startswith('plotly.') and chart.__class__.__name__ == 'Figure'\n\n\ndef is_altair_chart(chart):\n return chart.__class__.__module__.startswith('altair.') and 'Chart' in chart.__class__.__name__\n\n\ndef is_bokeh_figure(chart):\n return chart.__class__.__module__.startswith('bokeh.') and chart.__class__.__name__ == 'Figure'\n\n\ndef is_pandas_dataframe(table):\n return isinstance(table, DataFrame)\n\n\ndef _export_pandas_dataframe_to_html(table):\n buffer = StringIO(table.to_html())\n buffer.seek(0)\n return buffer.getvalue()\n\n\ndef _export_plotly_figure(image):\n buffer = StringIO()\n image.write_html(buffer)\n buffer.seek(0)\n return buffer.getvalue()\n\n\ndef _export_altair_chart(chart):\n buffer = StringIO()\n chart.save(buffer, format='html')\n buffer.seek(0)\n return buffer.getvalue()\n\n\ndef _export_bokeh_figure(chart):\n from bokeh.resources import CDN\n from bokeh.embed import file_html\n\n html = file_html(chart, CDN)\n buffer = StringIO(html)\n buffer.seek(0)\n return buffer.getvalue()\n\n\ndef _export_pickle(obj):\n buffer = BytesIO()\n pickle.dump(obj, buffer)\n buffer.seek(0)\n return buffer.getvalue()\n" ]
[ [ "numpy.array" ] ]
Supeking/darts_sk
[ "d87b5624d2f6795e842af74664aa42d85a41fa0b" ]
[ "cnn/architect.py" ]
[ "import torch\nimport numpy as np\nimport torch.nn as nn\nfrom torch.autograd import Variable\n\n\ndef _concat(xs):\n return torch.cat([x.view(-1) for x in xs])\n\n\nclass Architect(object):\n\n def __init__(self, model, args):\n self.network_momentum = args.momentum\n self.network_weight_decay = args.weight_decay\n self.model = model\n self.optimizer = torch.optim.Adam(self.model.arch_parameters(),\n lr=args.arch_learning_rate, betas=(0.5, 0.999),\n weight_decay=args.arch_weight_decay)\n\n def _compute_unrolled_model(self, input, target, eta, network_optimizer):\n loss = self.model._loss(input, target)\n theta = _concat(self.model.parameters()).data\n try:\n moment = _concat(network_optimizer.state[v]['momentum_buffer'] for v in self.model.parameters()).mul_(\n self.network_momentum)\n except:\n moment = torch.zeros_like(theta)\n dtheta = _concat(torch.autograd.grad(loss, self.model.parameters())).data + self.network_weight_decay * theta\n unrolled_model = self._construct_model_from_theta(theta.sub(eta*(moment + dtheta)))\n return unrolled_model\n\n def step(self, input_train, target_train, input_valid, target_valid, eta, network_optimizer, unrolled):\n self.optimizer.zero_grad()\n if unrolled:\n self._backward_step_unrolled(input_train, target_train, input_valid, target_valid, eta, network_optimizer)\n else:\n self._backward_step(input_valid, target_valid)\n self.optimizer.step()\n\n def _backward_step(self, input_valid, target_valid):\n loss = self.model._loss(input_valid, target_valid)\n loss.backward()\n\n def _backward_step_unrolled(self, input_train, target_train, input_valid, target_valid, eta, network_optimizer):\n unrolled_model = self._compute_unrolled_model(input_train, target_train, eta, network_optimizer)\n unrolled_loss = unrolled_model._loss(input_valid, target_valid)\n\n unrolled_loss.backward()\n dalpha = [v.grad for v in unrolled_model.arch_parameters()]\n vector = [v.grad.data for v in unrolled_model.parameters()]\n implicit_grads = self._hessian_vector_product(vector, input_train, target_train)\n\n for g, ig in zip(dalpha, implicit_grads):\n g.data.sub_(eta*ig.data)\n\n for v, g in zip(self.model.arch_parameters(), dalpha):\n if v.grad is None:\n v.grad = Variable(g.data)\n else:\n v.grad.data.copy_(g.data)\n\n def _construct_model_from_theta(self, theta):\n model_new = self.model.new()\n model_dict = self.model.state_dict()\n\n params, offset = {}, 0\n for k, v in self.model.named_parameters():\n v_length = np.prod(v.size())\n params[k] = theta[offset: offset + v_length].view(v.size())\n offset += v_length\n\n assert offset == len(theta)\n model_dict.update(params)\n model_new.load_state_dict(model_dict)\n return model_new.cuda()\n\n def _hessian_vector_product(self, vector, input, target, r=1e-2):\n R = r / _concat(vector).norm()\n for p, v in zip(self.model.parameters(), vector):\n p.data.add_(R*v)\n loss = self.model._loss(input, target)\n grads_p = torch.autograd.grad(loss, self.model.arch_parameters())\n\n for p, v in zip(self.model.parameters(), vector):\n p.data.sub_(2 * R*v)\n loss = self.model._loss(input, target)\n grads_n = torch.autograd.grad(loss, self.model.arch_parameters())\n\n for p, v in zip(self.model.parameters(), vector):\n p.data.add_(R*v)\n\n return [(x - y).div_(2 * R) for x, y in zip(grads_p, grads_n)]\n" ]
[ [ "torch.zeros_like", "torch.autograd.Variable" ] ]
NSaal/seiresnet
[ "2633f780c2e7bd66b722a1727227a6e4e8f6529d" ]
[ "models/resstage.py" ]
[ "import torch\nimport torch.nn as nn\nimport os\nfrom div.download_from_url import download_from_url\n\ntry:\n from torch.hub import _get_torch_home\n torch_cache_home = _get_torch_home()\nexcept ImportError:\n torch_cache_home = os.path.expanduser(\n os.getenv('TORCH_HOME', os.path.join(\n os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch')))\ndefault_cache_path = os.path.join(torch_cache_home, 'pretrained')\n\n__all__ = ['ResStage', 'resstage18', 'resstage34', 'resstage50', 'resstage101',\n 'resstage152', 'resstage200']\n\n\nmodel_urls = {\n 'resstage18': 'Trained model not available yet!!',\n 'resstage34': 'Trained model not available yet!!',\n 'resstage50': 'https://drive.google.com/uc?export=download&id=1r2GvTm50xF6euU4Z6A_MYoMkiKbYRO3a',\n 'resstage101': 'https://drive.google.com/uc?export=download&id=16qGLSElXet4ByQfG3a-zO9fNxGofRsLt',\n 'resstage152': 'https://drive.google.com/uc?export=download&id=1m798qbvw8g-rW4aORIV9JY8c1JGzoVCI',\n 'resstage200': 'https://drive.google.com/uc?export=download&id=16ZYVIkMfycnSjof4BzP312Gk3XNw1xwg',\n}\n\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n\n\ndef conv1x1(in_planes, out_planes, stride=1):\n \"\"\"1x1 convolution\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, norm_layer=None,\n start_block=False, end_block=False, exclude_bn0=False):\n super(BasicBlock, self).__init__()\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n # Both self.conv1 and self.downsample layers downsample the input when stride != 1\n if not start_block and not exclude_bn0:\n self.bn0 = norm_layer(inplanes)\n\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = norm_layer(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n\n if start_block:\n self.bn2 = norm_layer(planes)\n\n if end_block:\n self.bn2 = norm_layer(planes)\n\n self.downsample = downsample\n self.stride = stride\n\n self.start_block = start_block\n self.end_block = end_block\n self.exclude_bn0 = exclude_bn0\n\n def forward(self, x):\n identity = x\n\n if self.start_block:\n out = self.conv1(x)\n elif self.exclude_bn0:\n out = self.relu(x)\n out = self.conv1(out)\n else:\n out = self.bn0(x)\n out = self.relu(out)\n out = self.conv1(out)\n\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n\n if self.start_block:\n out = self.bn2(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n\n if self.end_block:\n out = self.bn2(out)\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, norm_layer=None,\n start_block=False, end_block=False, exclude_bn0=False):\n super(Bottleneck, self).__init__()\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n # Both self.conv2 and self.downsample layers downsample the input when stride != 1\n\n if not start_block and not exclude_bn0:\n self.bn0 = norm_layer(inplanes)\n\n self.conv1 = conv1x1(inplanes, planes)\n self.bn1 = norm_layer(planes)\n self.conv2 = conv3x3(planes, planes, stride)\n self.bn2 = norm_layer(planes)\n self.conv3 = conv1x1(planes, planes * self.expansion)\n\n if start_block:\n self.bn3 = norm_layer(planes * self.expansion)\n\n if end_block:\n self.bn3 
= norm_layer(planes * self.expansion)\n\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n self.start_block = start_block\n self.end_block = end_block\n self.exclude_bn0 = exclude_bn0\n\n def forward(self, x):\n identity = x\n\n if self.start_block:\n out = self.conv1(x)\n elif self.exclude_bn0:\n out = self.relu(x)\n out = self.conv1(out)\n else:\n out = self.bn0(x)\n out = self.relu(out)\n out = self.conv1(out)\n\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n\n if self.start_block:\n out = self.bn3(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n\n if self.end_block:\n out = self.bn3(out)\n out = self.relu(out)\n\n return out\n\n\nclass ResStage(nn.Module):\n\n def __init__(self, block, layers, num_classes=1000, zero_init_residual=False, norm_layer=None):\n super(ResStage, self).__init__()\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n self.inplanes = 64\n self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,\n bias=False)\n self.bn1 = norm_layer(64)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, 64, layers[0], norm_layer=norm_layer)\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2, norm_layer=norm_layer)\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2, norm_layer=norm_layer)\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2, norm_layer=norm_layer)\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n self.fc = nn.Linear(512 * block.expansion, num_classes)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n # Zero-initialize the last BN in each residual branch,\n # so that the residual branch starts with zeros, and each residual block behaves like an identity.\n # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677\n if zero_init_residual:\n for m in self.modules():\n if isinstance(m, Bottleneck):\n nn.init.constant_(m.bn3.weight, 0)\n elif isinstance(m, BasicBlock):\n nn.init.constant_(m.bn2.weight, 0)\n\n def _make_layer(self, block, planes, blocks, stride=1, norm_layer=None):\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n conv1x1(self.inplanes, planes * block.expansion, stride),\n norm_layer(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample, norm_layer,\n start_block=True))\n self.inplanes = planes * block.expansion\n exclude_bn0 = True\n for _ in range(1, (blocks-1)):\n layers.append(block(self.inplanes, planes, norm_layer=norm_layer,\n exclude_bn0=exclude_bn0))\n exclude_bn0 = False\n\n layers.append(block(self.inplanes, planes, norm_layer=norm_layer, end_block=True, exclude_bn0=exclude_bn0))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = self.avgpool(x)\n x = x.view(x.size(0), -1)\n x = self.fc(x)\n\n return x\n\n\ndef resstage18(pretrained=False, 
**kwargs):\n \"\"\"Constructs a ResStage-18 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResStage(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n os.makedirs(default_cache_path, exist_ok=True)\n model.load_state_dict(torch.load(download_from_url(model_urls['resstage18'],\n root=default_cache_path)))\n return model\n\n\ndef resstage34(pretrained=False, **kwargs):\n \"\"\"Constructs a ResStage-34 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResStage(BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n os.makedirs(default_cache_path, exist_ok=True)\n model.load_state_dict(torch.load(download_from_url(model_urls['resstage34'],\n root=default_cache_path)))\n return model\n\n\ndef resstage50(pretrained=False, **kwargs):\n \"\"\"Constructs a ResStage-50 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResStage(Bottleneck, [3, 4, 6, 3], **kwargs)\n if pretrained:\n os.makedirs(default_cache_path, exist_ok=True)\n model.load_state_dict(torch.load(download_from_url(model_urls['resstage50'],\n root=default_cache_path)))\n return model\n\n\ndef resstage101(pretrained=False, **kwargs):\n \"\"\"Constructs a ResStage-101 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResStage(Bottleneck, [3, 4, 23, 3], **kwargs)\n if pretrained:\n os.makedirs(default_cache_path, exist_ok=True)\n model.load_state_dict(torch.load(download_from_url(model_urls['resstage101'],\n root=default_cache_path)))\n return model\n\n\ndef resstage152(pretrained=False, **kwargs):\n \"\"\"Constructs a ResStage-152 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResStage(Bottleneck, [3, 8, 36, 3], **kwargs)\n if pretrained:\n os.makedirs(default_cache_path, exist_ok=True)\n model.load_state_dict(torch.load(download_from_url(model_urls['resstage152'],\n root=default_cache_path)))\n return model\n\n\ndef resstage200(pretrained=False, **kwargs):\n \"\"\"Constructs a ResStage-152 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResStage(Bottleneck, [3, 24, 36, 3], **kwargs)\n if pretrained:\n os.makedirs(default_cache_path, exist_ok=True)\n model.load_state_dict(torch.load(download_from_url(model_urls['resstage200'],\n root=default_cache_path)))\n return model\n" ]
[ [ "torch.nn.Sequential", "torch.nn.init.constant_", "torch.hub._get_torch_home", "torch.nn.Conv2d", "torch.nn.MaxPool2d", "torch.nn.Linear", "torch.nn.AdaptiveAvgPool2d", "torch.nn.ReLU", "torch.nn.init.kaiming_normal_" ] ]
HeyLucasLeao/HeyLucasLeao.github.io
[ "5c5da105763bbaeb3391ca716a09e87490b6eb1f" ]
[ "raspagem_dos_boletins_diarios/updating.py" ]
[ "from shutil import Error\nimport pandas as pd\nimport urllib.request\nfrom os import listdir\nfrom datetime import datetime\nimport csv\nimport numpy as np\nfrom tabula import read_pdf\nimport requests\nfrom datetime import timedelta\nfrom time import sleep\n\nPATH_PDF = r'../raspagem_dos_boletins_diarios/relatorios'\nPATH_CSV = r'../raspagem_dos_boletins_diarios/raw_csvs'\n\ndef atualizar_csvs():\n \n data_csv = pd.to_datetime(data, format=\"%d_%m_%y_\")\n data_csv = str(data_csv)[2:10]\n \n def change_rows(x):\n dici_csv = {'REDE PÚBLICA': 'Rede Publica',\n 'Cardíaco': 'Cardiaco',\n 'REDE PRIVADA': 'Rede Privada',\n 'TOTAL': 'Total'}\n if x in dici_csv.keys():\n return dici_csv[x]\n return x\n \n def download_file(url):\n response = urllib.request.urlopen(url)\n data_download = pd.to_datetime(data, format=\"%d_%m_%y_\")\n data_download = str(data_download)[2:10]\n \n for files in listdir(PATH_PDF):\n if data_download in files[files.index('_') + 1:files.index('.')]:\n return\n else:\n continue\n path = r'C:\\Users\\heylu\\Documents\\github\\HeyLucasLeao.github.io\\raspagem_dos_boletins_diarios\\relatorios'\n\n with open(path + '\\\\' + f'relatorio_{data_download}.pdf', mode='wb') as file:\n file.write(response.read())\n \n for files in listdir(PATH_PDF):\n if data_csv in files[files.index('_') + 1:files.index('.')]:\n return\n else:\n continue\n \n download_file(link)\n \n atualizacao_de_csvs = taxa_de_ocupacao.copy()\n atualizacao_de_csvs['unidade'] = atualizacao_de_csvs['unidade'].apply(change_rows) \n \n atualizacao_de_csvs = atualizacao_de_csvs.T\n\n atualizacao_de_csvs.insert(loc=0, \n column='Data', \n value=data_csv)\n\n \n for files in listdir(PATH_CSV):\n with open(PATH_CSV + '\\\\' + files, 'a+', newline='') as f:\n writer = csv.writer(f)\n dados = np.array(atualizacao_de_csvs.loc[[files[:files.index('.')]]]).ravel()\n writer.writerow(dados)\n \n for file_name in listdir(PATH_CSV):\n df = pd.read_csv(PATH_CSV + \"\\\\\" + file_name, index_col='Data')\n for col in df.columns:\n for i in range(len(df[col])):\n if isinstance(df[col].iloc[i], str):\n df[col].iloc[i] = df[col].iloc[i][:-1]\n df[col].iloc[i] = df[col].iloc[i].replace(',', '.')\n df[col].iloc[i] = float(df[col].iloc[i])\n df[col].iloc[i] = round(df[col].iloc[i] / 100, 2)\n df[col].iloc[i] = \"{:.2f}\".format(df[col].iloc[i])\n df.to_csv(r'../raspagem_dos_boletins_diarios/normalized_csvs' + \"/\"+ file_name)\n\ndata = str(datetime.now())[2:10]\ndata = data.split('-')\ndata.reverse()\ndata = [x + \"_\" for x in data]\ndata = \"\".join(data)\ndata_url_acento = data\ndata_url_sem_acento = data\nAREA = [265.022,\n413.384,\n373.253,\n829.774]\n\nwhile True:\n try:\n link = f'https://www.fvs.am.gov.br/media/publicacao/{data_url_acento}BOLETIM_DI%C3%81RIO_DE_CASOS_COVID-19.pdf'\n response = requests.get(link)\n response.raise_for_status()\n break\n except requests.HTTPError:\n data_url_acento = pd.to_datetime(data_url_acento, format=\"%d_%m_%y_\")\n data_url_acento = data_url_acento - timedelta(1)\n data_url_acento = str(data_url_acento)[2:10]\n data_url_acento = data_url_acento.split('-')\n data_url_acento.reverse()\n data_url_acento = [x + \"_\" for x in data_url_acento]\n data_url_acento = \"\".join(data_url_acento)\n continue\n\nwhile True:\n try:\n link = f'https://www.fvs.am.gov.br/media/publicacao/{data_url_sem_acento}BOLETIM_DIARIO_DE_CASOS_COVID-19.pdf'\n response = requests.get(link)\n response.raise_for_status()\n break\n except requests.HTTPError:\n data_url_sem_acento = pd.to_datetime(data_url_sem_acento, 
format=\"%d_%m_%y_\")\n data_url_sem_acento = data_url_sem_acento - timedelta(1)\n data_url_sem_acento = str(data_url_sem_acento)[2:10]\n data_url_sem_acento = data_url_sem_acento.split('-')\n data_url_sem_acento.reverse()\n data_url_sem_acento = [x + \"_\" for x in data_url_sem_acento]\n data_url_sem_acento = \"\".join(data_url_sem_acento)\n continue\n \nif pd.to_datetime(data_url_acento, format=\"%d_%m_%y_\") > pd.to_datetime(data_url_sem_acento, format=\"%d_%m_%y_\"):\n taxa_de_ocupacao = read_pdf(f'https://www.fvs.am.gov.br/media/publicacao/{data_url_acento}BOLETIM_DI%C3%81RIO_DE_CASOS_COVID-19.pdf', pages=2, area=AREA, stream=True)[0]\n link = f'https://www.fvs.am.gov.br/media/publicacao/{data_url_acento}BOLETIM_DI%C3%81RIO_DE_CASOS_COVID-19.pdf'\n data = data_url_acento\nelse:\n taxa_de_ocupacao = read_pdf(f'https://www.fvs.am.gov.br/media/publicacao/{data_url_sem_acento}BOLETIM_DIARIO_DE_CASOS_COVID-19.pdf', pages=2, area=AREA, stream=True)[0]\n link = r'http://www.fvs.am.gov.br/media/publicacao/{data_url_sem_acento}BOLETIM_DIARIO_DE_CASOS_COVID-19.pdf'\n data = data_url_sem_acento\n\ntaxa_de_ocupacao.drop(index=[0, 1, 8],columns=['Unnamed: 5'], inplace=True)\n\ntaxa_de_ocupacao.rename(columns={'Unnamed: 0': 'unidade',\n 'Unnamed: 1': 'uti_geral',\n 'Unnamed: 2': 'uti_covid-19',\n 'Unnamed: 3': 'leitos_clinicos_geral',\n 'TAXA DE OCUPAÇÃO EM MANAUS': 'leitos_clinicos_covid-19',\n 'Unnamed: 4': 'sala_vermelha_geral',\n 'Unnamed: 6': 'sala_vermelha_covid-19'}, inplace=True)\n\n#taxa_de_ocupacao['uti_geral'] = [x.split()[-1] for x in taxa_de_ocupacao['unidade']]\n#taxa_de_ocupacao['unidade'] = [\" \".join(x.split()[:-1]) for x in taxa_de_ocupacao['unidade']]\ntaxa_de_ocupacao['uti_covid-19'] = taxa_de_ocupacao['leitos_clinicos_geral']\ntaxa_de_ocupacao['leitos_clinicos_geral'] = [x.split()[:-1][0] for x in taxa_de_ocupacao['leitos_clinicos_covid-19']]\ntaxa_de_ocupacao['leitos_clinicos_covid-19'] = [x.split()[-1] for x in taxa_de_ocupacao['leitos_clinicos_covid-19']]\n\nif len(taxa_de_ocupacao.columns) == 7 and taxa_de_ocupacao.isnull().sum().sum() == 0:\n atualizar_csvs()\n print('CSVs atualizados.')\nelse:\n print('Taxa não extraída corretamente, favor verificar.')\n sleep(10)\n raise Error\n " ]
[ [ "pandas.to_datetime", "pandas.read_csv" ] ]
Shenzhen-Cloudatawalk-Technology-Co-Ltd/EdgeML
[ "ef9f8a77f096acbdeb941014791f8eda1c1bc35b" ]
[ "tools/SeeDot/seedot/compiler/converter/test.py" ]
[ "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT license.\n\nimport math\nimport numpy as np\n\n'''\nTest codes for the converter.\n'''\n\ndef getScale(maxabs: float, bits):\n return int(np.ceil(np.log2(maxabs) - np.log2((1 << (bits - 2)) - 1)))\n\ndef printUspsRNN():\n ite = 16\n print(\"let a0 = (XX[0] * W) in\")\n print(\"let c0 = a0 in\")\n print(\"let g0 = sigmoid(c0 + Bg) in\")\n print(\"let h0 = tanh(c0 + Bh) in\")\n print(\"let H0 = (zeta * (1.0 - g0) + nu) <*> h0 in \")\n print(\"\")\n\n for i in range(1, ite):\n print(\"let a%d = (XX[%d] * W) in\" % (i, i))\n print(\"let b%d = (H%d * U) in\" % (i, i-1))\n print(\"let c%d = a%d + b%d in\" % (i, i, i))\n print(\"let g%d = sigmoid(c%d + Bg) in\" % (i, i))\n print(\"let h%d = tanh(c%d + Bh) in\" % (i, i))\n print(\"let H%d = (g%d <*> H%d) + (zeta * (1.0 - g%d) + nu) <*> h%d in \" %\n (i, i, i-1, i, i))\n print(\"\\n\")\n\n print(\"let score = (H%d * FC) + FCbias in\" % (ite-1))\n print(\"argmax(score)\")\n\ndef printDsaRNN():\n ite = 125\n print(\"let a0 = (XX[0] * W1) * W2 in\")\n print(\"let c0 = a0 in\")\n print(\"let g0 = sigmoid(c0 + Bg) in\")\n print(\"let h0 = tanh(c0 + Bh) in\")\n print(\"let H0 = (zeta * (1.0 - g0) + nu) <*> h0 in \")\n print(\"\")\n\n for i in range(1, ite):\n print(\"let a%d = (XX[%d] * W1) * W2 in\" % (i, i))\n print(\"let b%d = (H%d * U1) * U2 in\" % (i, i-1))\n print(\"let c%d = a%d + b%d in\" % (i, i, i))\n print(\"let g%d = sigmoid(c%d + Bg) in\" % (i, i))\n print(\"let h%d = tanh(c%d + Bh) in\" % (i, i))\n print(\"let H%d = (g%d <*> H%d) + (zeta * (1.0 - g%d) + nu) <*> h%d in \" %\n (i, i, i-1, i, i))\n print(\"\\n\")\n\n print(\"let score = (H%d * FC) + FCbias in\" % (ite-1))\n print(\"argmax(score)\")\n\ndef printSpectakomRNN():\n ite = 7\n print(\"let a0 = (XX[0] * W1) * W2 in\")\n print(\"let c0 = a0 in\")\n print(\"let g0 = sigmoid(c0 + Bg) in\")\n print(\"let h0 = tanh(c0 + Bh) in\")\n print(\"let H0 = (zeta * (1.0 - g0) + nu) <*> h0 in \")\n print(\"\")\n\n for i in range(1, ite):\n print(\"let a%d = (XX[%d] * W1) * W2 in\" % (i, i))\n print(\"let b%d = (H%d * U1) * U2 in\" % (i, i-1))\n print(\"let c%d = a%d + b%d in\" % (i, i, i))\n print(\"let g%d = sigmoid(c%d + Bg) in\" % (i, i))\n print(\"let h%d = tanh(c%d + Bh) in\" % (i, i))\n print(\"let H%d = (g%d <*> H%d) + (zeta * (1.0 - g%d) + nu) <*> h%d in \" %\n (i, i, i-1, i, i))\n print(\"\\n\")\n\n print(\"let score = ((H%d * FC1) * FC2) + FCBias in\" % (ite-1))\n print(\"argmax(score)\")\n\ndef treeSum(tmp, length, height_shr, height_noshr):\n count = length\n depth = 0\n shr = True\n\n while depth < (height_shr + height_noshr):\n if depth >= height_shr:\n shr = False\n\n for p in range(int(length / 2) + 1):\n if p < (count >> 1):\n sum = tmp[2 * p] + tmp[(2 * p) + 1]\n elif (p == (count >> 1)) and ((count & 1) == 1):\n sum = tmp[2 * p]\n else:\n sum = 0\n\n if shr:\n tmp[p] = sum / 2\n else:\n tmp[p] = sum\n\n count = (count + 1) >> 1\n depth += 1\n print(tmp)\n\n return tmp[0]\n\ndef treeSumNew(tmp, count, height_shr, height_noshr):\n if count == 1:\n return tmp[0]\n\n shr = True\n\n for depth in range(height_shr + height_noshr):\n if depth >= height_shr:\n shr = False\n\n for p in range(count // 2):\n sum = tmp[2 * p] + tmp[(2 * p) + 1]\n\n if shr:\n tmp[p] = sum / 2\n else:\n tmp[p] = sum\n\n if count % 2 == 1:\n index = count // 2 + 1\n if shr:\n tmp[index - 1] = tmp[count - 1] / 2\n else:\n tmp[index - 1] = tmp[count - 1]\n\n tmp[index - 1 + 1] = 0\n else:\n tmp[count // 2] = 0\n\n count = 
(count + 1) >> 1\n print(tmp)\n\n return tmp[0]\n\n# printUspsRNN()\n# printDsaRNN()\n# printSpectakomRNN()\n\ndef treeSumTest():\n #tmp = [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]\n tmp = [0.1]\n tmpNew = list(tmp)\n\n sum = treeSum(tmp, 1, 1, 0)\n print(sum)\n\n print('\\n\\n')\n\n sum = treeSumNew(tmpNew, 1, 0, 1)\n print(sum)\n\ndef computeScale(val, bits):\n l = np.log2(val)\n if int(l) == l:\n c = l + 1\n else:\n c = np.ceil(l)\n\n return -int((bits - 1) - c)\n\ndef test(n, bits):\n print(np.log2(n))\n s1 = computeScale(n, bits)\n s2 = getScale(n, bits)\n v1 = int(np.ldexp(n, -s1))\n v2 = int(np.ldexp(n, -s2))\n print(\"%f scale = %d int = %d\" % (n, s1, v1))\n print(\"%f scale = %d int = %d\" % (n, s2, v2))\n\ndef getShrForMul(scale_A, scale_B):\n bits = 16\n MAX_SCALE = -9\n\n shr1, shr2 = bits // 2, (bits // 2) - 1\n pRes = (scale_A + shr1) + (scale_B + shr2)\n\n if pRes <= MAX_SCALE:\n if scale_A <= scale_B:\n shrA, shrB = shr1, shr2\n else:\n shrA, shrB = shr2, shr1\n\n return [shrA, shrB]\n else:\n save = abs(abs(pRes) - abs(MAX_SCALE))\n if save % 2 == 1:\n shr1 -= 1\n save -= 1\n\n save = save // 2\n\n if scale_A <= scale_B:\n shrA = max(shr1 - save, 0)\n shrB = max(shr2 - save, 0)\n else:\n shrA = max(shr2 - save, 0)\n shrB = max(shr1 - save, 0)\n\n return [shrA, shrB]\n\nx = getShrForMul(-12, -12)\nprint(x)\n" ]
[ [ "numpy.ceil", "numpy.log2", "numpy.ldexp" ] ]
ChaseKnowlden/airflow
[ "6b71eac1997a7c0db3b8e3aed6b4e65d01871440" ]
[ "airflow/providers/presto/hooks/presto.py" ]
[ "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\nimport os\nfrom typing import Any, Iterable, Optional\n\nimport prestodb\nfrom prestodb.exceptions import DatabaseError\nfrom prestodb.transaction import IsolationLevel\n\nfrom airflow import AirflowException\nfrom airflow.configuration import conf\nfrom airflow.hooks.dbapi import DbApiHook\nfrom airflow.models import Connection\n\n\nclass PrestoException(Exception):\n \"\"\"Presto exception\"\"\"\n\n\ndef _boolify(value):\n if isinstance(value, bool):\n return value\n if isinstance(value, str):\n if value.lower() == 'false':\n return False\n elif value.lower() == 'true':\n return True\n return value\n\n\nclass PrestoHook(DbApiHook):\n \"\"\"\n Interact with Presto through prestodb.\n\n >>> ph = PrestoHook()\n >>> sql = \"SELECT count(1) AS num FROM airflow.static_babynames\"\n >>> ph.get_records(sql)\n [[340698]]\n \"\"\"\n\n conn_name_attr = 'presto_conn_id'\n default_conn_name = 'presto_default'\n conn_type = 'presto'\n hook_name = 'Presto'\n\n def get_conn(self) -> Connection:\n \"\"\"Returns a connection object\"\"\"\n db = self.get_connection(self.presto_conn_id) # type: ignore[attr-defined]\n extra = db.extra_dejson\n auth = None\n if db.password and extra.get('auth') == 'kerberos':\n raise AirflowException(\"Kerberos authorization doesn't support password.\")\n elif db.password:\n auth = prestodb.auth.BasicAuthentication(db.login, db.password)\n elif extra.get('auth') == 'kerberos':\n auth = prestodb.auth.KerberosAuthentication(\n config=extra.get('kerberos__config', os.environ.get('KRB5_CONFIG')),\n service_name=extra.get('kerberos__service_name'),\n mutual_authentication=_boolify(extra.get('kerberos__mutual_authentication', False)),\n force_preemptive=_boolify(extra.get('kerberos__force_preemptive', False)),\n hostname_override=extra.get('kerberos__hostname_override'),\n sanitize_mutual_error_response=_boolify(\n extra.get('kerberos__sanitize_mutual_error_response', True)\n ),\n principal=extra.get('kerberos__principal', conf.get('kerberos', 'principal')),\n delegate=_boolify(extra.get('kerberos__delegate', False)),\n ca_bundle=extra.get('kerberos__ca_bundle'),\n )\n\n presto_conn = prestodb.dbapi.connect(\n host=db.host,\n port=db.port,\n user=db.login,\n source=db.extra_dejson.get('source', 'airflow'),\n http_scheme=db.extra_dejson.get('protocol', 'http'),\n catalog=db.extra_dejson.get('catalog', 'hive'),\n schema=db.schema,\n auth=auth,\n isolation_level=self.get_isolation_level(), # type: ignore[func-returns-value]\n )\n if extra.get('verify') is not None:\n # Unfortunately verify parameter is available via public API.\n # The PR is merged in the presto library, but has not been released.\n # See: https://github.com/prestosql/presto-python-client/pull/31\n 
presto_conn._http_session.verify = _boolify(extra['verify'])\n\n return presto_conn\n\n def get_isolation_level(self) -> Any:\n \"\"\"Returns an isolation level\"\"\"\n db = self.get_connection(self.presto_conn_id) # type: ignore[attr-defined]\n isolation_level = db.extra_dejson.get('isolation_level', 'AUTOCOMMIT').upper()\n return getattr(IsolationLevel, isolation_level, IsolationLevel.AUTOCOMMIT)\n\n @staticmethod\n def _strip_sql(sql: str) -> str:\n return sql.strip().rstrip(';')\n\n def get_records(self, hql, parameters: Optional[dict] = None):\n \"\"\"Get a set of records from Presto\"\"\"\n try:\n return super().get_records(self._strip_sql(hql), parameters)\n except DatabaseError as e:\n raise PrestoException(e)\n\n def get_first(self, hql: str, parameters: Optional[dict] = None) -> Any:\n \"\"\"Returns only the first row, regardless of how many rows the query returns.\"\"\"\n try:\n return super().get_first(self._strip_sql(hql), parameters)\n except DatabaseError as e:\n raise PrestoException(e)\n\n def get_pandas_df(self, hql, parameters=None, **kwargs):\n \"\"\"Get a pandas dataframe from a sql query.\"\"\"\n import pandas\n\n cursor = self.get_cursor()\n try:\n cursor.execute(self._strip_sql(hql), parameters)\n data = cursor.fetchall()\n except DatabaseError as e:\n raise PrestoException(e)\n column_descriptions = cursor.description\n if data:\n df = pandas.DataFrame(data, **kwargs)\n df.columns = [c[0] for c in column_descriptions]\n else:\n df = pandas.DataFrame(**kwargs)\n return df\n\n def run(\n self,\n hql,\n autocommit: bool = False,\n parameters: Optional[dict] = None,\n ) -> None:\n \"\"\"Execute the statement against Presto. Can be used to create views.\"\"\"\n return super().run(sql=self._strip_sql(hql), parameters=parameters)\n\n def insert_rows(\n self,\n table: str,\n rows: Iterable[tuple],\n target_fields: Optional[Iterable[str]] = None,\n commit_every: int = 0,\n replace: bool = False,\n **kwargs,\n ) -> None:\n \"\"\"\n A generic way to insert a set of tuples into a table.\n\n :param table: Name of the target table\n :type table: str\n :param rows: The rows to insert into the table\n :type rows: iterable of tuples\n :param target_fields: The names of the columns to fill in the table\n :type target_fields: iterable of strings\n :param commit_every: The maximum number of rows to insert in one\n transaction. Set to 0 to insert all rows in one transaction.\n :type commit_every: int\n :param replace: Whether to replace instead of insert\n :type replace: bool\n \"\"\"\n if self.get_isolation_level() == IsolationLevel.AUTOCOMMIT:\n self.log.info(\n 'Transactions are not enable in presto connection. '\n 'Please use the isolation_level property to enable it. '\n 'Falling back to insert all rows in one transaction.'\n )\n commit_every = 0\n\n super().insert_rows(table, rows, target_fields, commit_every)\n" ]
[ [ "pandas.DataFrame" ] ]
MikyasDesta/NeMo
[ "4995477e6ce49de55b123723e42021c9eff8e2c0" ]
[ "nemo/collections/tts/modules/radtts.py" ]
[ "# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n###############################################################################\nimport torch\nfrom torch import nn\n\nfrom nemo.collections.tts.helpers.common import (\n AffineTransformationLayer,\n ConvAttention,\n Encoder,\n ExponentialClass,\n Invertible1x1Conv,\n Invertible1x1ConvLUS,\n LengthRegulator,\n LinearNorm,\n get_mask_from_lengths,\n)\nfrom nemo.collections.tts.helpers.helpers import mas_width1 as mas\nfrom nemo.collections.tts.modules.attribute_prediction_model import get_attribute_prediction_model\n\n\nclass FlowStep(nn.Module):\n def __init__(\n self,\n n_mel_channels,\n n_context_dim,\n n_layers,\n affine_model='simple_conv',\n scaling_fn='exp',\n matrix_decomposition='',\n affine_activation='softplus',\n use_partial_padding=False,\n ):\n super(FlowStep, self).__init__()\n if matrix_decomposition == 'LUS':\n self.invtbl_conv = Invertible1x1ConvLUS(n_mel_channels)\n else:\n self.invtbl_conv = Invertible1x1Conv(n_mel_channels)\n\n self.affine_tfn = AffineTransformationLayer(\n n_mel_channels,\n n_context_dim,\n n_layers,\n affine_model=affine_model,\n scaling_fn=scaling_fn,\n affine_activation=affine_activation,\n use_partial_padding=use_partial_padding,\n )\n\n def forward(self, z, context, inverse=False, seq_lens=None):\n if inverse: # for inference z-> mel\n z = self.affine_tfn(z, context, inverse, seq_lens=seq_lens)\n z = self.invtbl_conv(z, inverse)\n return z\n else: # training mel->z\n z, log_det_W = self.invtbl_conv(z)\n z, log_s = self.affine_tfn(z, context, seq_lens=seq_lens)\n return z, log_det_W, log_s\n\n\nclass RadTTSModule(torch.nn.Module):\n def __init__(\n self,\n n_speakers,\n n_speaker_dim,\n n_text,\n n_text_dim,\n n_flows,\n n_conv_layers_per_step,\n n_mel_channels,\n n_hidden,\n mel_encoder_n_hidden,\n dummy_speaker_embedding,\n n_early_size,\n n_early_every,\n n_group_size,\n affine_model,\n dur_model_config,\n f0_model_config,\n energy_model_config,\n v_model_config=None,\n include_modules='dec',\n scaling_fn='exp',\n matrix_decomposition='',\n learn_alignments=False,\n affine_activation='softplus',\n attn_use_CTC=True,\n use_context_lstm=False,\n context_lstm_norm=None,\n text_encoder_lstm_norm=None,\n n_f0_dims=0,\n n_energy_avg_dims=0,\n context_lstm_w_f0_and_energy=True,\n use_first_order_features=False,\n unvoiced_bias_activation='',\n ap_pred_log_f0=False,\n **kwargs\n ):\n super(RadTTSModule, self).__init__()\n assert n_early_size % 2 == 0\n self.n_mel_channels = n_mel_channels\n self.n_f0_dims = n_f0_dims # >= 1 to trains with f0\n self.n_energy_avg_dims = n_energy_avg_dims # >= 1 trains with energy\n self.decoder_use_partial_padding = kwargs['decoder_use_partial_padding']\n self.n_speaker_dim = n_speaker_dim\n assert self.n_speaker_dim % 2 == 0\n self.speaker_embedding = torch.nn.Embedding(n_speakers, self.n_speaker_dim)\n self.embedding = torch.nn.Embedding(n_text, n_text_dim)\n self.flows = torch.nn.ModuleList()\n 
self.encoder = Encoder(\n encoder_embedding_dim=n_text_dim, norm_fn=nn.InstanceNorm1d, lstm_norm_fn=text_encoder_lstm_norm\n )\n self.dummy_speaker_embedding = dummy_speaker_embedding\n self.learn_alignments = learn_alignments\n self.affine_activation = affine_activation\n self.include_modules = include_modules\n self.attn_use_CTC = bool(attn_use_CTC)\n self.use_context_lstm = bool(use_context_lstm)\n self.context_lstm_norm = context_lstm_norm\n self.context_lstm_w_f0_and_energy = context_lstm_w_f0_and_energy\n self.length_regulator = LengthRegulator()\n self.use_first_order_features = bool(use_first_order_features)\n self.decoder_use_unvoiced_bias = kwargs['decoder_use_unvoiced_bias']\n self.ap_pred_log_f0 = ap_pred_log_f0\n self.ap_use_unvoiced_bias = kwargs['ap_use_unvoiced_bias']\n if 'atn' in include_modules or 'dec' in include_modules:\n if self.learn_alignments:\n self.attention = ConvAttention(n_mel_channels, self.n_speaker_dim, n_text_dim)\n\n self.n_flows = n_flows\n self.n_group_size = n_group_size\n\n n_flowstep_cond_dims = self.n_speaker_dim + (n_text_dim + n_f0_dims + n_energy_avg_dims) * n_group_size\n\n if self.use_context_lstm:\n n_in_context_lstm = self.n_speaker_dim + n_text_dim * n_group_size\n n_context_lstm_hidden = int((self.n_speaker_dim + n_text_dim * n_group_size) / 2)\n\n if self.context_lstm_w_f0_and_energy:\n n_in_context_lstm = n_f0_dims + n_energy_avg_dims + n_text_dim\n n_in_context_lstm *= n_group_size\n n_in_context_lstm += self.n_speaker_dim\n\n n_context_hidden = n_f0_dims + n_energy_avg_dims + n_text_dim\n n_context_hidden = n_context_hidden * n_group_size / 2\n n_context_hidden = self.n_speaker_dim + n_context_hidden\n n_context_hidden = int(n_context_hidden)\n\n n_flowstep_cond_dims = self.n_speaker_dim + n_text_dim * n_group_size\n\n self.context_lstm = torch.nn.LSTM(\n input_size=n_in_context_lstm,\n hidden_size=n_context_lstm_hidden,\n num_layers=1,\n batch_first=True,\n bidirectional=True,\n )\n\n if context_lstm_norm is not None:\n if 'spectral' in context_lstm_norm:\n print(\"Applying spectral norm to context encoder LSTM\")\n lstm_norm_fn_pntr = torch.nn.utils.spectral_norm\n elif 'weight' in context_lstm_norm:\n print(\"Applying weight norm to context encoder LSTM\")\n lstm_norm_fn_pntr = torch.nn.utils.weight_norm\n\n self.context_lstm = lstm_norm_fn_pntr(self.context_lstm, 'weight_hh_l0')\n self.context_lstm = lstm_norm_fn_pntr(self.context_lstm, 'weight_hh_l0_reverse')\n\n if self.n_group_size > 1:\n self.unfold_params = {\n 'kernel_size': (n_group_size, 1),\n 'stride': n_group_size,\n 'padding': 0,\n 'dilation': 1,\n }\n self.unfold = nn.Unfold(**self.unfold_params)\n\n self.exit_steps = []\n self.n_early_size = n_early_size\n n_mel_channels = n_mel_channels * n_group_size\n\n for i in range(self.n_flows):\n if i > 0 and i % n_early_every == 0: # early exitting\n n_mel_channels -= self.n_early_size\n self.exit_steps.append(i)\n\n self.flows.append(\n FlowStep(\n n_mel_channels,\n n_flowstep_cond_dims,\n n_conv_layers_per_step,\n affine_model,\n scaling_fn,\n matrix_decomposition,\n affine_activation=affine_activation,\n use_partial_padding=self.decoder_use_partial_padding,\n )\n )\n\n if 'dpm' in include_modules:\n dur_model_config['hparams']['n_speaker_dim'] = n_speaker_dim\n self.dur_pred_layer = get_attribute_prediction_model(dur_model_config)\n\n self.use_unvoiced_bias = False\n self.use_vpred_module = False\n self.ap_use_voiced_embeddings = kwargs['ap_use_voiced_embeddings']\n\n if self.decoder_use_unvoiced_bias or 
self.ap_use_unvoiced_bias:\n assert unvoiced_bias_activation in {'relu', 'exp'}\n self.use_unvoiced_bias = True\n if unvoiced_bias_activation == 'relu':\n unvbias_nonlin = nn.ReLU()\n elif unvoiced_bias_activation == 'exp':\n unvbias_nonlin = ExponentialClass()\n else:\n exit(1) # we won't reach here anyway due to the assertion\n self.unvoiced_bias_module = nn.Sequential(LinearNorm(n_text_dim, 1), unvbias_nonlin)\n\n # all situations in which the vpred module is necessary\n if self.ap_use_voiced_embeddings or self.use_unvoiced_bias or 'vpred' in include_modules:\n self.use_vpred_module = True\n\n if self.use_vpred_module:\n v_model_config['hparams']['n_speaker_dim'] = n_speaker_dim\n self.v_pred_module = get_attribute_prediction_model(v_model_config)\n # 4 embeddings, first two are scales, second two are biases\n if self.ap_use_voiced_embeddings:\n self.v_embeddings = torch.nn.Embedding(4, n_text_dim)\n\n if 'apm' in include_modules:\n f0_model_config['hparams']['n_speaker_dim'] = n_speaker_dim\n energy_model_config['hparams']['n_speaker_dim'] = n_speaker_dim\n if self.use_first_order_features:\n f0_model_config['hparams']['n_in_dim'] = 2\n energy_model_config['hparams']['n_in_dim'] = 2\n if (\n 'spline_flow_params' in f0_model_config['hparams']\n and f0_model_config['hparams']['spline_flow_params'] is not None\n ):\n f0_model_config['hparams']['spline_flow_params']['n_in_channels'] = 2\n if (\n 'spline_flow_params' in energy_model_config['hparams']\n and energy_model_config['hparams']['spline_flow_params'] is not None\n ):\n energy_model_config['hparams']['spline_flow_params']['n_in_channels'] = 2\n else:\n if (\n 'spline_flow_params' in f0_model_config['hparams']\n and f0_model_config['hparams']['spline_flow_params'] is not None\n ):\n f0_model_config['hparams']['spline_flow_params']['n_in_channels'] = f0_model_config['hparams'][\n 'n_in_dim'\n ]\n if (\n 'spline_flow_params' in energy_model_config['hparams']\n and energy_model_config['hparams']['spline_flow_params'] is not None\n ):\n energy_model_config['hparams']['spline_flow_params']['n_in_channels'] = energy_model_config[\n 'hparams'\n ]['n_in_dim']\n\n self.f0_pred_module = get_attribute_prediction_model(f0_model_config)\n self.energy_pred_module = get_attribute_prediction_model(energy_model_config)\n\n def encode_speaker(self, spk_ids):\n spk_ids = spk_ids * 0 if self.dummy_speaker_embedding else spk_ids\n spk_vecs = self.speaker_embedding(spk_ids)\n return spk_vecs\n\n def encode_text(self, text, in_lens):\n # text_embeddings: b x len_text x n_text_dim\n text_embeddings = self.embedding(text).transpose(1, 2)\n # text_enc: b x n_text_dim x encoder_dim (512)\n if in_lens is None:\n text_enc = self.encoder.infer(text_embeddings).transpose(1, 2)\n else:\n text_enc = self.encoder(text_embeddings, in_lens).transpose(1, 2)\n\n return text_enc, text_embeddings\n\n def preprocess_context(self, context, speaker_vecs, out_lens=None, f0=None, energy_avg=None):\n\n if self.n_group_size > 1:\n context = self.unfold(context.unsqueeze(-1))\n # (todo): fix unfolding zero-padded values\n if f0 is not None:\n f0 = self.unfold(f0[:, None, :, None])\n if energy_avg is not None:\n energy_avg = self.unfold(energy_avg[:, None, :, None])\n speaker_vecs = speaker_vecs[..., None].expand(-1, -1, context.shape[2])\n context_w_spkvec = torch.cat((context, speaker_vecs), 1)\n\n if self.use_context_lstm:\n if self.context_lstm_w_f0_and_energy:\n if f0 is not None:\n context_w_spkvec = torch.cat((context_w_spkvec, f0), 1)\n\n if energy_avg is not None:\n 
context_w_spkvec = torch.cat((context_w_spkvec, energy_avg), 1)\n\n unfolded_out_lens = (out_lens // self.n_group_size).long().cpu()\n unfolded_out_lens_packed = nn.utils.rnn.pack_padded_sequence(\n context_w_spkvec.transpose(1, 2), unfolded_out_lens, batch_first=True, enforce_sorted=False\n )\n self.context_lstm.flatten_parameters()\n context_lstm_packed_output, _ = self.context_lstm(unfolded_out_lens_packed)\n context_lstm_padded_output, _ = nn.utils.rnn.pad_packed_sequence(\n context_lstm_packed_output, batch_first=True\n )\n context_w_spkvec = context_lstm_padded_output.transpose(1, 2)\n\n if not self.context_lstm_w_f0_and_energy:\n if f0 is not None:\n context_w_spkvec = torch.cat((context_w_spkvec, f0), 1)\n\n if energy_avg is not None:\n context_w_spkvec = torch.cat((context_w_spkvec, energy_avg), 1)\n\n return context_w_spkvec\n\n def fold(self, mel):\n \"\"\"Inverse of the self.unfold(mel.unsqueeze(-1)) operation used for the\n grouping or \"squeeze\" operation on input\n\n Args:\n mel: B x C x T tensor of temporal data\n \"\"\"\n mel = nn.functional.fold(mel, output_size=(mel.shape[2] * self.n_group_size, 1), **self.unfold_params).squeeze(\n -1\n )\n return mel\n\n def binarize_attention(self, attn, in_lens, out_lens):\n \"\"\"For training purposes only. Binarizes attention with MAS. These will\n no longer recieve a gradient\n Args:\n attn: B x 1 x max_mel_len x max_text_len\n \"\"\"\n b_size = attn.shape[0]\n with torch.no_grad():\n attn_cpu = attn.data.cpu().numpy()\n attn_out = torch.zeros_like(attn)\n for ind in range(b_size):\n hard_attn = mas(attn_cpu[ind, 0, : out_lens[ind], : in_lens[ind]])\n attn_out[ind, 0, : out_lens[ind], : in_lens[ind]] = torch.tensor(hard_attn, device=attn.get_device())\n return attn_out\n\n def get_first_order_features(self, feats, out_lens, dilation=1):\n \"\"\"\n feats: b x max_length\n out_lens: b-dim\n \"\"\"\n # add an extra column\n feats_extended_R = torch.cat((feats, torch.zeros_like(feats[:, 0:dilation])), dim=1)\n feats_extended_L = torch.cat((torch.zeros_like(feats[:, 0:dilation]), feats), dim=1)\n dfeats_R = feats_extended_R[:, dilation:] - feats\n dfeats_L = feats - feats_extended_L[:, 0:-dilation]\n\n return (dfeats_R + dfeats_L) * 0.5\n\n def apply_voice_mask_to_text(self, text_enc, voiced_mask):\n \"\"\"\n text_enc: b x C x N\n voiced_mask: b x N\n \"\"\"\n voiced_mask = voiced_mask.unsqueeze(1)\n voiced_embedding_s = self.v_embeddings.weight[0:1, :, None]\n unvoiced_embedding_s = self.v_embeddings.weight[1:2, :, None]\n voiced_embedding_b = self.v_embeddings.weight[2:3, :, None]\n unvoiced_embedding_b = self.v_embeddings.weight[3:4, :, None]\n scale = torch.sigmoid(voiced_embedding_s * voiced_mask + unvoiced_embedding_s * (1 - voiced_mask))\n bias = 0.1 * torch.tanh(voiced_embedding_b * voiced_mask + unvoiced_embedding_b * (1 - voiced_mask))\n return text_enc * scale + bias\n\n def forward(\n self,\n mel,\n speaker_ids,\n text,\n in_lens,\n out_lens,\n binarize_attention=False,\n attn_prior=None,\n f0=None,\n energy_avg=None,\n voiced_mask=None,\n p_voiced=None,\n ):\n speaker_vecs = self.encode_speaker(speaker_ids)\n text_enc, text_embeddings = self.encode_text(text, in_lens)\n\n log_s_list, log_det_W_list, z_mel = [], [], []\n attn = None\n attn_soft = None\n attn_hard = None\n if 'atn' in self.include_modules or 'dec' in self.include_modules:\n # make sure to do the alignments before folding\n attn_mask = get_mask_from_lengths(in_lens)[..., None] == 0\n # attn_mask shld be 1 for unsd t-steps in text_enc_w_spkvec tensor\n 
attn_soft, attn_logprob = self.attention(\n mel, text_embeddings, out_lens, attn_mask, key_lens=in_lens, attn_prior=attn_prior\n )\n\n if binarize_attention:\n attn = self.binarize_attention(attn_soft, in_lens, out_lens)\n attn_hard = attn\n else:\n attn = attn_soft\n\n context = torch.bmm(text_enc, attn.squeeze(1).transpose(1, 2))\n\n f0_bias = 0\n # unvoiced bias forward pass\n if self.use_unvoiced_bias:\n f0_bias = self.unvoiced_bias_module(context.permute(0, 2, 1))\n f0_bias = -f0_bias[..., 0]\n f0_bias = f0_bias * (~voiced_mask.bool()).float()\n\n # mel decoder forward pass\n if 'dec' in self.include_modules:\n if self.n_group_size > 1:\n # might truncate some frames at the end, but that's ok\n # sometimes referred to as the \"squeeeze\" operation\n # invert this by calling self.fold(mel_or_z)\n mel = self.unfold(mel.unsqueeze(-1))\n z_out = []\n # where context is folded\n # mask f0 in case values are interpolated\n if self.decoder_use_unvoiced_bias:\n context_w_spkvec = self.preprocess_context(\n context, speaker_vecs, out_lens, f0 * voiced_mask + f0_bias, energy_avg\n )\n else:\n context_w_spkvec = self.preprocess_context(\n context, speaker_vecs, out_lens, f0 * voiced_mask, energy_avg\n )\n\n log_s_list, log_det_W_list, z_out = [], [], []\n unfolded_seq_lens = out_lens // self.n_group_size\n for i, flow_step in enumerate(self.flows):\n if i in self.exit_steps:\n z = mel[:, : self.n_early_size]\n z_out.append(z)\n mel = mel[:, self.n_early_size :]\n mel, log_det_W, log_s = flow_step(mel, context_w_spkvec, seq_lens=unfolded_seq_lens)\n log_s_list.append(log_s)\n log_det_W_list.append(log_det_W)\n\n z_out.append(mel)\n z_mel = torch.cat(z_out, 1)\n\n # duration predictor forward pass\n duration_model_outputs = None\n if 'dpm' in self.include_modules:\n if attn_hard is None:\n attn_hard = self.binarize_attention(attn_soft, in_lens, out_lens)\n\n # convert hard attention to durations\n attn_hard_reduced = attn_hard.sum(2)[:, 0, :]\n duration_model_outputs = self.dur_pred_layer(\n torch.detach(text_enc), torch.detach(speaker_vecs), torch.detach(attn_hard_reduced.float()), in_lens\n )\n\n # f0, energy, vpred predictors forward pass\n f0_model_outputs = None\n energy_model_outputs = None\n vpred_model_outputs = None\n if 'apm' in self.include_modules:\n if attn_hard is None:\n attn_hard = self.binarize_attention(attn_soft, in_lens, out_lens)\n\n # convert hard attention to durations\n if binarize_attention:\n text_enc_time_expanded = context.clone()\n else:\n text_enc_time_expanded = torch.bmm(text_enc, attn_hard.squeeze(1).transpose(1, 2))\n\n if self.use_vpred_module:\n # unvoiced bias requires voiced mask prediction\n vpred_model_outputs = self.v_pred_module(\n torch.detach(text_enc_time_expanded),\n torch.detach(speaker_vecs),\n torch.detach(voiced_mask),\n out_lens,\n )\n\n # affine transform context using voiced mask\n if self.ap_use_voiced_embeddings:\n text_enc_time_expanded = self.apply_voice_mask_to_text(text_enc_time_expanded, voiced_mask)\n if self.ap_use_unvoiced_bias: # whether to use the unvoiced bias in the attribute predictor\n f0_target = torch.detach(f0 * voiced_mask + f0_bias)\n else:\n f0_target = torch.detach(f0)\n # fit to log f0 in f0 predictor\n f0_target[voiced_mask.bool()] = torch.log(f0_target[voiced_mask.bool()])\n f0_target = f0_target / 6 # scale to ~ [0, 1] in log space\n energy_avg = energy_avg * 2 - 1 # scale to ~ [-1, 1]\n\n if self.use_first_order_features:\n df0 = self.get_first_order_features(f0_target, out_lens)\n denergy_avg = 
self.get_first_order_features(energy_avg, out_lens)\n\n f0_voiced = torch.cat((f0_target[:, None], df0[:, None]), dim=1)\n energy_avg = torch.cat((energy_avg[:, None], denergy_avg[:, None]), dim=1)\n\n f0_voiced = f0_voiced * 3 # scale to ~ 1 std\n energy_avg = energy_avg * 3 # scale to ~ 1 std\n else:\n f0_voiced = f0_target * 2 # scale to ~ 1 std\n energy_avg = energy_avg * 1.4 # scale to ~ 1 std\n f0_model_outputs = self.f0_pred_module(\n text_enc_time_expanded, torch.detach(speaker_vecs), f0_voiced, out_lens\n )\n\n energy_model_outputs = self.energy_pred_module(\n text_enc_time_expanded, torch.detach(speaker_vecs), energy_avg, out_lens\n )\n\n outputs = {\n 'z_mel': z_mel,\n 'log_det_W_list': log_det_W_list,\n 'log_s_list': log_s_list,\n 'duration_model_outputs': duration_model_outputs,\n 'f0_model_outputs': f0_model_outputs,\n 'energy_model_outputs': energy_model_outputs,\n 'vpred_model_outputs': vpred_model_outputs,\n 'attn_soft': attn_soft,\n 'attn': attn,\n 'text_embeddings': text_embeddings,\n 'attn_logprob': attn_logprob,\n }\n\n return outputs\n\n def infer(\n self,\n speaker_id,\n text,\n sigma,\n sigma_txt=0.8,\n sigma_f0=0.8,\n sigma_energy=0.8,\n token_dur_scaling=1.0,\n token_duration_max=100,\n dur=None,\n f0=None,\n energy_avg=None,\n voiced_mask=None,\n ):\n\n n_tokens = text.shape[1]\n spk_vec = self.encode_speaker(speaker_id)\n txt_enc, txt_emb = self.encode_text(text, None)\n\n if dur is None:\n # get token durations\n z_dur = torch.cuda.FloatTensor(1, 1, n_tokens)\n z_dur = z_dur.normal_() * sigma_txt\n\n dur = self.dur_pred_layer.infer(z_dur, txt_enc, spk_vec)\n if dur.shape[-1] < txt_enc.shape[-1]:\n to_pad = txt_enc.shape[-1] - dur.shape[2]\n pad_fn = nn.ReplicationPad1d((0, to_pad))\n dur = pad_fn(dur)\n dur = dur[:, 0]\n dur = dur.clamp(0, token_duration_max)\n dur = dur * token_dur_scaling if token_dur_scaling > 0 else dur\n dur = (dur + 0.5).floor().int()\n\n n_frames = dur.sum().item()\n out_lens = torch.LongTensor([n_frames]).to(txt_enc.device)\n\n # get attributes f0, energy, vpred, etc)\n txt_enc_time_expanded = self.length_regulator(txt_enc.transpose(1, 2), dur).transpose(1, 2)\n\n if voiced_mask is None:\n if self.use_vpred_module:\n # get logits\n voiced_mask = self.v_pred_module.infer(None, txt_enc_time_expanded, spk_vec)\n voiced_mask = torch.sigmoid(voiced_mask[:, 0]) > 0.5\n voiced_mask = voiced_mask.float()\n\n ap_txt_enc_time_expanded = txt_enc_time_expanded\n # voice mask augmentation only used for attribute prediction\n if self.ap_use_voiced_embeddings:\n ap_txt_enc_time_expanded = self.apply_voice_mask_to_text(txt_enc_time_expanded, voiced_mask)\n\n f0_bias = 0\n # unvoiced bias forward pass\n if self.use_unvoiced_bias:\n f0_bias = self.unvoiced_bias_module(txt_enc_time_expanded.permute(0, 2, 1))\n f0_bias = -f0_bias[..., 0]\n f0_bias = f0_bias * (~voiced_mask.bool()).float()\n\n if f0 is None:\n n_f0_feature_channels = 2 if self.use_first_order_features else 1\n z_f0 = torch.cuda.FloatTensor(1, n_f0_feature_channels, n_frames).normal_() * sigma_f0\n f0 = self.infer_f0(z_f0, ap_txt_enc_time_expanded, spk_vec, voiced_mask, out_lens)[:, 0]\n\n if energy_avg is None:\n n_energy_feature_channels = 2 if self.use_first_order_features else 1\n z_energy_avg = torch.cuda.FloatTensor(1, n_energy_feature_channels, n_frames).normal_() * sigma_energy\n energy_avg = self.infer_energy(z_energy_avg, ap_txt_enc_time_expanded, spk_vec, out_lens)[:, 0]\n\n # replication pad, because ungrouping with different group sizes\n # may lead to mismatched lengths\n if 
energy_avg.shape[1] < out_lens[0]:\n to_pad = out_lens[0] - energy_avg.shape[1]\n pad_fn = nn.ReplicationPad1d((0, to_pad))\n # f0 = pad_fn(f0[None])[0]\n energy_avg = pad_fn(energy_avg[None])[0]\n if f0.shape[1] < out_lens[0]:\n to_pad = out_lens[0] - f0.shape[1]\n pad_fn = nn.ReplicationPad1d((0, to_pad))\n f0 = pad_fn(f0[None])[0]\n\n if self.decoder_use_unvoiced_bias:\n context_w_spkvec = self.preprocess_context(\n txt_enc_time_expanded, spk_vec, out_lens, f0 * voiced_mask + f0_bias, energy_avg\n )\n\n else:\n context_w_spkvec = self.preprocess_context(\n txt_enc_time_expanded, spk_vec, out_lens, f0 * voiced_mask, energy_avg\n )\n\n residual = torch.cuda.FloatTensor(1, 80 * self.n_group_size, n_frames // self.n_group_size)\n residual = residual.normal_() * sigma\n\n # map from z sample to data\n exit_steps_stack = self.exit_steps.copy()\n mel = residual[:, len(exit_steps_stack) * self.n_early_size :]\n remaining_residual = residual[:, : len(exit_steps_stack) * self.n_early_size]\n unfolded_seq_lens = out_lens // self.n_group_size\n for i, flow_step in enumerate(reversed(self.flows)):\n curr_step = len(self.flows) - i - 1\n mel = flow_step(mel, context_w_spkvec, inverse=True, seq_lens=unfolded_seq_lens)\n if len(exit_steps_stack) > 0 and curr_step == exit_steps_stack[-1]:\n # concatenate the next chunk of z\n exit_steps_stack.pop()\n residual_to_add = remaining_residual[:, len(exit_steps_stack) * self.n_early_size :]\n remaining_residual = remaining_residual[:, : len(exit_steps_stack) * self.n_early_size]\n mel = torch.cat((residual_to_add, mel), 1)\n\n if self.n_group_size > 1:\n mel = self.fold(mel)\n\n return {'mel': mel, 'dur': dur, 'f0': f0, 'energy_avg': energy_avg}\n\n def infer_f0(self, residual, txt_enc_time_expanded, spk_vec, voiced_mask=None, lens=None):\n print(\"txt_enc_time_expanded\", txt_enc_time_expanded.size())\n print(\"spk_vec\", spk_vec.size())\n f0 = self.f0_pred_module.infer(residual, txt_enc_time_expanded, spk_vec, lens)\n\n if voiced_mask is not None and len(voiced_mask.shape) == 2:\n voiced_mask = voiced_mask[:, None]\n # constants\n if self.ap_pred_log_f0:\n if self.use_first_order_features:\n f0 = f0[:, 0:1, :] / 3\n else:\n f0 = f0 / 2\n f0 = f0 * 6\n else:\n f0 = f0 / 6\n f0 = f0 / 640\n\n if voiced_mask is None:\n voiced_mask = f0 > 0.0\n else:\n voiced_mask = voiced_mask.bool()\n # due to grouping, f0 might be 1 frame short\n voiced_mask = voiced_mask[:, :, : f0.shape[-1]]\n if self.ap_pred_log_f0:\n # if variable is set, decoder sees linear f0\n # mask = f0 > 0.0 if voiced_mask is None else voiced_mask.bool()\n f0[voiced_mask] = torch.exp(f0[voiced_mask])\n f0[~voiced_mask] = 0.0\n return f0\n\n def infer_energy(self, residual, txt_enc_time_expanded, spk_vec, lens):\n energy = self.energy_pred_module.infer(residual, txt_enc_time_expanded, spk_vec, lens)\n\n # magic constants\n if self.use_first_order_features:\n energy = energy / 3\n else:\n energy = energy / 1.4\n energy = (energy + 1) / 2\n return energy\n\n def remove_norms(self):\n \"\"\"Removes spectral and weightnorms from model. 
Call before inference\n \"\"\"\n for name, module in self.named_modules():\n try:\n nn.utils.remove_spectral_norm(module, name='weight_hh_l0')\n print(\"Removed spectral norm from {}\".format(name))\n except:\n pass\n try:\n nn.utils.remove_spectral_norm(module, name='weight_hh_l0_reverse')\n print(\"Removed spectral norm from {}\".format(name))\n except:\n pass\n try:\n nn.utils.remove_weight_norm(module)\n print(\"Removed wnorm from {}\".format(name))\n except:\n pass\n" ]
[ [ "torch.cat", "torch.nn.Embedding", "torch.tanh", "torch.detach", "torch.nn.utils.rnn.pad_packed_sequence", "torch.no_grad", "torch.nn.utils.remove_weight_norm", "torch.sigmoid", "torch.LongTensor", "torch.nn.ReplicationPad1d", "torch.nn.ModuleList", "torch.zeros_like", "torch.cuda.FloatTensor", "torch.exp", "torch.nn.utils.remove_spectral_norm", "torch.nn.functional.fold", "torch.nn.LSTM", "torch.nn.Unfold", "torch.nn.ReLU" ] ]
gergely-flamich/relative-entropy-coding
[ "c99d90cabec4395de2d01d889bd2b7ed7b7453d7", "c99d90cabec4395de2d01d889bd2b7ed7b7453d7" ]
[ "rec/models/resnet_vae.py", "rec/io/tests/coding_test.py" ]
[ "from typing import Tuple\n\nimport numpy as np\n\nimport tensorflow as tf\nimport tensorflow_probability as tfp\n\nfrom rec.models.custom_modules import ReparameterizedConv2D, ReparameterizedConv2DTranspose, AutoRegressiveMultiConv2D, \\\n SignalConv2D\nfrom rec.coding import GaussianCoder, BeamSearchCoder\nfrom rec.coding.samplers import RejectionSampler, ImportanceSampler\n\ntfl = tf.keras.layers\ntfk = tf.keras\ntfd = tfp.distributions\n\n\nclass ModelError(Exception):\n pass\n\n\nclass BidirectionalResidualBlock(tfl.Layer):\n \"\"\"\n Implements a bidirectional Resnet Block\n \"\"\"\n\n AVAILABLE_DISTRIBUTIONS = {\n \"gaussian\": tfd.Normal,\n \"cauchy\": tfd.Cauchy,\n }\n\n def __init__(self,\n stochastic_filters: int,\n deterministic_filters: int,\n sampler: str,\n sampler_args: dict = {},\n coder_args: dict = {},\n distribution: str = \"gaussian\",\n kernel_size: Tuple[int, int] = (3, 3),\n use_iaf: bool = False,\n is_last: bool = False,\n kl_per_partition=8.,\n use_sig_convs=False,\n name: str = \"bidirectional_resnet_block\",\n **kwargs):\n super().__init__(name=name,\n **kwargs)\n\n if distribution not in self.AVAILABLE_DISTRIBUTIONS:\n raise ValueError(f\"Distribution must be one of {self.AVAILABLE_DISTRIBUTIONS}, \"\n f\"but {distribution} was given!\")\n\n self.distribution_type = distribution\n self.distribution = self.AVAILABLE_DISTRIBUTIONS[distribution]\n\n # Number of filters for the stochastic layers\n self.stochastic_filters = stochastic_filters\n\n # Number of filters for the deterministic residual features\n self.deterministic_filters = deterministic_filters\n self.kernel_size = kernel_size\n\n # If the resnet block is the last one in the VAE, we won't use the final bit of the residual block.\n self.is_last = is_last\n\n # Use inverse autoregressive flows as the posterior?\n self.use_iaf = use_iaf\n\n self.use_sig_convs = use_sig_convs\n\n # ---------------------------------------------------------------------\n # Declare layers\n # ---------------------------------------------------------------------\n # Infernce block parts\n self.infer_conv1 = None\n self.infer_conv2 = None\n\n self.infer_posterior_loc_head = None\n self.infer_posterior_log_scale_head = None\n\n # Generative block parts\n self.gen_conv1 = None\n self.gen_conv2 = None\n\n self.prior_loc_head = None\n self.prior_log_scale_head = None\n\n self.gen_posterior_loc_head = None\n self.gen_posterior_log_scale_head = None\n\n # ---------------------------------------------------------------------\n # Declare Bidirectional inference components\n # ---------------------------------------------------------------------\n self.infer_posterior_loc = 0.\n self.infer_posterior_log_scale = 0.\n\n self.gen_posterior_loc = 0.\n self.gen_posterior_log_scale = 0.\n\n self.prior_loc = 0.\n self.prior_scale = 1.\n\n # ---------------------------------------------------------------------\n # Distributions associated with the current residual block\n # ---------------------------------------------------------------------\n self.posterior = None\n self.prior = None\n\n self.infer_iaf_autoregressive_context_conv = None\n self.gen_iaf_autoregressive_context_conv = None\n self.iaf_posterior_multiconv = None\n\n self.infer_iaf_autoregressive_context = None\n self.gen_iaf_autoregressive_context = None\n\n self.empirical_kld = 0.\n\n # ---------------------------------------------------------------------\n # Stuff for compression\n # ---------------------------------------------------------------------\n if sampler == 
\"rejection\":\n self.coder = GaussianCoder(sampler=RejectionSampler(**sampler_args),\n kl_per_partition=kl_per_partition,\n name=f\"encoder_for_{self.name}\",\n **coder_args)\n elif sampler == \"importance\":\n # Setting alpha=inf will select the sample with\n # the best importance weights\n self.coder = GaussianCoder(sampler=ImportanceSampler(**sampler_args),\n kl_per_partition=kl_per_partition,\n name=f\"encoder_for_{self.name}\",\n **coder_args)\n elif sampler == \"beam_search\":\n self.coder = BeamSearchCoder(kl_per_partition=kl_per_partition,\n n_beams=sampler_args['n_beams'],\n extra_samples=sampler_args['extra_samples'],\n name=f\"encoder_for_{self.name}\",\n **coder_args)\n else:\n raise ModelError(\"Sampler must be one of ['rejection', 'importance', 'beam_search'],\"\n f\"but got {sampler}!\")\n\n # ---------------------------------------------------------------------\n # Initialization flag\n # ---------------------------------------------------------------------\n self._initialized = tf.Variable(False, name=\"resnet_block_initialized\", trainable=False)\n\n @property\n def posterior_loc(self):\n return self.infer_posterior_loc + self.gen_posterior_loc\n\n @property\n def posterior_scale(self):\n return tf.exp(self.infer_posterior_log_scale + self.gen_posterior_log_scale)\n\n @property\n def iaf_autoregressive_context(self):\n if not self.use_iaf:\n raise ModelError(\"IAF contexts only exist when model is in IAF mode!\")\n\n return self.infer_iaf_autoregressive_context + self.gen_iaf_autoregressive_context\n\n def kl_divergence(self, empirical=False, minimum_kl=0.):\n\n if self.use_iaf and not empirical:\n raise ModelError(\"KL divergence cannot be computed analytically when\"\n \"using IAFs as posterior!\")\n\n if empirical:\n kld = self.empirical_kld\n else:\n if self.distribution_type == \"gaussian\":\n kld = tfd.kl_divergence(self.posterior, self.prior)\n elif self.distribution_type == \"cauchy\":\n kld = (tf.math.log(tf.math.square(self.prior.scale + self.posterior.scale) +\n tf.math.squared_difference(self.prior.loc, self.posterior.loc))\n - tf.math.log(4. 
* self.prior.scale) - tf.math.log(self.posterior.scale))\n\n else:\n raise NotImplementedError\n\n # The parameters are shared per channel, so we first calculate the average\n # across the batch, width and height axes, then apply the minimum KL constraint,\n # and finally sum across the filters\n kld = tf.reduce_mean(tf.reduce_sum(kld, axis=[1, 2]), axis=[0])\n\n kld = tf.maximum(kld, minimum_kl)\n\n kld = tf.reduce_sum(kld)\n\n return kld\n\n def build(self, input_shape):\n # ---------------------------------------------------------------------\n # Stuff for the inference side\n # ---------------------------------------------------------------------\n\n if not self.is_last:\n self.infer_conv1 = (SignalConv2D(filters=self.deterministic_filters,\n kernel=self.kernel_size,\n strides_down=1,\n corr=True,\n padding=\"reflect\",\n use_bias=True,\n name=\"infer_conv_0\")\n if self.use_sig_convs else\n ReparameterizedConv2D(filters=self.deterministic_filters,\n kernel_size=self.kernel_size,\n strides=(1, 1),\n padding=\"same\",\n name=\"infer_conv_0\"))\n\n self.infer_conv2 = (SignalConv2D(filters=self.deterministic_filters,\n kernel=self.kernel_size,\n strides_down=1,\n corr=True,\n padding=\"reflect\",\n use_bias=True,\n name=\"infer_conv_1\")\n if self.use_sig_convs else\n ReparameterizedConv2D(filters=self.deterministic_filters,\n kernel_size=self.kernel_size,\n strides=(1, 1),\n padding=\"same\",\n name=\"infer_conv_1\"))\n\n self.infer_posterior_loc_head = (SignalConv2D(filters=self.stochastic_filters,\n kernel=self.kernel_size,\n strides_down=1,\n corr=True,\n padding=\"reflect\",\n use_bias=not self.is_last,\n name=\"infer_posterior_loc_head\")\n if self.use_sig_convs else\n ReparameterizedConv2D(filters=self.stochastic_filters,\n kernel_size=self.kernel_size,\n strides=(1, 1),\n padding=\"same\",\n name=\"infer_posterior_loc_head\"))\n\n self.infer_posterior_log_scale_head = (SignalConv2D(filters=self.stochastic_filters,\n kernel=self.kernel_size,\n strides_down=1,\n corr=True,\n padding=\"reflect\",\n use_bias=not self.is_last,\n name=\"infer_posterior_log_scale_head\")\n if self.use_sig_convs else\n ReparameterizedConv2D(filters=self.stochastic_filters,\n kernel_size=self.kernel_size,\n strides=(1, 1),\n padding=\"same\",\n name=\"infer_posterior_log_scale_head\"))\n\n # ---------------------------------------------------------------------\n # Stuff for the generative side\n # Note: In the general case, these should technically be deconvolutions, but\n # in the original implementation the dimensions within a single block do not\n # decrease, hence there is not much point in using the more expensive operation\n # ---------------------------------------------------------------------\n self.gen_conv1 = (SignalConv2D(filters=self.deterministic_filters,\n kernel=self.kernel_size,\n strides_up=1,\n corr=False,\n padding=\"reflect\",\n use_bias=True,\n name=\"gen_conv_0\")\n if self.use_sig_convs else\n ReparameterizedConv2D(filters=self.deterministic_filters,\n kernel_size=self.kernel_size,\n strides=(1, 1),\n padding=\"same\",\n name=\"gen_conv_0\"))\n\n self.gen_conv2 = (SignalConv2D(filters=self.deterministic_filters,\n kernel=self.kernel_size,\n strides_up=1,\n corr=False,\n padding=\"reflect\",\n use_bias=True,\n name=\"gen_conv_1\")\n if self.use_sig_convs else\n ReparameterizedConv2D(filters=self.deterministic_filters,\n kernel_size=self.kernel_size,\n strides=(1, 1),\n padding=\"same\",\n name=\"gen_conv_1\"))\n\n self.prior_loc_head = (SignalConv2D(filters=self.stochastic_filters,\n 
kernel=self.kernel_size,\n strides_up=1,\n corr=False,\n padding=\"reflect\",\n use_bias=True,\n name=\"prior_loc_head\")\n if self.use_sig_convs else\n ReparameterizedConv2D(filters=self.stochastic_filters,\n kernel_size=self.kernel_size,\n strides=(1, 1),\n padding=\"same\",\n name=\"prior_loc_head\"))\n\n self.prior_log_scale_head = (SignalConv2D(filters=self.stochastic_filters,\n kernel=self.kernel_size,\n strides_up=1,\n corr=False,\n padding=\"reflect\",\n use_bias=True,\n name=\"prior_log_scale_head\")\n if self.use_sig_convs else\n ReparameterizedConv2D(filters=self.stochastic_filters,\n kernel_size=self.kernel_size,\n strides=(1, 1),\n padding=\"same\",\n name=\"prior_log_scale_head\"))\n\n self.gen_posterior_loc_head = (SignalConv2D(filters=self.stochastic_filters,\n kernel=self.kernel_size,\n strides_up=1,\n corr=False,\n padding=\"reflect\",\n use_bias=True,\n name=\"gen_posterior_loc_head\")\n if self.use_sig_convs else\n ReparameterizedConv2D(filters=self.stochastic_filters,\n kernel_size=self.kernel_size,\n strides=(1, 1),\n padding=\"same\",\n name=\"gen_posterior_loc_head\"))\n\n self.gen_posterior_log_scale_head = (SignalConv2D(filters=self.stochastic_filters,\n kernel=self.kernel_size,\n strides_up=1,\n corr=False,\n padding=\"reflect\",\n use_bias=True,\n name=\"gen_posterior_log_scale_head\")\n if self.use_sig_convs else\n ReparameterizedConv2D(filters=self.stochastic_filters,\n kernel_size=self.kernel_size,\n strides=(1, 1),\n padding=\"same\",\n name=\"gen_posterior_log_scale_head\"))\n\n # ---------------------------------------------------------------------\n # If we use IAF posteriors, we need some additional layers\n # ---------------------------------------------------------------------\n if self.use_iaf:\n self.infer_iaf_autoregressive_context_conv = ReparameterizedConv2D(\n filters=self.deterministic_filters,\n kernel_size=self.kernel_size,\n strides=(1, 1),\n padding=\"same\"\n )\n\n self.gen_iaf_autoregressive_context_conv = ReparameterizedConv2D(\n filters=self.deterministic_filters,\n kernel_size=self.kernel_size,\n strides=(1, 1),\n padding=\"same\"\n )\n\n self.iaf_posterior_multiconv = AutoRegressiveMultiConv2D(\n convolution_filters=[self.deterministic_filters,\n self.deterministic_filters],\n head_filters=[self.stochastic_filters,\n self.stochastic_filters]\n )\n\n super().build(input_shape=input_shape)\n\n def call(self, tensor, inference_pass=True, encoder_args=None, decoder_args=None, eps=1e-7):\n \"\"\"\n\n :param tensor: data to be passed through the residual block\n :param inference_pass:\n :return:\n \"\"\"\n\n input = tensor\n\n # First layer in block\n tensor = tf.nn.elu(tensor)\n # ---------------------------------------------------------------------\n # Inference pass\n # ---------------------------------------------------------------------\n if inference_pass:\n\n # Calculate first part of posterior statistics\n self.infer_posterior_loc = self.infer_posterior_loc_head(tensor)\n self.infer_posterior_log_scale = self.infer_posterior_log_scale_head(tensor)\n\n if self.use_iaf:\n self.infer_iaf_autoregressive_context = self.infer_iaf_autoregressive_context_conv(tensor)\n\n # Calculate next set of deterministic features\n if not self.is_last:\n tensor = self.infer_conv1(tensor)\n tensor = tf.nn.elu(tensor)\n\n tensor = self.infer_conv2(tensor)\n\n # ---------------------------------------------------------------------\n # Generative pass\n # ---------------------------------------------------------------------\n else:\n\n # Calculate prior 
parameters\n self.prior_loc = self.prior_loc_head(tensor)\n self.prior_scale = tf.exp(self.prior_log_scale_head(tensor))\n\n self.prior = self.distribution(loc=self.prior_loc,\n scale=self.prior_scale)\n # -----------------------------------------------------------------\n # Training\n # -----------------------------------------------------------------\n\n # If no latent code is provided, we need to create it\n if encoder_args is None and decoder_args is None:\n # Calculate second part of posterior statistics\n self.gen_posterior_loc = self.gen_posterior_loc_head(tensor)\n self.gen_posterior_log_scale = self.gen_posterior_log_scale_head(tensor)\n\n # Sample from posterior. The loc and scale are automagically calculated using property methods\n self.posterior = tfd.Normal(loc=self.posterior_loc,\n scale=self.posterior_scale)\n\n if self._initialized:\n latent_code = self.posterior.sample()\n else:\n latent_code = self.prior.sample()\n self._initialized.assign(True)\n\n post_log_prob = self.posterior.log_prob(latent_code)\n\n if self.use_iaf:\n self.gen_iaf_autoregressive_context = self.gen_iaf_autoregressive_context_conv(tensor)\n\n context = self.iaf_autoregressive_context\n\n iaf_mean, iaf_log_scale = self.iaf_posterior_multiconv(latent_code,\n context=context)\n\n iaf_mean = 0.1 * iaf_mean\n iaf_log_scale = 0.1 * iaf_log_scale\n\n # Update latent code\n latent_code = (latent_code - iaf_mean) / tf.exp(iaf_log_scale)\n\n # Update posterior log probability with IAF's Jacobian logdet\n post_log_prob = post_log_prob + iaf_log_scale\n\n # Note: prior log probability needs to be calculated once we passed the latent\n # code through the IAF, since we care about the transformed sample!\n prior_log_prob = self.prior.log_prob(latent_code)\n\n self.empirical_kld = post_log_prob - prior_log_prob\n\n # -----------------------------------------------------------------\n # Compression\n # -----------------------------------------------------------------\n if encoder_args is not None:\n # Calculate second part of posterior statistics\n self.gen_posterior_loc = self.gen_posterior_loc_head(tensor)\n self.gen_posterior_log_scale = self.gen_posterior_log_scale_head(tensor)\n\n # The loc and scale are automagically calculated using property methods\n self.posterior = tfd.Normal(loc=self.posterior_loc,\n scale=self.posterior_scale)\n indices, latent_code = self.coder.encode(self.posterior, self.prior, **encoder_args)\n\n # -----------------------------------------------------------------\n # Decompression\n # -----------------------------------------------------------------\n if decoder_args is not None:\n latent_code = self.coder.decode(self.prior, **decoder_args)\n\n # Calculate next set of deterministic features for residual block\n tensor = self.gen_conv1(tensor)\n\n # Concatenate code and generative features. The channels are always the last axis\n tensor = tf.concat([tensor, latent_code], axis=-1)\n\n tensor = tf.nn.elu(tensor)\n\n tensor = self.gen_conv2(tensor)\n\n # Add residual connection. 
Scaling factor taken from\n # https://github.com/hilloc-submission/hilloc/blob/b89e9c983e3764798e7c6f81f5cfc1d11b349d96/experiments/rvae/model/__init__.py#L116\n tensor = input + 0.1 * tensor\n\n if encoder_args is not None:\n return indices, tensor\n\n return tensor\n\n def update_coders(self):\n self.coder.update_auxiliary_variance_ratios(target_dist=self.posterior,\n coding_dist=self.prior)\n\n def posterior_log_prob(self, tensor):\n if self.use_iaf:\n raise NotImplementedError\n\n else:\n return tf.reduce_sum(self.posterior.log_prob(tensor))\n\n def prior_log_prob(self, tensor):\n return tf.reduce_sum(self.prior.log_prob(tensor))\n\n\nclass BidirectionalResNetVAE(tfk.Model):\n \"\"\"\n Implements the bidirectional ResNetVAE as described in:\n D. P. Kingma, T. Salimans, R. Jozefowicz, X. Chen, I. Sutskever, and M. Welling.\n Improved variational inference with inverse autoregressive flow.\n In Advances in Neural Information ProcessingSystems (NIPS), 2016.\n \"\"\"\n\n AVAILABLE_LIKELIHOODS = [\n \"discretized_logistic\",\n \"gaussian\",\n \"laplace\",\n \"ms-ssim\"\n ]\n\n def __init__(self,\n num_res_blocks,\n sampler,\n sampler_args={},\n coder_args={},\n likelihood_function=\"discretized_logistic\",\n learn_likelihood_scale=True,\n first_kernel_size=(5, 5),\n first_strides=(2, 2),\n kernel_size=(3, 3),\n strides=(1, 1),\n deterministic_filters=160,\n stochastic_filters=32,\n use_iaf=False,\n kl_per_partition=8.,\n latent_size=\"variable\",\n ema_decay=0.999,\n name=\"resnet_vae\",\n **kwargs):\n super().__init__(name=name,\n **kwargs)\n\n # ---------------------------------------------------------------------\n # Assign hyperparamteres\n # ---------------------------------------------------------------------\n self.sampler_name = str(sampler)\n\n self.num_res_blocks = num_res_blocks\n\n self.learn_likelihood_scale = learn_likelihood_scale\n\n if likelihood_function not in self.AVAILABLE_LIKELIHOODS:\n raise ModelError(f\"Likelihood function must be one of: {self.AVAILABLE_LIKELIHOODS}! 
\"\n f\"({likelihood_function} was given).\")\n\n self._likelihood_function = likelihood_function\n\n self.first_kernel_size = first_kernel_size\n self.first_strides = first_strides\n\n self.kernel_size = kernel_size\n self.strides = strides\n self.stochastic_filters = stochastic_filters\n self.deterministic_filters = deterministic_filters\n\n self.use_iaf = use_iaf\n\n self.kl_per_partition = kl_per_partition\n # Decay for exponential moving average update to variables\n self.ema_decay = tf.cast(ema_decay, tf.float32)\n\n # ---------------------------------------------------------------------\n # Create parameters\n # ---------------------------------------------------------------------\n self.likelihood_log_scale = tf.Variable(0.,\n name=\"likelihood_log_scale\",\n trainable=self.learn_likelihood_scale)\n\n # ---------------------------------------------------------------------\n # Create ResNet Layers\n # ---------------------------------------------------------------------\n self.first_infer_conv = ReparameterizedConv2D(kernel_size=self.first_kernel_size,\n strides=self.first_strides,\n filters=self.deterministic_filters,\n padding=\"same\")\n\n self.last_gen_conv = ReparameterizedConv2DTranspose(kernel_size=self.first_kernel_size,\n strides=self.first_strides,\n filters=3,\n padding=\"same\")\n\n # We create these in topological order.\n # This means that residual_blocks[0] will have the bottom-most stochastic layer\n # And residual_blocks[-1] will have the top-most one, the output of which should be passed to last_gen_conv\n self.residual_blocks = [BidirectionalResidualBlock(stochastic_filters=self.stochastic_filters,\n deterministic_filters=self.deterministic_filters,\n sampler=self.sampler_name,\n sampler_args=sampler_args,\n coder_args=coder_args,\n kernel_size=self.kernel_size,\n is_last=res_block_idx == 0, # Declare last residual block\n use_iaf=self.use_iaf,\n kl_per_partition=self.kl_per_partition,\n name=f\"resnet_block_{res_block_idx}\")\n for res_block_idx in range(self.num_res_blocks)]\n\n # Likelihood distribution\n self.likelihood_dist = None\n\n # Likelihood of the most recent sample\n self.log_likelihood = -np.inf\n\n # this variable will allow us to perform Empirical Bayes on the first prior\n # Referred to as \"h_top\" in both the Kingma and Townsend implementations\n self._generative_base = tf.Variable(tf.zeros(self.deterministic_filters),\n name=\"generative_base\")\n\n # ---------------------------------------------------------------------\n # EMA shadow variables\n # ---------------------------------------------------------------------\n self._ema_shadow_variables = {}\n\n def generative_base(self, batch_size, height, width):\n\n base = tf.reshape(self._generative_base, [1, 1, 1, self.deterministic_filters])\n\n return tf.tile(base, [batch_size, height // 2, width // 2, 1])\n\n @property\n def likelihood_function(self):\n\n likelihood_scale = tf.math.exp(self.likelihood_log_scale)\n\n def discretized_logistic(reference, reconstruction, binsize=1. 
/ 256.):\n\n # Discretize the output\n discretized_input = tf.math.floor(reference / binsize) * binsize\n discretized_input = (discretized_input - reconstruction) / likelihood_scale\n\n log_likelihood = tf.nn.sigmoid(discretized_input + binsize / likelihood_scale)\n log_likelihood = log_likelihood - tf.nn.sigmoid(discretized_input)\n\n log_likelihood = tf.math.log(log_likelihood + 1e-7)\n return tf.reduce_sum(log_likelihood, [1, 2, 3])\n\n def gaussian_log_prob(reference, reconstruction):\n likelihood = tfd.Normal(loc=reconstruction, scale=likelihood_scale)\n return tf.reduce_sum(likelihood.log_prob(reference), [1, 2, 3])\n\n def laplace_log_prob(reference, reconstruction):\n likelihood = tfd.Laplace(loc=reconstruction, scale=likelihood_scale)\n\n return tf.reduce_sum(likelihood.log_prob(reference), [1, 2, 3])\n\n # TODO\n def discretized_laplace_log_prob(reference, reconstruction, binsize=1. / 256.):\n\n # Discretize the output\n discretized_input = tf.math.floor(reference / binsize) * binsize\n\n def ms_ssim_pseudo_log_prob(reference, reconstruction):\n return 1. / likelihood_scale * tf.image.ssim_multiscale(reference / likelihood_scale,\n reconstruction / likelihood_scale,\n max_val=1.0)\n\n if self._likelihood_function == \"discretized_logistic\":\n return discretized_logistic\n\n elif self._likelihood_function == \"gaussian\":\n return gaussian_log_prob\n\n elif self._likelihood_function == \"laplace\":\n return laplace_log_prob\n\n elif self._likelihood_function == \"ms-ssim\":\n return ms_ssim_pseudo_log_prob\n\n else:\n raise NotImplementedError\n\n def call(self, tensor, binsize=1 / 256.0):\n input = tensor\n batch_size, height, width, _ = input.shape\n\n # ---------------------------------------------------------------------\n # Perform Inference Pass\n # ---------------------------------------------------------------------\n tensor = self.first_infer_conv(tensor)\n\n # We go through the residual blocks in reverse topological order for the inference pass\n for res_block in self.residual_blocks[::-1]:\n tensor = res_block(tensor, inference_pass=True)\n\n # ---------------------------------------------------------------------\n # Perform Generative Pass\n # ---------------------------------------------------------------------\n tensor = self.generative_base(batch_size, height, width)\n\n # We go through the residual blocks in topological order for the generative pass\n for res_block in self.residual_blocks:\n tensor = res_block(tensor, inference_pass=False)\n\n reconstruction = tf.nn.elu(tensor)\n reconstruction = self.last_gen_conv(reconstruction)\n reconstruction = tf.clip_by_value(reconstruction, -0.5 + 1. / 512., 0.5 - 1. 
/ 512.)\n\n # Gaussian Likelihood\n # self.likelihood_dist = tfd.Normal(loc=tensor,\n # scale=1.)\n #\n # self.log_likelihood = self.likelihood_dist.log_prob(original_tensor)\n\n # Discretized Logistic Likelihood\n log_likelihood = self.likelihood_function(input, reconstruction)\n self.log_likelihood = tf.reduce_mean(log_likelihood)\n\n # If it's the initialization round, create our EMA shadow variables\n if not self.is_ema_variables_initialized:\n self.create_ema_variables()\n\n return reconstruction + 0.5\n\n def kl_divergence(self, empirical=False, minimum_kl=0., reduce=True):\n\n if self.use_iaf and not empirical:\n raise ModelError(\"KL divergence cannot be computed analytically when\"\n \"using IAFs as posterior!\")\n\n kls = [res_block.kl_divergence(empirical=empirical, minimum_kl=minimum_kl)\n for res_block in self.residual_blocks]\n\n if reduce:\n return tf.reduce_sum(kls)\n else:\n return kls\n\n @property\n def is_ema_variables_initialized(self):\n return len(self._ema_shadow_variables) > 0\n\n def create_ema_variables(self):\n \"\"\"\n Creates a shadow copy of every trainable variable. These shadow variables are updated at every training\n iteration using an exponential moving average rule. The EMA variables can then be swapped in for the\n real values at evaluation time, as they supposedly give better performance.\n :return:\n \"\"\"\n\n # If the EMA variables have been created already, just skip\n if self.is_ema_variables_initialized:\n return\n\n self._ema_shadow_variables = {v.name: tf.Variable(v,\n name=f\"{v.name}/exponential_moving_average\",\n trainable=False)\n for v in self.trainable_variables}\n\n def update_ema_variables(self):\n \"\"\"\n Update the EMA variables with the latest value of all the current trainable variables.\n\n This implementation is based on tf.compat.v1.train.ExponentialMovingAverage:\n https://github.com/tensorflow/tensorflow/blob/e5bf8de410005de06a7ff5393fafdf832ef1d4ad/tensorflow/python/training/moving_averages.py#L35\n :return:\n \"\"\"\n if not self.is_ema_variables_initialized:\n raise ModelError(\"EMA variables haven't been created yet, since the model has not been initialized yet!\")\n\n for v in self.trainable_variables:\n ema_var = self._ema_shadow_variables[v.name]\n ema_var.assign_sub((1.0 - self.ema_decay) * (ema_var - v))\n\n def swap_in_ema_variables(self):\n \"\"\"\n Swap in the EMA shadow variables in place of the real ones for evaluation.\n NOTE: Once the EMA variables have been swapped in, there is no way of swapping back!\n :return:\n \"\"\"\n if not self.is_ema_variables_initialized:\n raise ModelError(\"EMA variables haven't been created yet, since the model has not been initialized yet!\")\n\n for v in self.trainable_variables:\n v.assign(self._ema_shadow_variables[v.name])\n\n # =========================================================================\n # Compression\n # =========================================================================\n\n def update_coders(self, images):\n # To initialize the coders, we first perform a forward pass with the supplied images.\n # This will set the posteriors and priors in the residual blocks\n self.call(images)\n\n for res_block in self.residual_blocks:\n res_block.update_coders()\n\n def compress(self, image, seed, update_sampler=False):\n batch_size, height, width, _ = image.shape\n tensor = image\n\n tensor = self.first_infer_conv(tensor)\n\n # We first calculate the inference statistics of the image.\n # Note that the ResNet blocks are ordered according to the order of a 
generative pass,\n # so we iterate the list in reverse\n for resnet_block in self.residual_blocks[::-1]:\n tensor = resnet_block(tensor, inference_pass=True, )\n\n # Once the inference pass is complete, we code each of the blocks sequentially\n tensor = self.generative_base(batch_size=batch_size,\n width=width,\n height=height)\n\n block_indices = []\n for resnet_block in self.residual_blocks:\n indices, tensor = resnet_block(tensor,\n inference_pass=False,\n encoder_args={\"seed\": seed, \"update_sampler\": update_sampler})\n\n block_indices.append(indices)\n\n reconstruction = tf.nn.elu(tensor)\n reconstruction = self.last_gen_conv(reconstruction)\n reconstruction = tf.clip_by_value(reconstruction, -0.5 + 1. / 512., 0.5 - 1. / 512.)\n\n # Discretized Logistic Likelihood\n log_likelihood = self.likelihood_function(image, reconstruction)\n self.log_likelihood = tf.reduce_mean(log_likelihood)\n\n return block_indices, reconstruction\n\n def get_codelength(self, compressed_codes):\n codelength = 0.\n for resnet_block, compressed_code in zip(self.residual_blocks, compressed_codes):\n codelength += resnet_block.coder.get_codelength(compressed_code)\n return codelength\n\n def decompress(self, compressed_codes, seed, lossless=True):\n\n # TODO\n batch_size, height, width, _ = 1, 16, 16, None\n\n tensor = self.generative_base()\n\n # We sequentially decode through the resnet blocks\n for resnet_block, compressed_code in zip(self.residual_blocks, compressed_codes):\n tensor = resnet_block(tensor, inference_pass=False, decoder_args={\"seed\": seed,\n \"indices\": compressed_code})\n\n reconstruction = tf.nn.elu(tensor)\n reconstruction = self.last_gen_conv(reconstruction)\n reconstruction = tf.clip_by_value(reconstruction, -0.5 + 1. / 512., 0.5 - 1. / 512.)\n\n return reconstruction + 0.5\n", "import time\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom rec.io.entropy_coding import ArithmeticCoder\n\n\nnum_symbols = 2**6\nmessage_length = 2000\ntest_file_path = \"scratch_compression_test.miracle\"\n\nP = np.ones(num_symbols + 1, dtype=np.int32)\nP[1:] = np.random.choice(100, size=num_symbols) + 1\n\nmessage = np.zeros(message_length, dtype=np.int32)\n\nmessage[:-1] = np.random.choice(num_symbols, size=message_length - 1) + 1\n\nac = ArithmeticCoder(P, precision=32)\n\nstart = time.time()\n\nprint(\"Coding..\")\ncode = ac.encode(message)\nprint(\"Coded in {:.4f}s\".format(time.time() - start))\n\nlog_P = np.log(P)\nlog_P_normed = log_P - tf.reduce_logsumexp(tf.cast(log_P, tf.float64))\nlog_P_normed = log_P_normed / np.log(2)\n\nexpected_message_length = 0\nfor m in message:\n expected_message_length -= log_P_normed[m]\n\nprint(f\"Expected message length: {expected_message_length:.2f} bits!\")\nprint(f\"Actual message length: {len(''.join(code))} bits!\")\n\nstart = time.time()\n\nprint(\"Fast Decoding...\")\ndecompressed = ac.decode_fast(code)\nprint(\"Decoded in {:.4f}s\".format(time.time() - start))\n\n\nprint(np.all(decompressed == message))\n" ]
[ [ "tensorflow.concat", "tensorflow.zeros", "tensorflow.reduce_sum", "tensorflow.cast", "tensorflow.Variable", "tensorflow.tile", "tensorflow.nn.elu", "tensorflow.nn.sigmoid", "tensorflow.exp", "tensorflow.math.exp", "tensorflow.math.square", "tensorflow.math.floor", "tensorflow.clip_by_value", "tensorflow.reduce_mean", "tensorflow.maximum", "tensorflow.reshape", "tensorflow.math.squared_difference", "tensorflow.math.log", "tensorflow.image.ssim_multiscale" ], [ "numpy.log", "numpy.random.choice", "tensorflow.cast", "numpy.ones", "numpy.all", "numpy.zeros" ] ]
vpaliy/CIFAR-10-Image-Classification
[ "f05d31b8fcbd20c1ec7baf2a1b4f35b51b41a376" ]
[ "main.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import accuracy_score\n\nimport pickle as pickle\nimport numpy as np\nimport os\n\nfrom softmax import SGDSoftmaxClassifier, SoftmaxClassifier\n\n\ndef load_data(train_paths, test_path):\n X_train, y_train = get_batch(train_paths[0])\n for i, path in enumerate(train_paths[1:]):\n data, labels = get_batch(path)\n X_train = np.concatenate([X_train, data], axis=0)\n y_train = np.concatenate([y_train, labels], axis=0)\n # load the test \n X_test, y_test = get_batch(test_path)\n y_test = np.array(y_test)\n y_train = np.array(y_train)\n return X_train, y_train, X_test, y_test\n\n\ndef get_batch(path):\n with open(path, 'rb') as fo:\n data = pickle.load(fo, encoding='bytes')\n return data[b'data'], data[b'labels']\n\n\ndef normalize(X_train, X_test):\n mean = np.mean(X_train, axis=0)\n X_train = X_train - mean\n X_test = X_test - mean\n X_train = np.divide(X_train, 255.)\n X_test = np.divide(X_test, 255.)\n return X_train, X_test\n\n\nif __name__ == '__main__':\n path = os.getcwd() + '/cifar-10-batches-py/'\n TRAIN_FILENAMES = [os.path.join(path, 'data_batch_' + str(i)) for i in range(1, 6)]\n TEST_FILENAME = os.path.join(path, 'test_batch') \n\n X_train, y_train, X_test, y_test = load_data(TRAIN_FILENAMES, TEST_FILENAME)\n X_train, X_test = normalize(X_train, X_test)\n\n model = SGDSoftmaxClassifier()\n errors = model.fit(X_train, y_train, 5)\n \n y_pred = model.predict(X_test)\n\n\n print('Accuracy score:', accuracy_score(y_pred, y_test))\n\n plt.plot(range(len(errors)), errors, 'b-')\n plt.xlabel('Iteration')\n plt.ylabel('Error')\n plt.show()\n\n weights = model.W[:,:-1].T\n weights = weights.reshape(10, 32, 32, 3)\n\n w_min, w_max = np.min(weights), np.max(weights)\n\n classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']\n\n for i in range(10):\n plt.subplot(2, 5, i + 1)\n\n # Rescale the weights to be between 0 and 255 for image representation\n w_img = 255.0 * (weights[i].squeeze() - w_min) / (w_max - w_min)\n plt.imshow(w_img.astype('uint8'))\n plt.axis('off')\n plt.title(classes[i])\n plt.show()\n\n " ]
[ [ "matplotlib.pyplot.title", "numpy.min", "sklearn.metrics.accuracy_score", "matplotlib.pyplot.show", "numpy.concatenate", "numpy.max", "matplotlib.pyplot.subplot", "numpy.mean", "matplotlib.pyplot.axis", "matplotlib.pyplot.xlabel", "numpy.array", "numpy.divide", "matplotlib.pyplot.ylabel" ] ]
LzVv123456/Deep-Reinforced-Tree-Traversal
[ "8e117590c8cd51c9fc9c033232658876160fa638", "8e117590c8cd51c9fc9c033232658876160fa638" ]
[ "tracer/validation.py", "tracer/model.py" ]
[ "import torch\nimport copy\nimport time\nimport collections\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom skimage.feature import peak_local_max\nfrom utilities import *\nfrom agent_env import *\n\n\nclass Evaluate_Agent():\n\n def __init__(self, cfgs, val_loader, target_net, classifier, device):\n\n self.val_loader = val_loader\n self.target_net = target_net\n self.classifier = classifier\n self.device = device\n\n self.region_mode = cfgs.region_mode\n self.start_idx = cfgs.start_idx\n self.input_size = cfgs.input_size\n self.delay_update = cfgs.delay_update\n self.output_channel = cfgs.output_channel\n self.step_size = cfgs.step_size\n self.match_dist = cfgs.match_dist\n self.plot = cfgs.plot\n self.scatter = cfgs.scatter\n self.dynamic = cfgs.dynamic_plot\n self.precise_metric = cfgs.precise_metric\n self.plot_mode = cfgs.plot_mode\n\n self.dataset_name = None\n self.bifurcation_draw = []\n self.momentum_len = 1\n self.q_history = collections.deque(maxlen = self.momentum_len)\n \n self.step_mm = self.delay_update*self.step_size*0.5 # distance for each step in mm\n self.regress_bif_value = collections.deque(maxlen = int(10/self.step_mm))\n self.regress_bif_coord = collections.deque(maxlen = int(10/self.step_mm))\n self.stop_count = int(6/self.step_mm) # average entropy with in 6mm \n self.stop_cache = collections.deque(maxlen = self.stop_count)\n self.stop_threshold = cfgs.stop_threshold # threshold for stop\n self.bif_threshold = cfgs.bif_threshold # threshold for bifurcation detection\n self.bif_maximum = 200 # maixmum sampled bifurcation num\n self.bif_count = 0 # current bifurcation num\n self.max_step = int(300/self.step_mm) # maximum step for single branch\n self.ref_orientation = get_ref_orientation(self.output_channel, self.step_size)\n self.stop_dist = 6\n\n\n def evaluate(self):\n\n print('Start Validation')\n all_point_val_result = []\n all_point_rate_result = []\n all_time_list = []\n\n for _, sample in enumerate(self.val_loader):\n \n tpr, tpm, fn, fp, ai_gt, ai_infer, tpr_of, tpm_of, fn_of, fp_of = 0, 0, 0, 0, 0, 0, 0, 0, 0, 0\n instance_parameter_tuple = (tpr, tpm, fn, fp, ai_gt, ai_infer, tpr_of, tpm_of, fn_of, fp_of)\n\n env, tree, start_pts, name = tensor_to_numpy(sample)\n print('dataset name:', name)\n ref_env = copy.deepcopy(env)\n\n # prepare all regions need to be validated\n val_list = prepare_training_area(start_pts)\n\n # instance level plot\n trajectory_draw = []\n start_location_draw = []\n self.bifurcation_draw = []\n\n instance_time = 0\n for item in val_list:\n\n print('validation information', item)\n\n s_time = time.time() # get start time\n\n start_num, region = item\n\n tree_gt, tree_gt_segment = get_region_tree(start_num, tree)\n\n end_list, bifurcation_list, jump_reference, start_location = self.prepare_bifurcation_end(tree_gt_segment, tree, start_num)\n\n trajectory = self.trace_vessel(env, ref_env, end_list, tree_gt, bifurcation_list, jump_reference, start_location)\n\n e_time = time.time() # get end time\n print('time consuming:', e_time - s_time) # time used for tracing current tree\n instance_time += (e_time - s_time)\n\n # prepare plot\n trajectory_draw.append(trajectory)\n start_location_draw.append(start_location)\n \n instance_parameter_tuple = self.calculate_loss(instance_parameter_tuple, tree_gt, tree_gt_segment, trajectory)\n\n point_result_tuple, rate_result_tuple = self.calculate_metric(instance_parameter_tuple)\n 
all_point_val_result.append(list(point_result_tuple)) # this collect detail result of all tree structure \n all_point_rate_result.append(list(rate_result_tuple))\n all_time_list.append(instance_time)\n\n if self.plot:\n self.plot_trajectory(tree, trajectory_draw, start_location_draw)\n\n self.overal_result(all_point_val_result, all_point_rate_result, all_time_list)\n\n return all_point_val_result, all_point_rate_result, all_time_list\n\n\n def trace_vessel(self, env, ref_env, end_list, tree_gt, bifurcation_list, jump_reference, start_location):\n \n trace_trajectory = []\n jump_list = []\n self.bif_count = 0\n \n out_of_range = False\n first_branch = True\n jump_list.append(start_location)\n\n while jump_list:\n # clear cache\n self.q_history.clear()\n self.stop_cache.clear()\n self.regress_bif_coord.clear()\n self.regress_bif_value.clear()\n\n segment_trajectory = []\n current_location = jump_list.pop(0)\n\n self.bif_count += 1\n if self.bif_count >= self.bif_maximum:\n break\n\n current_state = crop_3d_volume(self.device, env, current_location, int(self.input_size[0]))\n\n last_action = None\n for step in range(self.max_step):\n current_location = list(np.round(current_location, 2))\n if (current_location[0] <= 0) or (current_location[0] >= np.shape(env)[0]):\n out_of_range = True\n break\n if (current_location[1] <= 0) or (current_location[1] >= np.shape(env)[1]):\n out_of_range = True\n break\n if (current_location[2] <= 0) or (current_location[2] >= np.shape(env)[2]):\n out_of_range = True\n break\n \n # save trajectory and solve loop\n if current_location not in segment_trajectory:\n segment_trajectory.append(current_location)\n \n # propose next location\n next_location, last_action = self.delay_action(env, current_state, current_location, last_action)\n\n # Observe new state\n next_state = crop_3d_volume(self.device, env, next_location, patch_size=int(self.input_size[0]))\n\n # prepare jump\n ref_state = crop_3d_volume(self.device, ref_env, current_location, patch_size=int(self.input_size[0]))\n jump_list = self.auto_joint_discrimitor(jump_list, current_location, ref_state)\n \n # prepare stop\n if first_branch:\n if step >= 50: # safe zone for first vessel\n stop = self.auto_stop_function(current_location, end_list, step)\n if stop:\n break\n else:\n if step >= 5: # safe zone for a new branch\n stop = self.auto_stop_function(current_location, end_list, step)\n if stop:\n break\n\n # move to the next state \n current_state = next_state\n current_location = next_location\n\n first_branch = False\n\n trace_trajectory, jump_list = self.prepare_trace_trajectory(trace_trajectory, segment_trajectory, jump_list)\n \n # trace go out of env\n if out_of_range:\n break\n \n return trace_trajectory\n\n\n def prepare_trace_trajectory(self, trace_trajectory, segment_trajectory, jump_list):\n end_idx = -1\n new_jump_list = copy.deepcopy(jump_list)\n\n if len(trace_trajectory)>1:\n if len(segment_trajectory) >= int(5/self.step_mm):\n cover_count = 0\n for pt in segment_trajectory:\n for segment in trace_trajectory:\n dist = np.linalg.norm(np.asarray(segment)-np.asarray(pt), axis=1)\n min_dist = np.amin(dist)\n\n if min_dist <= 6:\n cover_count+=1\n break\n\n cover_rate = cover_count/len(segment_trajectory)\n\n if cover_rate < 0.5:\n trace_trajectory.append(segment_trajectory[:end_idx])\n else:\n # remove jump pt on removed segment\n for pt in jump_list:\n if pt in segment_trajectory:\n new_jump_list.remove(pt) \n else:\n # remove jump pt on removed segment\n for pt in jump_list:\n if pt in 
segment_trajectory:\n new_jump_list.remove(pt)\n else:\n trace_trajectory.append(segment_trajectory[:end_idx])\n\n return trace_trajectory, new_jump_list\n\n\n def calculate_loss(self, parameter_tuple, tree_list, tree_segment, trace_segment):\n \n # get trace list\n trace_list = []\n for segment in trace_segment:\n for pt in segment:\n if pt not in trace_list:\n trace_list.append(pt)\n\n tpr, tpm, fn, fp, ai_gt, ai_infer, tpr_of, tpm_of, fn_of, fp_of = parameter_tuple\n\n for segment in trace_segment:\n continious = True\n for pt in segment:\n dist = np.linalg.norm(np.asarray(tree_list) - np.asarray(pt), axis=1)\n\n if self.precise_metric:\n min_idx = np.argmin(dist)\n min_idx_0 = min_idx - 1\n min_idx_1 = min_idx + 1\n\n if min_idx_0 < 0:\n min_idx_0 = 0\n if min_idx_1 > len(tree_list)-1:\n min_idx_1 = len(tree_list)-1\n \n tem_list = [tree_list[min_idx_0], tree_list[min_idx], tree_list[min_idx_1]]\n ensampled_list = self.ensample_centerline(tem_list)\n min_dist = np.amin(np.linalg.norm(np.asarray(ensampled_list) - np.asarray(pt), axis=1))\n else:\n min_dist = np.amin(dist)\n\n if min_dist < self.match_dist:\n tpm += 1\n ai_infer += min_dist\n if continious:\n tpm_of += 1\n else:\n fp_of += 1\n else:\n fp += 1\n fp_of += 1\n continious = False\n\n for segment in tree_segment:\n continious = True\n\n for pt in segment:\n\n dist = np.linalg.norm(np.asarray(trace_list) - np.asarray(pt), axis=1)\n\n if self.precise_metric:\n\n min_idx = np.argmin(dist)\n min_idx_0 = min_idx - 1\n min_idx_1 = min_idx + 1\n\n if min_idx_0 < 0:\n min_idx_0 = 0\n if min_idx_1 > len(trace_list)-1:\n min_idx_1 = len(trace_list)-1\n\n tem_list = [trace_list[min_idx_0], trace_list[min_idx], trace_list[min_idx_1]]\n ensampled_list = self.ensample_centerline(tem_list)\n min_dist = np.amin(np.linalg.norm(np.asarray(ensampled_list) - np.asarray(pt), axis=1))\n else:\n min_dist = np.amin(dist)\n\n if min_dist < self.match_dist:\n tpr += 1\n ai_gt += min_dist\n if continious:\n tpr_of += 1\n else:\n fn_of += 1\n else:\n fn += 1\n fn_of += 1\n continious = False\n \n return (tpr, tpm, fn, fp, ai_gt, ai_infer, tpr_of, tpm_of, fn_of, fp_of)\n\n\n def calculate_metric(self, parameter_tuple):\n\n tpr, tpm, fn, fp, ai_gt, ai_infer, tpr_of, tpm_of, fn_of, fp_of = parameter_tuple\n\n ov = np.round((tpr + tpm)/(tpr + tpm + fn + fp + 1e-8), 4)\n ov_single = np.round(tpr/(tpr + fn + 1e-8), 4)\n of = np.round((tpr_of + tpm_of)/(tpr_of + tpm_of + fn_of + fp_of + 1e-8), 4)\n ac = np.round(((ai_gt + ai_infer)/(tpr + tpm + 1e-8)) * 0.5, 4)\n ac_single = np.round((ai_gt/(tpr+1e-8)) * 0.5, 4)\n\n score = ov - ac\n print('Point Based ---------------------------------------------')\n print('Overlap_both: ', ov*100, '%')\n print('Overlap_single: ', ov_single*100, '%')\n print('Overlap Until First Error: ', of*100, '%')\n print('Inner Accuracy: ', ac, 'mm')\n print('Inner Accuracy Single: ', ac_single, 'mm')\n\n lambda1 = 0.5\n\n ov_gt = tpr/(tpr + fn + 1e-8)\n ov_infer = tpm/(tpm + fp + 1e-8)\n ov_rate = np.round(lambda1 * ov_gt + (1-lambda1) * ov_infer, 4)\n ov_single_rate = np.round(ov_gt, 4)\n\n ac_gt = (ai_gt/(tpr + 1e-8)) * 0.5\n ac_infer = (ai_infer/(tpm + 1e-8)) * 0.5\n ac_rate = np.round(lambda1 * ac_gt + (1-lambda1) * ac_infer, 4)\n ac_single_rate = np.round(ac_gt, 4)\n\n print('Rate Based ---------------------------------------------')\n print('Overlap_both: ', ov_rate*100, '%')\n print('Overlap_single: ', ov_single_rate*100, '%')\n print('Inner Accuracy: ', ac_rate, 'mm')\n print('Inner Accuracy Single: ', ac_single_rate, 'mm')\n 
print('----------------------------------------------------')\n return (score, ov, ov_single, of, ac, ac_single), (ov_rate, ov_single_rate, ac_rate, ac_single_rate)\n\n\n def overal_result(self, all_point_val_result, all_point_rate_result, all_time_list):\n\n assert len(all_point_val_result) == len(all_point_rate_result) == len(all_time_list), 'wrong metric!'\n\n p_ov, p_ov_single, p_of, p_ac, p_ac_single = [], [], [], [], []\n r_ov, r_ov_single, r_ac, r_ac_single = [], [], [], []\n\n for idx in range(len(all_point_val_result)):\n p_result = all_point_val_result[idx]\n r_result = all_point_rate_result[idx]\n\n p_ov.append(p_result[1])\n p_ov_single.append(p_result[2])\n p_of.append(p_result[3])\n p_ac.append(p_result[4])\n p_ac_single.append(p_result[5])\n\n r_ov.append(r_result[0])\n r_ov_single.append(r_result[1])\n r_ac.append(r_result[2])\n r_ac_single.append(r_result[3])\n\n print('Final Result: Max, Min, Mean !!!!!!!!!!!!!!!!!!!!!!!!!!!!')\n print('Point Based ---------------------------------------------')\n print('Overlap both: {}% {}% {}%'.format(np.amax(p_ov)*100, np.amin(p_ov)*100, np.mean(p_ov)*100))\n print('Overlap single: {}% {}% {}%'.format(np.amax(p_ov_single)*100, np.amin(p_ov_single)*100, np.mean(p_ov_single)*100))\n print('Overlap Until First Error: {}% {}% {}%'.format(np.amax(p_of)*100, np.amin(p_of)*100, np.mean(p_of)*100))\n print('Inner Accuracy: {}mm {}mm {}mm'.format(np.amax(p_ac), np.amin(p_ac), np.mean(p_ac)))\n print('Inner Accuracy Single: {}mm {}mm {}mm'.format(np.amax(p_ac_single), np.amin(p_ac_single), np.mean(p_ac_single)))\n\n print('Rate Based ---------------------------------------------')\n print('Overlap both: {}% {}% {}%'.format(np.amax(r_ov)*100, np.amin(r_ov)*100, np.mean(r_ov)*100))\n print('Overlap single: {}% {}% {}%'.format(np.amax(r_ov_single)*100, np.amin(r_ov_single)*100, np.mean(r_ov_single)*100))\n print('Inner Accuracy: {}mm {}mm {}mm'.format(np.amax(r_ac), np.amin(r_ac), np.mean(r_ac)))\n print('Inner Accuracy Single: {}mm {}mm {}mm'.format(np.amax(r_ac_single), np.amin(r_ac_single), np.mean(r_ac_single)))\n\n print('Time ----------------------------------------------------')\n print('Time: {}% {}% {}%'.format(np.amax(all_time_list), np.amin(all_time_list), np.mean(all_time_list)))\n print('----------------------------------------------------') \n\n # with open(str(self.stop_threshold)+'-'+str(self.bif_threshold)+'-'+str(self.step_size)+'.txt', 'w') as f:\n # f.write('Final Result: Max, Min, Mean !!!!!!!!!!!!!!!!!!!!!!!!!!!!\\n')\n # f.write('Point Based ---------------------------------------------\\n')\n # f.write('Overlap both: {}% {}% {}%\\n'.format(np.amax(p_ov)*100, np.amin(p_ov)*100, np.mean(p_ov)*100))\n # f.write('Overlap single: {}% {}% {}%\\n'.format(np.amax(p_ov_single)*100, np.amin(p_ov_single)*100, np.mean(p_ov_single)*100))\n # f.write('Overlap Until First Error: {}% {}% {}%\\n'.format(np.amax(p_of)*100, np.amin(p_of)*100, np.mean(p_of)*100))\n # f.write('Inner Accuracy: {}mm {}mm {}mm\\n'.format(np.amax(p_ac), np.amin(p_ac), np.mean(p_ac)))\n # f.write('Inner Accuracy Single: {}mm {}mm {}mm\\n'.format(np.amax(p_ac_single), np.amin(p_ac_single), np.mean(p_ac_single)))\n # f.write('Rate Based ---------------------------------------------\\n')\n # f.write('Overlap both: {}% {}% {}%\\n'.format(np.amax(r_ov)*100, np.amin(r_ov)*100, np.mean(r_ov)*100))\n # f.write('Overlap single: {}% {}% {}%\\n'.format(np.amax(r_ov_single)*100, np.amin(r_ov_single)*100, np.mean(r_ov_single)*100))\n # f.write('Inner Accuracy: {}mm {}mm 
{}mm\\n'.format(np.amax(r_ac), np.amin(r_ac), np.mean(r_ac)))\n # f.write('Inner Accuracy Single: {}mm {}mm {}mm\\n'.format(np.amax(r_ac_single), np.amin(r_ac_single), np.mean(r_ac_single)))\n # f.write('Time ----------------------------------------------------\\n')\n # f.write('Time: {}% {}% {}%\\n'.format(np.amax(all_time_list), np.amin(all_time_list), np.mean(all_time_list)))\n # f.write('----------------------------------------------------\\n') \n return None\n \n\n def ensample_centerline(self, vessel_centerline, resample_dist=0.05):\n\n centerline_resample = []\n centerline_resample.append(list(vessel_centerline[0]))\n current_coordinate = vessel_centerline[0]\n index = 0\n \n while index <= len(vessel_centerline)-1:\n\n if index == len(vessel_centerline)-1:\n break\n\n index_point = vessel_centerline[index]\n next_index_point = vessel_centerline[index+1]\n\n dist = np.linalg.norm(np.array(index_point) - np.array(current_coordinate))\n dist_next = np.linalg.norm(np.array(next_index_point) - np.array(index_point))\n\n if dist < dist_next:\n scale = resample_dist/dist_next\n x_dist = next_index_point[0] - index_point[0]\n y_dist = next_index_point[1] - index_point[1]\n z_dist = next_index_point[2] - index_point[2]\n next_coordinate = [current_coordinate[0]+scale*x_dist, current_coordinate[1]+scale*y_dist, current_coordinate[2]+scale*z_dist]\n centerline_resample.append(next_coordinate)\n current_coordinate = next_coordinate\n else:\n index += 1\n \n return centerline_resample\n\n\n def auto_stop_function(self, current_location, end_list, step):\n stop = False\n if step >= self.max_step - 1:\n stop = True\n if not stop:\n # remove largest value and smallest value\n ref_stop_cache = copy.deepcopy(self.stop_cache)\n ref_stop_cache.remove(np.amax(ref_stop_cache))\n ref_stop_cache.remove(np.amin(ref_stop_cache))\n\n avg_vessel_confidence = np.mean(ref_stop_cache)\n if avg_vessel_confidence <= self.stop_threshold:\n # print('not a vessel')\n stop = True\n\n return stop\n\n\n def auto_joint_discrimitor(self, jump_list, current_location, ref_state):\n outputs = self.classifier(ref_state.float())\n outputs = outputs.to('cpu').detach().numpy()[0]\n proximity_value = outputs[0].item()\n self.stop_cache.append(outputs[1].item())\n\n self.regress_bif_value.append(proximity_value)\n self.regress_bif_coord.append(current_location)\n\n min_dist = int(np.floor(1/self.step_mm)) # min_dist is always 1mm in real word \n indices = peak_local_max(np.asarray(self.regress_bif_value), min_distance=min_dist, \n threshold_abs=self.bif_threshold, indices=True)\n\n for idx in indices:\n bifurcation_coordinate = self.regress_bif_coord[idx[0]]\n\n if len(jump_list) > 0:\n if bifurcation_coordinate not in jump_list:\n jump_list.append(list(bifurcation_coordinate))\n self.bifurcation_draw.append(list(bifurcation_coordinate))\n else:\n jump_list.append(list(bifurcation_coordinate))\n self.bifurcation_draw.append(list(bifurcation_coordinate))\n\n return jump_list\n\n\n def prepare_bifurcation_end(self, tree_segment, tree, start_num):\n jump_reference = [] \n bifurcation_list = []\n end_list = []\n\n for idx, vessel_segment in enumerate(tree_segment):\n if idx == 0:\n jump_reference.append(vessel_segment[self.start_idx])\n else:\n jump_reference.append(vessel_segment[0])\n\n jump_reference.append(vessel_segment[-1])\n \n start_location = None\n # prepare start location\n for key in tree.keys():\n key_tuple = ast.literal_eval(key)\n if start_num in key_tuple:\n segment = tree[key]\n if key_tuple.index(start_num) == 0:\n 
pass\n else:\n segment = segment[::-1]\n\n start_location = segment[self.start_idx]\n\n # remove start location\n if start_location and (start_location in jump_reference):\n jump_reference.remove(start_location)\n\n # prepare bifurcations and end points\n for pt in jump_reference:\n if (jump_reference.count(pt) == 1) and (pt not in end_list):\n end_list.append(pt)\n jump_reference.remove(pt)\n if (jump_reference.count(pt) >= 3) and (pt not in bifurcation_list):\n bifurcation_list.append(pt)\n \n return end_list, bifurcation_list, jump_reference, start_location\n\n\n def simple_action(self, current_state, current_location, last_action_idx):\n q_values = self.target_net(current_state).to(device='cpu').detach().numpy()[0]\n q_values = list(q_values)\n\n if self.q_history:\n self.q_history.append(q_values)\n else:\n for i in range(self.momentum_len):\n self.q_history.append(q_values)\n\n final_q_value = np.zeros((len(q_values),))\n unit_weight = 1/sum(x+1 for x in range(len(self.q_history)))\n\n for idx, value in enumerate(self.q_history):\n final_q_value += (idx+1) * unit_weight * np.asarray(value)\n\n if last_action_idx:\n\n # select action with angle less than 60 degrees againist last action\n last_action = self.ref_orientation[last_action_idx]\n \n indexed = list(enumerate(final_q_value))\n sorted_indexed = sorted(indexed, key=itemgetter(1), reverse=True)\n tem_idx = 0\n\n for pair in sorted_indexed:\n action_idx = pair[0]\n cur_action = self.ref_orientation[action_idx]\n # calculate angle between 2 vectors less than 60 degree\n cos_theta = np.dot(last_action, cur_action)/(np.linalg.norm(last_action) * np.linalg.norm(cur_action))\n if cos_theta >= 1/2:\n break\n return torch.tensor(action_idx).to(self.device)\n\n else:\n action_idx = np.argmax(final_q_value)\n return torch.tensor(action_idx).to(self.device)\n\n\n def delay_action(self, env, current_state, current_location, last_action):\n tem_current_state = current_state\n tem_current_location = current_location\n\n for i in range(self.delay_update):\n tem_action = self.simple_action(tem_current_state, tem_current_location, last_action)\n\n last_action = tem_action\n\n env, tem_next_location = update_env(env, tem_current_location, \n tem_action, self.ref_orientation, step_size=self.step_size)\n\n tem_next_state = crop_3d_volume(self.device, env, tem_next_location, int(self.input_size[0]))\n\n tem_current_state = tem_next_state\n\n tem_current_location = tem_next_location\n\n next_location = tem_next_location\n \n return next_location, last_action\n\n\n def plot_trajectory(self, tree_draw, trajectory_draw, start_location_draw):\n plt.rcParams[\"figure.figsize\"] = 12.8, 9.6\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.view_init(32, -40)\n\n # plot gt centerline\n for key, gt_segment in tree_draw.items():\n x_g, y_g, z_g = [], [], []\n for pt in gt_segment:\n x_g.append(pt[0])\n y_g.append(pt[1])\n z_g.append(pt[2])\n ax.plot3D(x_g, y_g, z_g, zdir='z', c = 'b', linestyle='dashed')\n\n if 'b' in self.plot_mode:\n # plot bifurcation\n x_b, y_b, z_b = [], [], []\n for pt in self.bifurcation_draw:\n x_b.append(pt[0])\n y_b.append(pt[1])\n z_b.append(pt[2])\n ax.scatter3D(x_b, y_b, z_b, zdir='z', c = '#76cd26', s=10, alpha=1)\n\n # plot start location\n if 's' in self.plot_mode:\n for start_location in start_location_draw:\n ax.scatter3D(start_location[0], start_location[1], start_location[2], zdir='z', c = 'blueviolet', s=30, alpha=1)\n\n if 'tra' in self.plot_mode:\n # plot traced trajectory\n for trajectory in 
trajectory_draw:\n\n if self.dynamic:\n self.dynamic_plot(trajectory, fig, ax)\n else:\n for trace_segment in trajectory:\n x_p, y_p, z_p = [], [], []\n\n for pt in trace_segment:\n x_p.append(pt[0])\n y_p.append(pt[1])\n z_p.append(pt[2])\n\n if self.scatter:\n ax.scatter3D(x_p, y_p, z_p, zdir='z', c = 'r', s=0.7, alpha=0.7)\n else:\n ax.plot3D(x_p, y_p, z_p, zdir='z', c = 'r', alpha=0.7)\n plt.axis('off')\n plt.grid(False)\n plt.show(block=True)\n\n \n def dynamic_plot(self, all_trajectories, fig, ax):\n data = []\n for trace_trajectory in all_trajectories: \n x_dynamic, y_dynamic, z_dynamic = [], [], [] \n for pt in trace_trajectory:\n x_dynamic.append(pt[0])\n y_dynamic.append(pt[1])\n z_dynamic.append(pt[2])\n\n dat = []\n dat.append(x_dynamic)\n dat.append(y_dynamic)\n dat.append(z_dynamic)\n\n dat = np.asarray(dat)\n # Fifty lines of random 3-D lines\n data.append(dat)\n\n def update_lines(num, dataLines, lines):\n for line, data in zip(lines, dataLines):\n # NOTE: there is no .set_data() for 3 dim data...\n line.set_data(data[0:2, :num])\n line.set_3d_properties(data[2, :num])\n return lines\n\n for dat in data:\n lines = [ax.plot3D(dat[0, 0:1], dat[1, 0:1], dat[2, 0:1], c = 'r')[0]]\n line_ani = animation.FuncAnimation(fig, update_lines, fargs=([dat], lines), interval=0, blit=False)\n pause = (25 * len(list(dat)[0])) / 1000\n plt.pause(pause)\n", "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom utilities import *\n\n\nclass DQN_dila(nn.Module):\n\n def __init__(self, cfgs):\n super(DQN_dila, self).__init__()\n\n self.dueling_dqn = cfgs.dueling_dqn\n input_channel = cfgs.input_channel\n init_channels = cfgs.init_channels\n output_num = cfgs.output_channel\n\n self.features = nn.Sequential(\n nn.Conv3d(input_channel, init_channels, kernel_size=3),\n nn.BatchNorm3d(init_channels),\n nn.ReLU(),\n\n nn.Conv3d(init_channels, init_channels, kernel_size=3),\n nn.BatchNorm3d(init_channels),\n nn.ReLU(),\n\n nn.Conv3d(init_channels, init_channels, kernel_size=3, dilation=2),\n nn.BatchNorm3d(init_channels),\n nn.ReLU(),\n\n nn.Conv3d(init_channels, init_channels, kernel_size=3, dilation=4),\n nn.BatchNorm3d(init_channels),\n nn.ReLU(),\n\n nn.Conv3d(init_channels, init_channels*2, kernel_size=3),\n nn.BatchNorm3d(init_channels*2),\n nn.ReLU(),\n\n nn.Conv3d(init_channels*2, init_channels*2, kernel_size=1),\n nn.BatchNorm3d(init_channels*2),\n nn.ReLU()\n )\n\n self.advantage = nn.Sequential(\n nn.Conv3d(init_channels*2, output_num, kernel_size=1),\n )\n\n if self.dueling_dqn:\n self.value = nn.Sequential(\n nn.Conv3d(init_channels*2, 1, kernel_size=1),\n )\n\n def forward(self, x):\n if self.dueling_dqn:\n x = self.features(x)\n advantage = self.advantage(x)\n value = self.value(x)\n output = value + advantage - advantage.mean()\n output = output.squeeze(-1).squeeze(-1).squeeze(-1)\n return output\n\n else:\n x = self.features(x)\n output = self.advantage(x)\n output = output.squeeze(-1).squeeze(-1).squeeze(-1)\n return output\n\n\nclass Classify_Dila(nn.Module):\n\n def __init__(self, cfgs):\n super(Classify_Dila, self).__init__()\n input_channel = cfgs.input_channel\n init_channels = cfgs.init_channels\n output_num = cfgs.output_num_dis\n\n self.features = nn.Sequential(\n nn.Conv3d(input_channel, init_channels, kernel_size=3),\n nn.BatchNorm3d(init_channels),\n nn.ReLU(),\n\n nn.Conv3d(init_channels, init_channels, kernel_size=3),\n nn.BatchNorm3d(init_channels),\n nn.ReLU(),\n\n nn.Conv3d(init_channels, init_channels, kernel_size=3, dilation=2),\n 
nn.BatchNorm3d(init_channels),\n nn.ReLU(),\n\n nn.Conv3d(init_channels, init_channels, kernel_size=3, dilation=4),\n nn.BatchNorm3d(init_channels),\n nn.ReLU(),\n\n nn.Conv3d(init_channels, init_channels*2, kernel_size=3),\n nn.BatchNorm3d(init_channels*2),\n nn.ReLU(),\n\n nn.Conv3d(init_channels*2, init_channels*2, kernel_size=1),\n nn.BatchNorm3d(init_channels*2),\n nn.ReLU(),\n\n nn.Conv3d(init_channels*2, output_num, kernel_size=1),\n )\n\n\n def forward(self, x):\n output = self.features(x)\n output = output.squeeze(-1).squeeze(-1).squeeze(-1)\n return output\n\n\n\ndef optimize_model(device, policy_net, target_net, optimizer, scheduler, memory, \n batch_size, gamma, double_dqn=True, prioritized_replay=True):\n policy_net.train()\n target_net.eval()\n\n if prioritized_replay:\n # state, action, reward, next_state, indices, weights = memory.sample(batch_size)\n batch, indices, weights = memory.sample(batch_size)\n weights = torch.tensor(weights).cuda()\n else:\n if len(memory) < batch_size:\n return\n \n transitions = memory.sample(batch_size)\n # Transpose the batch (see https://stackoverflow.com/a/19343/3343043 for\n # detailed explanation). This converts batch-array of Transitions\n # to Transition of batch-arrays.\n batch = Transition(*zip(*transitions))\n\n # Compute a mask of non-final states and concatenate the batch elements\n # (a final state would've been the one after which simulation ended)\n non_final_mask = torch.tensor(tuple(map(lambda s: s is not None,\n batch.next_state)), device=device, dtype=torch.uint8)\n\n non_final_next_states = torch.cat([s for s in batch.next_state\n if s is not None]).to(device) \n state_batch = torch.cat(batch.state).to(device)\n action_batch = torch.cat(batch.action).to(device)\n reward_batch = torch.cat(batch.reward).to(device).float()\n\n\n # Compute Q(s_t, a) - the model computes Q(s_t), then we select the\n # columns of actions t aken. 
These are the actions which would've been taken\n # for each batch state according to policy_net\n state_action_values = policy_net(state_batch).gather(1, action_batch)\n state_action_values = state_action_values.sum(1).unsqueeze(1)\n\n # Compute V(s_{t+1}) for all next states.\n # Expected values of actions for non_final_next_states are computed based\n # on the \"older\" target_net; selecting their best reward with max(1)[0].\n # This is merged based on the mask, such that we'll have either the expected\n # state value or 0 in case the state was final.\n next_state_values = torch.zeros(batch_size, device=device).float()\n\n if double_dqn:\n next_q_values = policy_net(non_final_next_states)\n next_q_state_values = target_net(non_final_next_states)\n\n\n index = torch.max(next_q_values, 1)[1].unsqueeze(1)\n next_state_values[non_final_mask] = next_q_state_values.gather(1, index).squeeze(1).detach()\n\n else:\n next_state_values[non_final_mask] = target_net(non_final_next_states).max(1)[0].detach()\n\n # Compute the expected Q values\n expected_state_action_values = (next_state_values * gamma) + reward_batch\n\n if prioritized_replay:\n prios = state_action_values - expected_state_action_values.unsqueeze(1)\n prios = prios.data.cpu().abs().numpy() + 1e-5\n memory.update_priorities(indices, prios)\n loss = F.mse_loss(torch.mul(state_action_values, weights), torch.mul(expected_state_action_values.unsqueeze(1), weights))\n else:\n loss = F.mse_loss(state_action_values, expected_state_action_values.unsqueeze(1))\n\n # Optimize the model\n optimizer.zero_grad()\n loss.backward()\n for param in policy_net.parameters():\n param.grad.data.clamp_(-1, 1)\n scheduler.step()\n optimizer.step()\n del loss" ]
[ [ "torch.tensor", "matplotlib.animation.FuncAnimation", "matplotlib.pyplot.grid", "matplotlib.pyplot.axis", "matplotlib.pyplot.show", "matplotlib.pyplot.pause", "matplotlib.pyplot.figure" ], [ "torch.max", "torch.zeros", "torch.cat", "torch.tensor", "torch.nn.Conv3d", "torch.mul", "torch.nn.ReLU", "torch.nn.BatchNorm3d" ] ]
aliciasantos/molpy
[ "b3c5c12926882c33927c17e94696fde04b44b1d1" ]
[ "molpy/util.py" ]
[ "import numpy as np\n\n\ndef distance(point1, point2):\n \"\"\"\n Calculate distance between two points.\n\n Parameters\n ----------\n point1 : array_like\n The first point.\n point2 : array_like\n The second point.\n\n Returns\n -------\n float\n The distance between point1 and point2. \n \"\"\"\n \n point1 = np.asarray(point1) \n point2 = np.asarray(point2)\n return np.linalg.norm(point1 - point2) \n" ]
[ [ "numpy.asarray", "numpy.linalg.norm" ] ]
andrespp/uetl
[ "a7d59998fc98584c214158d2481b6f320a43aadf" ]
[ "src/uetl.py" ]
[ "\"\"\"utel.py\n\"\"\"\nimport pandas as pd\nimport pandas.io.sql as sqlio\nfrom sqlalchemy import create_engine\nimport psycopg2\nfrom psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT\n\nclass DataWarehouse():\n \"\"\"DataWarehouse class\n \"\"\"\n\n def __init__(self, name, dbms, host, port, base, user, pswd):\n # Define class attributes\n self.name = name\n self.dbms = dbms\n self.host = host\n self.port = port\n self.base = base\n self.user = user\n self.pswd = pswd\n\n def postgres_conn(self, host, port, dbname, user, pwd, timeout=3):\n \"\"\"Connects to PostgreSQL database\n\n Parameters\n ----------\n host : str\n server name or ip address\n port : int\n server port\n dbname : srt\n database name\n user : srt\n database user\n pwd : srt\n database user's password\n timeout : integer\n connection timeout (defaults to 3 seconds)\n\n Returns\n -------\n conn :\n Database connection or -1 on error\n \"\"\"\n conn = \\\n psycopg2.connect(host=host,\n port=port,\n database=dbname,\n user=user,\n password=pwd,\n connect_timeout=timeout)\n return conn\n\n def get_conn(self):\n \"\"\"Connects to the Data Warehouse Database\n\n Returns\n -------\n conn :\n Database connection or -1 on error\n \"\"\"\n return self.postgres_conn(self.host, self.port, self.base,\n self.user, self.pswd)\n\n def test_conn(self):\n \"\"\"Checks if Data Warehouse's DBMS is reachable and DW's database\n exists. If DW DB does not exist, try to create it.\n\n Returns\n -------\n status : boolean\n True if database is reachable, False otherwise\n \"\"\"\n conn = self.get_conn()\n\n if conn == -1: # DW DB Unreachable\n # Try to create DW DB\n if self.create_database(): # DW DB created!\n print('Data Warehouse Database does not exist, creating... ' +\n 'Success!')\n return True\n else:\n # Give up\n return False\n\n else:\n conn.close()\n return True\n\n def create_database(self):\n \"\"\"Creates Data Warehouse's Database\n\n Returns\n -------\n status : boolean\n True if database was created successfuly, and False otherwise\n \"\"\"\n # Connect to 'postgres' database in order to be able to create new db\n conn = self.postgres_conn(self.host, self.port, 'postgres',\n self.user, self.pswd)\n if conn == -1:\n # DBMS Unreachable!\n return False\n\n conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)\n\n cur = conn.cursor()\n cur.execute(\"CREATE DATABASE {} ;\".format(self.base))\n\n conn.close()\n\n return True\n\n def check_table(self, table_name):\n \"\"\"Check if Data Warehouse's table exists.\n\n Warning\n -------\n Table names are case sensitive!\n\n Parameters\n ----------\n table_name : srt\n\n Returns\n -------\n status : boolean\n True if table exist, or False otherwise\n \"\"\"\n conn = self.get_conn()\n if conn == -1: # DBMS Unreachable!\n return False\n else:\n cur = conn.cursor()\n cur.execute(\"\"\"\n select exists(\n select *\n from information_schema.tables\n where table_name=%s\n )\"\"\", (table_name,)\n )\n status = cur.fetchone()[0]\n conn.close()\n return status\n\n def create_tables(self, tables, verbose=False):\n \"\"\"Creates Data Warehouse's Tables, if it doesn't exist.\n\n Warning\n -------\n Table names are case sensitive!\n\n Parameters\n ----------\n tables : dict\n Dictionary containing table_name:sql pairs. 
Ex.:\n dict(dim_date='CREATE TABLE dim_data(DATE_SK DOUBLE PRECISION)\n\n verbose : boolean\n\n Returns\n -------\n status : boolean\n True if tables already exists or were created successfuly,\n and False otherwise\n \"\"\"\n for table in tables.keys():\n\n if not self.check_table(table): # table doesn't exist\n\n if(verbose):\n print('Table {} does not exist! Creating... '.format(table),\n end='')\n\n conn = self.get_conn()\n if conn == -1: # DBMS Unreachable!\n print('ERROR: Fail creating tables!')\n return False\n else:\n cur = conn.cursor()\n cur.execute(tables[table])\n conn.commit()\n if not self.check_table(table): # Fail to create table\n print('ERROR: Fail creating tables!')\n return False\n if(verbose): print('Success!')\n conn.close()\n\n return True\n\n def truncate(self, table_name, verbose=False):\n \"\"\"Truncate Table\n\n Parameters\n ----------\n table_name : str\n Table to be truncated\n\n verbose : boolean\n\n Returns\n -------\n status : boolean\n True on success, False otherwise\n \"\"\"\n if(verbose):\n print('{}: '.format(table_name), end='', flush=True)\n\n conn = self.get_conn()\n\n if conn == -1:\n print(\"ERROR: query(): Unable to connect to the database.\")\n return False\n else:\n cur = conn.cursor()\n cur.execute('TRUNCATE {}'.format(table_name))\n conn.commit()\n conn.close()\n if(verbose): print('table truncated.')\n return True\n\n def write(self, table_name, df, verbose=False, chunksize=None):\n \"\"\"Write dataframe to table. Dataframe's Index will be used as a column\n named 'table_name'_sk\n\n Parameters\n ----------\n table_name : str\n Table to be written\n\n df | Pandas DataFrame\n Data to be loaded\n\n verbose : boolean\n\n chunksize : int, optional\n Specify the number of rows in each batch to be written at a time.\n By default, all rows will be written at once.\n\n Returns\n -------\n status : boolean\n True on success, False otherwise\n \"\"\"\n if(verbose):\n print('{}: '.format(table_name), end='', flush=True)\n\n ## psycopg2\n eng_str = 'postgresql+psycopg2://{}:{}@{}:{}/{}'.format(\n self.user, self.pswd, self.host, self.port, self.base)\n engine = create_engine(eng_str)\n conn = self.get_conn()\n\n if conn == -1:\n print(\"ERROR: query(): Unable to connect to the database.\")\n return False\n else:\n sk = table_name.split('_')[1]+'_sk' # remove 'dim_' prefix\n df.to_sql(name=table_name,\n con=engine,\n index=True,\n index_label=sk,\n chunksize=chunksize,\n if_exists='append')\n conn.close()\n if(verbose):\n print('{} registries loaded.'.format(len(df)))\n return True\n\n def write_table(self, table_name, df, verbose=False):\n \"\"\"Write dataframe to table.\n Parameters\n ----------\n table_name : str\n Table to be written\n\n df | Pandas DataFrame\n Data to be loaded\n\n verbose : boolean\n\n Returns\n -------\n status : boolean\n True on success, False otherwise\n \"\"\"\n if(verbose):\n print('{}: '.format(table_name), end='', flush=True)\n\n ## psycopg2\n eng_str = 'postgresql+psycopg2://{}:{}@{}:{}/{}'.format(\n self.user, self.pswd, self.host, self.port, self.base)\n engine = create_engine(eng_str)\n conn = self.get_conn()\n\n if conn == -1:\n print(\"ERROR: query(): Unable to connect to the database.\")\n return False\n else:\n df.to_sql(name=table_name,\n con=engine,\n index=False,\n if_exists='append')\n conn.close()\n if(verbose):\n print('{} registries loaded.'.format(len(df)))\n return True\n\n def query(self, query=\"SELECT\"):\n \"\"\"Connects to the Data Warehouse DB and run defined query\n\n Parameters\n ----------\n query : 
str\n Desired query\n\n Returns\n -------\n df : DataFrame\n Resulting Dataframe (Empty dataframe if unable to connect)\n \"\"\"\n\n # Connect to an existing database\n conn = self.get_conn()\n\n if conn == -1:\n print(\"ERROR: query(): Unable to connect to the database.\")\n return pd.DataFrame()\n\n # Perform query\n df = sqlio.read_sql_query(query, conn)\n\n # Close communication with the database\n conn.close()\n\n return df\n\nif __name__ == '__main__':\n print('Data Warehouse Class')\n" ]
[ [ "pandas.DataFrame", "pandas.io.sql.read_sql_query" ] ]
HDRUK/MedCAT
[ "69c36d1da484ad32520a9b3333adf8f6ebfcbde7" ]
[ "medcat/cat.py" ]
[ "import os\nimport json\nimport pandas\nimport spacy\nfrom time import sleep\nfrom functools import partial\nfrom multiprocessing import Process, Manager, Queue, Pool, Array\nfrom medcat.cdb import CDB\nfrom medcat.spacy_cat import SpacyCat\nfrom medcat.preprocessing.tokenizers import spacy_split_all\nfrom medcat.utils.spelling import CustomSpellChecker\nfrom medcat.utils.spacy_pipe import SpacyPipe\nfrom medcat.preprocessing.cleaners import spacy_tag_punct\nfrom medcat.utils.helpers import get_all_from_name, tkn_inds_from_doc\nfrom medcat.utils.loggers import basic_logger\nfrom medcat.utils.data_utils import make_mc_train_test\nimport time\nimport sys, traceback\nfrom tqdm.autonotebook import tqdm\n\nlog = basic_logger(\"CAT\")\n\nclass CAT(object):\n r'''\n The main MedCAT class used to annotate documents, it is built on top of spaCy\n and works as a spaCy pipline. Creates an instance of a spaCy pipline that can\n be used as a spacy nlp model.\n\n Args:\n cdb (medcat.cdb.CDB):\n The concept database that will be used for NER+L\n vocab (medcat.utils.vocab.Vocab, optional):\n Vocabulary used for vector embeddings and spelling. Default: None\n skip_stopwords (bool):\n If True the stopwords will be ignored and not detected in the pipeline.\n Default: True\n meta_cats (list of medcat.meta_cat.MetaCAT, optional):\n A list of models that will be applied sequentially on each\n detected annotation.\n\n Attributes (limited):\n cdb (medcat.cdb.CDB):\n Concept database used with this CAT instance, please do not assign\n this value directly.\n vocab (medcat.utils.vocab.Vocab):\n The vocabulary object used with this instance, please do not assign\n this value directly.\n config - WILL BE REMOVED - TEMPORARY PLACEHOLDER\n\n Examples:\n >>>cat = CAT(cdb, vocab)\n >>>spacy_doc = cat(\"Put some text here\")\n >>>print(spacy_doc.ents) # Detected entites\n '''\n def __init__(self, cdb, vocab=None, skip_stopwords=True, meta_cats=[], config={}, tokenizer=None):\n self.cdb = cdb\n self.vocab = vocab\n self.config = config\n\n # Build the spacy pipeline\n self.nlp = SpacyPipe(spacy_split_all)\n\n #self.nlp.add_punct_tagger(tagger=spacy_tag_punct)\n self.nlp.add_punct_tagger(tagger=partial(spacy_tag_punct,\n skip_stopwords=skip_stopwords,\n keep_punct=self.config.get(\"keep_punct\", [':', '.'])))\n\n # Add spell checker\n self.spell_checker = CustomSpellChecker(cdb_vocab=self.cdb.vocab, data_vocab=self.vocab)\n self.nlp.add_spell_checker(spell_checker=self.spell_checker)\n\n # Add them cat class that does entity detection\n self.spacy_cat = SpacyCat(cdb=self.cdb, vocab=self.vocab, tokenizer=tokenizer)\n self.nlp.add_cat(spacy_cat=self.spacy_cat)\n\n # Add meta_annotaiton classes if they exist\n self._meta_annotations = False\n for meta_cat in meta_cats:\n self.nlp.add_meta_cat(meta_cat, meta_cat.category_name)\n self._meta_annotations = True\n\n\n def __call__(self, text):\n r'''\n Push the text through the pipeline.\n\n Args:\n text (string):\n The text to be annotated\n\n Returns:\n A spacy document with the extracted entities\n '''\n return self.nlp(text)\n\n\n def add_concept_cntx(self, cui, text, tkn_inds, negative=False, lr=None, anneal=None, spacy_doc=None):\n if spacy_doc is None:\n spacy_doc = self(text)\n tkns = [spacy_doc[ind] for ind in range(tkn_inds[0], tkn_inds[-1] + 1)]\n self.spacy_cat._add_cntx_vec(cui=cui, doc=spacy_doc, tkns=tkns,\n negative=negative, lr=lr, anneal=anneal)\n\n\n def unlink_concept_name(self, cui, name, full_unlink=True):\n r'''\n Unlink a concept name from the CUI (or all 
CUIs if full_unlink), removes the link from\n the Concept Database (CDB). As a consequence medcat will never again link the `name`\n to this CUI - meaning the name will not be detected as a concept in the future.\n\n Args:\n cui (str):\n The CUI from which the `name` will be removed\n name (str):\n The span of text to be removed from the linking dictionary\n full_unlink (boolean):\n If True, the `name` will not only be removed from the given `cui` but from\n each concept in the database that is associated with this name.\n Examples:\n >>> # To never again link C0020538 to HTN\n >>> cat.unlink_concept_name('C0020538', 'htn', False)\n '''\n names = [name, name.lower()]\n # Unlink a concept from a name\n p_name, tokens, _, _ = get_all_from_name(name=name, source_value=name, nlp=self.nlp, version='clean')\n # Add the clean version of the name\n names.append(p_name)\n # Get the raw version\n p_name, tokens, _, _ = get_all_from_name(name=name, source_value=name, nlp=self.nlp, version='raw')\n # Append the raw evrsion\n names.append(p_name)\n\n if tokens[-1].lower() == \"s\":\n # Remove last 's' - a stupid bug\n names.append(p_name[0:-1])\n\n for name in names:\n cuis = [cui]\n if full_unlink and name in self.cdb.name2cui:\n cuis = list(self.cdb.name2cui[name])\n\n for cui in cuis:\n if cui in self.cdb.cui2names and name in self.cdb.cui2names[cui]:\n self.cdb.cui2names[cui].remove(name)\n if len(self.cdb.cui2names[cui]) == 0:\n del self.cdb.cui2names[cui]\n\n if name in self.cdb.name2cui:\n if cui in self.cdb.name2cui[name]:\n self.cdb.name2cui[name].remove(cui)\n\n if len(self.cdb.name2cui[name]) == 0:\n del self.cdb.name2cui[name]\n\n\n def _add_name(self, cui, source_val, is_pref_name, only_new=False, desc=None, tui=None):\n r'''\n Please do not use directly. This function will add a name to a CUI (existing or new).\n\n Args:\n cui (str):\n The CUI to which to add the name\n source_val (str):\n The `name` or span or source_value that will be linked to the cui\n is_pref_name (boolean):\n Is this source_val the prefered `name` for this CUI (concept)\n only_new (bool):\n Only add the name if it does not exist in the current CDB and is not linked\n to any concept (CUI) in the current CDB.\n desc (str):\n Description for this concept\n tui (str):\n Semenantic Type identifer for this concept, should be a TUI that exisit in the\n current CDB. 
Have a look at cdb.tui2names - for a list of all existing TUIs\n in the current CDB.\n\n Examples:\n Do not use.\n '''\n onto = 'def'\n all_cuis = []\n\n if cui in self.cdb.cui2ontos and self.cdb.cui2ontos[cui]:\n onto = list(self.cdb.cui2ontos[cui])[0]\n\n # Add the original version of the name just lowercased\n p_name, tokens, snames, tokens_vocab = get_all_from_name(name=source_val,\n source_value=source_val,\n nlp=self.nlp, version='none')\n if cui not in self.cdb.cui2names or p_name not in self.cdb.cui2names[cui]:\n if not only_new or p_name not in self.cdb.name2cui:\n self.cdb.add_concept(cui, p_name, onto, tokens, snames, tokens_vocab=tokens_vocab,\n original_name=source_val, is_pref_name=False, desc=desc, tui=tui)\n all_cuis.extend(self.cdb.name2cui[p_name])\n\n p_name, tokens, snames, tokens_vocab = get_all_from_name(name=source_val,\n source_value=source_val,\n nlp=self.nlp, version='clean')\n # This will add a new concept if the cui doesn't exist\n # or link the name to an existing concept if it exists.\n if cui not in self.cdb.cui2names or p_name not in self.cdb.cui2names[cui]:\n if not only_new or p_name not in self.cdb.name2cui:\n self.cdb.add_concept(cui, p_name, onto, tokens, snames, tokens_vocab=tokens_vocab,\n original_name=source_val, is_pref_name=False, desc=desc, tui=tui)\n all_cuis.extend(self.cdb.name2cui[p_name])\n\n # Add the raw also if needed\n p_name, tokens, snames, tokens_vocab = get_all_from_name(name=source_val,\n source_value=source_val,\n nlp=self.nlp, version='raw')\n if cui not in self.cdb.cui2names or p_name not in self.cdb.cui2names[cui] or is_pref_name:\n if not only_new or p_name not in self.cdb.name2cui:\n self.cdb.add_concept(cui, p_name, onto, tokens, snames, tokens_vocab=tokens_vocab,\n original_name=source_val, is_pref_name=is_pref_name, desc=desc, tui=tui)\n all_cuis.extend(self.cdb.name2cui[p_name])\n\n # Fix for ntkns in cdb\n if p_name in self.cdb.name2ntkns:\n if len(tokens) not in self.cdb.name2ntkns[p_name]:\n self.cdb.name2ntkns[p_name].add(len(tokens))\n\n return list(set(all_cuis))\n\n\n def add_name(self, cui, source_val, text=None, is_pref_name=False, tkn_inds=None, text_inds=None,\n spacy_doc=None, lr=None, anneal=None, negative=False, only_new=False, desc=None, tui=None,\n manually_created=False):\n r'''\n This function will add a `name` (source_val) to a CUI (existing or new). It will teach medcat\n that this source_val is linked to this CUI.\n\n Args:\n cui (str):\n The CUI to which to add the name\n source_val (str):\n The `name` or span or source_value that will be linked to the cui\n text (str, optional):\n Text in which an example of this source_val can be found. Used for supervised/online\n training. This is basically one sample in a dataset for supervised training.\n is_pref_name (boolean):\n Is this source_val the prefered `name` for this CUI (concept)\n tkn_inds (list of ints, optional):\n Should be in the form: [3, 4, 5, ...]. This should be used only if you are providing a spacy_doc also.\n It gives the indicies of the tokens in a spacy document where the source_val can be found.\n text_inds (list, optional):\n A list that has only two values the start index for this `source_val` in the `text` and the end index.\n Used if you are not providing a spacy_doc. 
But are providing a `text` - it is optional and if not provided\n medcat will try to automatically find the start and end index.\n spacy_doc ()\n TODO:\n lr (float):\n The learning rate that will be used if you are providing the `text` that will be used for supervised/active\n learning.\n\n only_new (bool):\n Only add the name if it does not exist in the current CDB and is not linked\n to any concept (CUI) in the current CDB.\n desc (str):\n Description for this concept\n tui (str):\n Semenantic Type identifer for this concept, should be a TUI that exisit in the\n current CDB. Have a look at cdb.tui2names - for a list of all existing TUIs\n in the current CDB.\n\n Examples:\n Do not use.\n '''\n # First add the name, get bac all cuis that link to this name\n all_cuis = self._add_name(cui, source_val, is_pref_name, only_new=only_new, desc=desc, tui=tui)\n\n # Now add context if text is present\n if (text is not None and (source_val in text or text_inds)) or \\\n (spacy_doc is not None and (text_inds or tkn_inds)):\n if spacy_doc is None:\n spacy_doc = self(text)\n\n if tkn_inds is None:\n tkn_inds = tkn_inds_from_doc(spacy_doc=spacy_doc, text_inds=text_inds,\n source_val=source_val)\n\n if tkn_inds is not None and len(tkn_inds) > 0:\n self.add_concept_cntx(cui, text, tkn_inds, spacy_doc=spacy_doc, lr=lr, anneal=anneal,\n negative=negative)\n\n if manually_created:\n all_cuis.remove(cui)\n for _cui in all_cuis:\n self.add_concept_cntx(_cui, text, tkn_inds, spacy_doc=spacy_doc, lr=lr, anneal=anneal,\n negative=True)\n\n\n def _print_stats(self, data, epoch=0, use_filters=False, use_overlaps=False, use_cui_doc_limit=False,\n use_groups=False):\n r'''\n Print metrics on a dataset (F1, P, R), it will also print the concepts that have the most FP,FN,TP.\n\n Args:\n data (list of dict):\n The json object that we get from MedCATtrainer on export.\n epoch (int):\n Used during training, so we know what epoch is it.\n use_filters (boolean):\n Each project in medcattrainer can have filters, do we want to respect those filters\n when calculating metrics.\n use_overlaps (boolean):\n Allow overlapping entites, nearly always False as it is very difficult to annotate overlapping entites.\n use_cui_doc_limit (boolean):\n If True the metrics for a CUI will be only calculated if that CUI appears in a document, in other words\n if the document was annotated for that CUI. 
Useful in very specific situations when during the annotation\n process the set of CUIs changed.\n use_groups (boolean):\n If True concepts that have groups will be combined and stats will be reported on groups.\n\n Returns:\n fps (dict):\n False positives for each CUI\n fns (dict):\n False negatives for each CUI\n tps (dict):\n True positives for each CUI\n cui_prec (dict):\n Precision for each CUI\n cui_rec (dict):\n Recall for each CUI\n cui_f1 (dict):\n F1 for each CUI\n cui_counts (dict):\n Number of occurrence for each CUI\n '''\n tp = 0\n fp = 0\n fn = 0\n fps = {}\n fns = {}\n tps = {}\n cui_prec = {}\n cui_rec = {}\n cui_f1 = {}\n cui_counts = {}\n examples = {'fp': {}, 'fn': {}, 'tp': {}}\n\n fp_docs = set()\n fn_docs = set()\n if self.spacy_cat.TUI_FILTER is None:\n _tui_filter = None\n else:\n _tui_filter = list(self.spacy_cat.TUI_FILTER)\n if self.spacy_cat.CUI_FILTER is None:\n _cui_filter = None\n else:\n _cui_filter = list(self.spacy_cat.CUI_FILTER)\n\n for pind, project in tqdm(enumerate(data['projects']), desc=\"Stats project\", total=len(data['projects']), leave=False):\n cui_filter = None\n tui_filter = None\n\n if use_filters:\n if 'cuis' in project and len(project['cuis'].strip()) > 0:\n cui_filter = set([x.strip() for x in project['cuis'].split(\",\")])\n if 'tuis' in project and len(project['tuis'].strip()) > 0:\n tui_filter = set([x.strip().upper() for x in project['tuis'].split(\",\")])\n\n self.spacy_cat.TUI_FILTER = tui_filter\n self.spacy_cat.CUI_FILTER = cui_filter\n\n start_time = time.time()\n for dind, doc in tqdm(enumerate(project['documents']), desc='Stats document', total=len(project['documents']), leave=False):\n spacy_doc = self(doc['text'])\n anns = doc['annotations']\n if use_overlaps:\n p_anns = spacy_doc._.ents\n else:\n p_anns = spacy_doc.ents\n\n anns_norm = []\n anns_norm_neg = []\n anns_examples = []\n anns_norm_cui = []\n for ann in anns:\n if (cui_filter is None and tui_filter is None) or (cui_filter is not None and ann['cui'] in cui_filter) or \\\n (tui_filter is not None and self.cdb.cui2tui.get(ann['cui'], 'unk') in tui_filter):\n cui = ann['cui']\n if use_groups:\n cui = self.cdb.cui2info.get(cui, {}).get(\"group\", cui)\n\n if ann.get('validated', True) and (not ann.get('killed', False) and not ann.get('deleted', False)):\n anns_norm.append((ann['start'], cui))\n anns_examples.append({\"text\": doc['text'][max(0, ann['start']-60):ann['end']+60],\n \"cui\": cui,\n \"source value\": ann['value'],\n \"acc\": 1,\n \"project index\": pind,\n \"document inedex\": dind})\n elif ann.get('validated', True) and (ann.get('killed', False) or ann.get('deleted', False)):\n anns_norm_neg.append((ann['start'], cui))\n\n\n if ann.get(\"validated\", True):\n # This is used to test was someone annotating for this CUI in this document\n anns_norm_cui.append(cui)\n cui_counts[cui] = cui_counts.get(cui, 0) + 1\n\n p_anns_norm = []\n p_anns_examples = []\n for ann in p_anns:\n cui = ann._.cui\n if use_groups:\n cui = self.cdb.cui2info.get(cui, {}).get(\"group\", cui)\n p_anns_norm.append((ann.start_char, cui))\n p_anns_examples.append({\"text\": doc['text'][max(0, ann.start_char-60):ann.end_char+60],\n \"cui\": cui,\n \"source value\": ann.text,\n \"acc\": float(ann._.acc),\n \"project index\": pind,\n \"document inedex\": dind})\n\n\n for iann, ann in enumerate(p_anns_norm):\n if not use_cui_doc_limit or ann[1] in anns_norm_cui:\n cui = ann[1]\n if ann in anns_norm:\n tp += 1\n tps[cui] = tps.get(cui, 0) + 1\n\n example = p_anns_examples[iann]\n 
examples['tp'][cui] = examples['tp'].get(cui, []) + [example]\n else:\n fp += 1\n fps[cui] = fps.get(cui, 0) + 1\n fp_docs.add(doc.get('name', 'unk'))\n\n # Add example for this FP prediction\n example = p_anns_examples[iann]\n if ann in anns_norm_neg:\n # Means that it really was annotated as negative\n example['real_fp'] = True\n\n examples['fp'][cui] = examples['fp'].get(cui, []) + [example]\n\n for iann, ann in enumerate(anns_norm):\n if ann not in p_anns_norm:\n cui = ann[1]\n fn += 1\n fn_docs.add(doc.get('name', 'unk'))\n\n fns[cui] = fns.get(cui, 0) + 1\n examples['fn'][cui] = examples['fn'].get(cui, []) + [anns_examples[iann]]\n\n try:\n prec = tp / (tp + fp)\n rec = tp / (tp + fn)\n f1 = 2*(prec * rec)/(prec + rec)\n print(\"Epoch: {}, Prec: {}, Rec: {}, F1: {}\\n\".format(epoch, prec, rec, f1))\n print(\"Docs with false positives: {}\\n\".format(\"; \".join([str(x) for x in list(fp_docs)[0:10]])))\n print(\"Docs with false negatives: {}\\n\".format(\"; \".join([str(x) for x in list(fn_docs)[0:10]])))\n\n # Sort fns & prec\n fps = {k: v for k, v in sorted(fps.items(), key=lambda item: item[1], reverse=True)}\n fns = {k: v for k, v in sorted(fns.items(), key=lambda item: item[1], reverse=True)}\n tps = {k: v for k, v in sorted(tps.items(), key=lambda item: item[1], reverse=True)}\n\n\n # F1 per concept\n for cui in tps.keys():\n prec = tps[cui] / (tps.get(cui, 0) + fps.get(cui, 0))\n rec = tps[cui] / (tps.get(cui, 0) + fns.get(cui, 0))\n f1 = 2*(prec * rec)/(prec + rec)\n cui_prec[cui] = prec\n cui_rec[cui] = rec\n cui_f1[cui] = f1\n\n\n # Get top 10\n pr_fps = [(self.cdb.cui2pretty_name.get(cui,\n list(self.cdb.cui2original_names.get(cui, [cui]))[0]), cui, fps[cui]) for cui in list(fps.keys())[0:10]]\n pr_fns = [(self.cdb.cui2pretty_name.get(cui,\n list(self.cdb.cui2original_names.get(cui, [cui]))[0]), cui, fns[cui]) for cui in list(fns.keys())[0:10]]\n pr_tps = [(self.cdb.cui2pretty_name.get(cui,\n list(self.cdb.cui2original_names.get(cui, [cui]))[0]), cui, tps[cui]) for cui in list(tps.keys())[0:10]]\n\n\n print(\"\\n\\nFalse Positives\\n\")\n for one in pr_fps:\n print(\"{:70} - {:20} - {:10}\".format(str(one[0])[0:69], str(one[1])[0:19], one[2]))\n print(\"\\n\\nFalse Negatives\\n\")\n for one in pr_fns:\n print(\"{:70} - {:20} - {:10}\".format(str(one[0])[0:69], str(one[1])[0:19], one[2]))\n print(\"\\n\\nTrue Positives\\n\")\n for one in pr_tps:\n print(\"{:70} - {:20} - {:10}\".format(str(one[0])[0:69], str(one[1])[0:19], one[2]))\n print(\"*\"*110 + \"\\n\")\n\n\n except Exception as e:\n traceback.print_exc()\n\n self.spacy_cat.TUI_FILTER = _tui_filter\n self.spacy_cat.CUI_FILTER = _cui_filter\n\n return fps, fns, tps, cui_prec, cui_rec, cui_f1, cui_counts, examples\n\n\n def train_supervised(self, data_path, reset_cdb=False, reset_cui_count=False, nepochs=30, lr=None,\n anneal=None, print_stats=True, use_filters=False, terminate_last=False, use_overlaps=False,\n use_cui_doc_limit=False, test_size=0, force_manually_created=False, use_groups=False,\n never_terminate=False):\n r'''\n Run supervised training on a dataset from MedCATtrainer. Please take care that this is more a simiulated\n online training then supervised.\n\n Args:\n data_path (str):\n The path to the json file that we get from MedCATtrainer on export.\n reset_cdb (boolean):\n This will remove all concepts from the existing CDB and build a new CDB based on the\n concepts that appear in the training data. 
It will be impossible to get back the removed\n concepts.\n reset_cui_count (boolean):\n Used for training with weight_decay (annealing). Each concept has a count that is there\n from the beginning of the CDB, that count is used for annealing. Resetting the count will\n significantly incrase the training impact. This will reset the count only for concepts\n that exist in the the training data.\n nepochs (int):\n Number of epochs for which to run the training.\n lr (int):\n If set it will overwrite the global LR from config.\n anneal (boolean):\n If true annealing will be used when training.\n print_stats (boolean):\n If true stats will be printed during training (prints stats every 5 epochs).\n use_filters (boolean):\n Each project in medcattrainer can have filters, do we want to respect those filters\n when calculating metrics.\n terminate_last (boolean):\n If true, concept termination will be done after all training.\n use_overlaps (boolean):\n Allow overlapping entites, nearly always False as it is very difficult to annotate overlapping entites.\n use_cui_doc_limit (boolean):\n If True the metrics for a CUI will be only calculated if that CUI appears in a document, in other words\n if the document was annotated for that CUI. Useful in very specific situations when during the annotation\n process the set of CUIs changed.\n test_size (float):\n If > 0 the data set will be split into train test based on this ration. Should be between 0 and 1.\n Usually 0.1 is fine.\n force_manually_created (float):\n Check add_name for more details, if true all concepts in the dataset will be treated as manually\n created.\n use_groups (boolean):\n If True concepts that have groups will be combined and stats will be reported on groups.\n never_terminate (boolean):\n If True no termination will be applied\n\n Returns:\n fp (dict):\n False positives for each CUI\n fn (dict):\n False negatives for each CUI\n tp (dict):\n True positives for each CUI\n p (dict):\n Precision for each CUI\n r (dict):\n Recall for each CUI\n f1 (dict):\n F1 for each CUI\n cui_counts (dict):\n Number of occurrence for each CUI\n examples (dict):\n FP/FN examples of sentences for each CUI\n '''\n fp = fn = tp = p = r = f1 = cui_counts = examples = {}\n\n self.train = False\n data = json.load(open(data_path))\n cui_counts = {}\n\n if test_size == 0:\n test_set = data\n train_set = data\n else:\n train_set, test_set, _, _ = make_mc_train_test(data, self.cdb, test_size=test_size)\n\n if print_stats:\n self._print_stats(test_set, use_filters=use_filters, use_cui_doc_limit=use_cui_doc_limit, use_overlaps=use_overlaps,\n use_groups=use_groups)\n\n if reset_cdb:\n self.cdb = CDB()\n self.spacy_cat.cdb = self.cdb\n self.spacy_cat.cat_ann.cdb = self.cdb\n\n if reset_cui_count:\n # Get all CUIs\n cuis = []\n for project in train_set['projects']:\n for doc in project['documents']:\n for ann in doc['annotations']:\n cuis.append(ann['cui'])\n for cui in set(cuis):\n if cui in self.cdb.cui_count:\n self.cdb.cui_count[cui] = 10\n\n # Remove entites that were terminated\n if not never_terminate:\n for project in train_set['projects']:\n for doc in project['documents']:\n for ann in doc['annotations']:\n if ann.get('killed', False):\n self.unlink_concept_name(ann['cui'], ann['value'])\n\n for epoch in tqdm(range(nepochs), desc='Epoch', leave=False):\n # Print acc before training\n for project in tqdm(train_set['projects'], desc='Project', leave=False, total=len(train_set['projects'])):\n for i_doc, doc in tqdm(enumerate(project['documents']), 
desc='Document', leave=False, total=len(project['documents'])):\n spacy_doc = self(doc['text'])\n for ann in doc['annotations']:\n if not ann.get('killed', False):\n cui = ann['cui']\n start = ann['start']\n end = ann['end']\n deleted = ann.get('deleted', False)\n manually_created = False\n if force_manually_created or ann.get('manually_created', False) or ann.get('alternative', False):\n manually_created = True\n self.add_name(cui=cui,\n source_val=ann['value'],\n spacy_doc=spacy_doc,\n text_inds=[start, end],\n negative=deleted,\n lr=lr,\n anneal=anneal,\n manually_created=manually_created)\n if terminate_last and not never_terminate:\n # Remove entites that were terminated, but after all training is done\n for project in train_set['projects']:\n for doc in project['documents']:\n for ann in doc['annotations']:\n if ann.get('killed', False):\n self.unlink_concept_name(ann['cui'], ann['value'])\n\n if epoch % 5 == 0:\n if print_stats:\n fp, fn, tp, p, r, f1, cui_counts, examples = self._print_stats(test_set, epoch=epoch+1,\n use_filters=use_filters,\n use_cui_doc_limit=use_cui_doc_limit,\n use_overlaps=use_overlaps,\n use_groups=use_groups)\n return fp, fn, tp, p, r, f1, cui_counts, examples\n\n @property\n def train(self):\n return self.spacy_cat.train\n\n @property\n def spacy_nlp(self):\n \"\"\" Returns the spacy nlp object utilized by cat\"\"\"\n return self.nlp.nlp \n\n\n @train.setter\n def train(self, val):\n self.spacy_cat.train = val\n\n\n def run_training(self, data_iterator, fine_tune=False):\n \"\"\" Runs training on the data\n\n data_iterator: Simple iterator over sentences/documents, e.g. a open file\n or an array or anything else that we can use in a for loop.\n fine_tune: If False old training will be removed\n \"\"\"\n self.train = True\n cnt = 0\n\n if not fine_tune:\n print(\"Removing old training data!\\n\")\n self.cdb.reset_training()\n self.cdb.coo_dict = {}\n self.spacy_cat._train_skip_names = {}\n\n for line in data_iterator:\n if line is not None:\n try:\n _ = self(line)\n except Exception as e:\n print(\"LINE: '{}' \\t WAS SKIPPED\".format(line))\n print(\"BECAUSE OF: \" + str(e))\n if cnt % 1000 == 0:\n print(\"DONE: \" + str(cnt))\n cnt += 1\n self.train = False\n\n\n def get_entities(self, text, cat_filter=None, only_cui=False, skip_info=False):\n \"\"\" Get entities\n\n text: text to be annotated\n return: entities\n \"\"\"\n doc = self(text)\n out = []\n\n if cat_filter:\n cat_filter(doc, self)\n\n out_ent = {}\n if self.config.get('nested_entities', False):\n _ents = doc._.ents\n else:\n _ents = doc.ents\n\n for ind, ent in enumerate(_ents):\n cui = str(ent._.cui)\n if not only_cui:\n out_ent['pretty_name'] = self.cdb.cui2pretty_name.get(cui, '')\n out_ent['cui'] = cui\n out_ent['tui'] = str(ent._.tui)\n out_ent['type'] = str(self.cdb.tui2name.get(out_ent['tui'], ''))\n out_ent['source_value'] = str(ent.text)\n out_ent['acc'] = str(ent._.acc)\n out_ent['start'] = ent.start_char\n out_ent['end'] = ent.end_char\n if not skip_info:\n out_ent['info'] = self.cdb.cui2info.get(cui, {})\n out_ent['id'] = str(ent._.id)\n out_ent['meta_anns'] = {}\n\n if hasattr(ent._, 'meta_anns') and ent._.meta_anns:\n for key in ent._.meta_anns.keys():\n one = {'name': key, 'value': ent._.meta_anns[key]}\n out_ent['meta_anns'][key] = one\n\n out.append(dict(out_ent))\n else:\n out.append(cui)\n\n return out\n\n\n def get_json(self, text, cat_filter=None, only_cui=False, skip_info=False):\n \"\"\" Get output in json format\n\n text: text to be annotated\n return: json with fields 
{'entities': <>, 'text': text}\n \"\"\"\n ents = self.get_entities(text, cat_filter, only_cui, skip_info=skip_info)\n out = {'entities': ents, 'text': text}\n\n return json.dumps(out)\n\n\n def multi_processing(self, in_data, nproc=8, batch_size=100, cat_filter=None, only_cui=False, skip_info=False):\n \"\"\" Run multiprocessing NOT FOR TRAINING\n in_data: an iterator or array with format: [(id, text), (id, text), ...]\n nproc: number of processors\n batch_size: obvious\n\n return: an list of tuples: [(id, doc_json), (id, doc_json), ...]\n \"\"\"\n\n if self._meta_annotations:\n # Hack for torch using multithreading, which is not good here\n import torch\n torch.set_num_threads(1)\n\n # Create the input output for MP\n in_q = Queue(maxsize=4*nproc)\n manager = Manager()\n out_dict = manager.dict()\n out_dict['processed'] = []\n\n # Create processes\n procs = []\n for i in range(nproc):\n p = Process(target=self._mp_cons, args=(in_q, out_dict, i, cat_filter, only_cui, skip_info))\n p.start()\n procs.append(p)\n\n data = []\n for id, text in in_data:\n data.append((id, text))\n if len(data) == batch_size:\n in_q.put(data)\n data = []\n # Put the last batch if it exists\n if len(data) > 0:\n in_q.put(data)\n\n for _ in range(nproc): # tell workers we're done\n in_q.put(None)\n\n for p in procs:\n p.join()\n\n # Close the queue as it can cause memory leaks\n in_q.close()\n\n out = []\n for key in out_dict.keys():\n if 'pid' in key:\n data = out_dict[key]\n out.extend(data)\n\n # Sometimes necessary to free memory\n out_dict.clear()\n del out_dict\n\n return out\n\n\n def _mp_cons(self, in_q, out_dict, pid=0, cat_filter=None, only_cui=False, skip_info=False):\n cnt = 0\n out = []\n while True:\n if not in_q.empty():\n data = in_q.get()\n if data is None:\n out_dict['pid: {}'.format(pid)] = out\n break\n\n for id, text in data:\n try:\n doc = json.loads(self.get_json(text, cat_filter, only_cui, skip_info))\n out.append((id, doc))\n except Exception as e:\n print(\"Exception in _mp_cons\")\n print(e)\n\n sleep(1)\n\n def add_cui_to_group(self, cui, group_name, reset_all_groups=False):\n r'''\n Ads a CUI to a group, will appear in cdb.cui2info['group']\n\n Args:\n cui (str):\n The concept to be added\n group_name (str):\n The group to whcih the concept will be added\n reset_all_groups (boolean):\n If True it will reset all existing groups and remove them.\n\n Examples:\n >>> cat.add_cui_to_group(\"S-17\", 'pain')\n '''\n\n # Reset if needed\n if reset_all_groups:\n for _cui in self.cdb.cui2info.keys():\n _ = self.cdb.cui2info[_cui].pop('group', None)\n\n # Add\n if cui in self.cdb.cui2info:\n self.cdb.cui2info[cui]['group'] = group_name\n else:\n self.cdb.cui2info[cui] = {'group': group_name}\n" ]
[ [ "torch.set_num_threads" ] ]
nikste/ClassMix
[ "fd0ec39c440cf8b10e40ce5ea0b69c5e4a60fa55" ]
[ "models/resnet.py" ]
[ "import torch\nimport torch.nn as nn\n\n__all__ = [\n \"ResNet\",\n \"resnet18\",\n \"resnet34\",\n \"resnet50\",\n \"resnet101\",\n \"resnet152\",\n \"resnext50_32x4d\",\n \"resnext101_32x8d\",\n \"wide_resnet50_2\",\n \"wide_resnet101_2\",\n \"resnext101_32x16d_wsl\",\n]\n\n\ndef conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(\n in_planes,\n out_planes,\n kernel_size=3,\n stride=stride,\n padding=dilation,\n groups=groups,\n bias=False,\n dilation=dilation,\n )\n\n\ndef conv1x1(in_planes, out_planes, stride=1):\n \"\"\"1x1 convolution\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n __constants__ = [\"downsample\"]\n\n def __init__(\n self,\n inplanes,\n planes,\n stride=1,\n downsample=None,\n groups=1,\n base_width=64,\n dilation=1,\n norm_layer=None,\n ):\n super(BasicBlock, self).__init__()\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n if groups != 1 or base_width != 64:\n raise ValueError(\"BasicBlock only supports groups=1 and base_width=64\")\n if dilation > 1:\n raise NotImplementedError(\"Dilation > 1 not supported in BasicBlock\")\n # Both self.conv1 and self.downsample layers downsample the input when stride != 1\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = norm_layer(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = norm_layer(planes)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n __constants__ = [\"downsample\"]\n\n def __init__(\n self,\n inplanes,\n planes,\n stride=1,\n downsample=None,\n groups=1,\n base_width=64,\n dilation=1,\n norm_layer=None,\n ):\n super(Bottleneck, self).__init__()\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n width = int(planes * (base_width / 64.0)) * groups\n # Both self.conv2 and self.downsample layers downsample the input when stride != 1\n self.conv1 = conv1x1(inplanes, width)\n self.bn1 = norm_layer(width)\n self.conv2 = conv3x3(width, width, stride, groups, dilation)\n self.bn2 = norm_layer(width)\n self.conv3 = conv1x1(width, planes * self.expansion)\n self.bn3 = norm_layer(planes * self.expansion)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n out = self.relu(out)\n\n return out\n\n\nclass ResNet(nn.Module):\n def __init__(\n self,\n block,\n layers,\n image_channels=2,\n num_classes=1000,\n zero_init_residual=False,\n groups=1,\n width_per_group=64,\n replace_stride_with_dilation=None,\n norm_layer=None,\n ):\n super(ResNet, self).__init__()\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n self._norm_layer = norm_layer\n\n self.inplanes = 64\n self.dilation = 1\n if replace_stride_with_dilation is None:\n # each element in the tuple indicates if we should replace\n # the 2x2 stride 
with a dilated convolution instead\n replace_stride_with_dilation = [False, False, False]\n if len(replace_stride_with_dilation) != 3:\n raise ValueError(\n \"replace_stride_with_dilation should be None \"\n \"or a 3-element tuple, got {}\".format(replace_stride_with_dilation)\n )\n self.groups = groups\n self.base_width = width_per_group\n self.conv1 = nn.Conv2d(\n image_channels, self.inplanes, kernel_size=7, stride=4, padding=3, bias=False\n )\n self.bn1 = norm_layer(self.inplanes)\n self.relu = nn.ReLU(inplace=True)\n self.layer1 = self._make_layer(block, 64, layers[0])\n self.layer2 = self._make_layer(\n block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0]\n )\n self.layer3 = self._make_layer(\n block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1]\n )\n self.layer4 = self._make_layer(\n block, 512, layers[3], stride=2, dilate=replace_stride_with_dilation[2]\n )\n # self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n # self.fc = nn.Linear(512 * block.expansion, num_classes)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode=\"fan_out\", nonlinearity=\"relu\")\n elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n # Zero-initialize the last BN in each residual branch,\n # so that the residual branch starts with zeros, and each residual block behaves like an identity.\n # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677\n if zero_init_residual:\n for m in self.modules():\n if isinstance(m, Bottleneck):\n nn.init.constant_(m.bn3.weight, 0)\n elif isinstance(m, BasicBlock):\n nn.init.constant_(m.bn2.weight, 0)\n\n def _make_layer(self, block, planes, blocks, stride=1, dilate=False):\n norm_layer = self._norm_layer\n downsample = None\n previous_dilation = self.dilation\n if dilate:\n self.dilation *= stride\n stride = 1\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n conv1x1(self.inplanes, planes * block.expansion, stride),\n norm_layer(planes * block.expansion),\n )\n\n layers = []\n layers.append(\n block(\n self.inplanes,\n planes,\n stride,\n downsample,\n self.groups,\n self.base_width,\n previous_dilation,\n norm_layer,\n )\n )\n self.inplanes = planes * block.expansion\n for _ in range(1, blocks):\n layers.append(\n block(\n self.inplanes,\n planes,\n groups=self.groups,\n base_width=self.base_width,\n dilation=self.dilation,\n norm_layer=norm_layer,\n )\n )\n\n return nn.Sequential(*layers)\n\n def _forward_impl(self, x):\n # See note [TorchScript super()]\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n\n x_l1 = self.layer1(x)\n x_l2 = self.layer2(x_l1)\n x = self.layer3(x_l2)\n x = self.layer4(x)\n\n return x_l1, x_l2, x\n\n def forward(self, x):\n return self._forward_impl(x)\n\n\ndef _resnet(arch, block, layers, pretrained, progress, **kwargs):\n model = ResNet(block, layers, **kwargs)\n # if pretrained:\n # state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)\n # model.load_state_dict(state_dict)\n return model\n\n\ndef resnet18(pretrained=False, progress=True, **kwargs):\n r\"\"\"ResNet-18 model from\n `\"Deep Residual Learning for Image Recognition\" <https://arxiv.org/pdf/1512.03385.pdf>`_\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _resnet(\"resnet18\", BasicBlock, [2, 2, 2, 2], 
pretrained, progress, **kwargs)\n\n\ndef resnet34(pretrained=False, progress=True, **kwargs):\n r\"\"\"ResNet-34 model from\n `\"Deep Residual Learning for Image Recognition\" <https://arxiv.org/pdf/1512.03385.pdf>`_\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _resnet(\"resnet34\", BasicBlock, [3, 4, 6, 3], pretrained, progress, **kwargs)\n\n\ndef resnet50(pretrained=False, progress=True, **kwargs):\n r\"\"\"ResNet-50 model from\n `\"Deep Residual Learning for Image Recognition\" <https://arxiv.org/pdf/1512.03385.pdf>`_\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _resnet(\"resnet50\", Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs)\n\n\ndef resnet101(pretrained=False, progress=True, **kwargs):\n r\"\"\"ResNet-101 model from\n `\"Deep Residual Learning for Image Recognition\" <https://arxiv.org/pdf/1512.03385.pdf>`_\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _resnet(\n \"resnet101\", Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs\n )\n\n\ndef resnet152(pretrained=False, progress=True, **kwargs):\n r\"\"\"ResNet-152 model from\n `\"Deep Residual Learning for Image Recognition\" <https://arxiv.org/pdf/1512.03385.pdf>`_\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _resnet(\n \"resnet152\", Bottleneck, [3, 8, 36, 3], pretrained, progress, **kwargs\n )\n\n\ndef resnext50_32x4d(pretrained=False, progress=True, **kwargs):\n r\"\"\"ResNeXt-50 32x4d model from\n `\"Aggregated Residual Transformation for Deep Neural Networks\" <https://arxiv.org/pdf/1611.05431.pdf>`_\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n kwargs[\"groups\"] = 32\n kwargs[\"width_per_group\"] = 4\n return _resnet(\n \"resnext50_32x4d\", Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs\n )\n\n\ndef resnext101_32x8d(pretrained=False, progress=True, **kwargs):\n r\"\"\"ResNeXt-101 32x8d model from\n `\"Aggregated Residual Transformation for Deep Neural Networks\" <https://arxiv.org/pdf/1611.05431.pdf>`_\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n kwargs[\"groups\"] = 32\n kwargs[\"width_per_group\"] = 8\n return _resnet(\n \"resnext101_32x8d\", Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs\n )\n\n\ndef resnext101_32x16d_wsl(pretrained=False, progress=True, **kwargs):\n \"\"\"Constructs a ResNeXt-101 32x16 model pre-trained on weakly-supervised data\n and finetuned on ImageNet from Figure 5 in\n `\"Exploring the Limits of Weakly Supervised Pretraining\" <https://arxiv.org/abs/1805.00932>`_\n Args:\n progress (bool): If True, displays a progress bar of the download to stderr.\n \"\"\"\n kwargs[\"groups\"] = 32\n kwargs[\"width_per_group\"] = 16\n return _resnet(\n \"resnext101_32x16d\", Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs\n )\n\n\ndef wide_resnet50_2(pretrained=False, progress=True, **kwargs):\n r\"\"\"Wide 
ResNet-50-2 model from\n `\"Wide Residual Networks\" <https://arxiv.org/pdf/1605.07146.pdf>`_\n\n The model is the same as ResNet except for the bottleneck number of channels\n which is twice larger in every block. The number of channels in outer 1x1\n convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048\n channels, and in Wide ResNet-50-2 has 2048-1024-2048.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n kwargs[\"width_per_group\"] = 64 * 2\n return _resnet(\n \"wide_resnet50_2\", Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs\n )\n\n\ndef wide_resnet101_2(pretrained=False, progress=True, **kwargs):\n r\"\"\"Wide ResNet-101-2 model from\n `\"Wide Residual Networks\" <https://arxiv.org/pdf/1605.07146.pdf>`_\n\n The model is the same as ResNet except for the bottleneck number of channels\n which is twice larger in every block. The number of channels in outer 1x1\n convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048\n channels, and in Wide ResNet-50-2 has 2048-1024-2048.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n kwargs[\"width_per_group\"] = 64 * 2\n return _resnet(\n \"wide_resnet101_2\", Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs\n )\n" ]
[ [ "torch.nn.Sequential", "torch.nn.init.constant_", "torch.nn.Conv2d", "torch.nn.ReLU", "torch.nn.init.kaiming_normal_" ] ]
Okery/PyTorch-Simple-MaskRCNN
[ "5e57a353f211c7130bfcf1d55cacd80057d81423" ]
[ "pytorch_mask_rcnn/utils.py" ]
[ "import os\nimport re\nimport random\nimport torch\n\n\n__all__ = [\"save_ckpt\", \"Meter\"]\n\ndef save_ckpt(model, optimizer, epochs, ckpt_path, **kwargs):\n checkpoint = {}\n checkpoint[\"model\"] = model.state_dict()\n checkpoint[\"optimizer\"] = optimizer.state_dict()\n checkpoint[\"epochs\"] = epochs\n \n for k, v in kwargs.items():\n checkpoint[k] = v\n \n prefix, ext = os.path.splitext(ckpt_path)\n ckpt_path = \"{}-{}{}\".format(prefix, epochs, ext)\n torch.save(checkpoint, ckpt_path)\n \n \nclass TextArea:\n def __init__(self):\n self.buffer = []\n \n def write(self, s):\n self.buffer.append(s)\n \n def __str__(self):\n return \"\".join(self.buffer)\n\n def get_AP(self):\n txt = str(self)\n values = re.findall(r\"(\\d{3})\\n\", txt)\n if len(values) > 0:\n values = [int(v) / 10 for v in values]\n result = {\"bbox AP\": values[0], \"mask AP\": values[12]}\n return result\n else:\n return txt\n \n \nclass Meter:\n def __init__(self, name):\n self.name = name\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n def __str__(self):\n fmtstr = \"{name}:sum={sum:.2f}, avg={avg:.4f}, count={count}\"\n return fmtstr.format(**self.__dict__)\n \n \n" ]
[ [ "torch.save" ] ]
zhaotao1987/jcvi
[ "748fcdbbd1db5eb8a4ccfe19eec6072006ffd501" ]
[ "graphics/synteny.py" ]
[ "#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n\n\"\"\"\n%prog mcscan.txt all.bed layout.csv\n\nIllustrate MCscan multiple collinearity alignments. Use layout.csv to indicate\nthe positions of tracks. For example:\n\n#x, y, rotation, ha, va, color, ratio\n0.5, 0.6, 0, left, center, g\n0.25, 0.7, 45, top, center, m\n\nWith the row ordering corresponding to the column ordering in the MCscan output.\n\"\"\"\n\n\nimport sys\nimport logging\nimport numpy as np\n\nfrom jcvi.compara.synteny import BlockFile\nfrom jcvi.formats.bed import Bed\nfrom jcvi.formats.base import DictFile\nfrom jcvi.utils.cbook import human_size\nfrom jcvi.apps.base import OptionParser\n\nfrom jcvi.graphics.glyph import Glyph, RoundLabel\nfrom jcvi.graphics.base import mpl, plt, savefig, markup, \\\n Path, PathPatch, AbstractLayout\n\n\nforward, backward = 'b', 'g' # Genes with different orientations\n\n\nclass LayoutLine (object):\n\n def __init__(self, row, delimiter=','):\n self.hidden = row[0] == '*'\n if self.hidden:\n row = row[1:]\n args = row.rstrip().split(delimiter)\n args = [x.strip() for x in args]\n self.x = float(args[0])\n self.y = float(args[1])\n self.rotation = int(args[2])\n self.ha = args[3]\n self.va = args[4]\n self.color = args[5]\n self.ratio = 1\n if len(args) > 6:\n self.ratio = float(args[6])\n if len(args) > 7:\n self.label = args[7].strip()\n else:\n self.label = None\n\n\nclass Layout (AbstractLayout):\n\n def __init__(self, filename, delimiter=','):\n super(Layout, self).__init__(filename)\n fp = open(filename)\n self.edges = []\n for row in fp:\n if row[0] == '#':\n continue\n if row[0] == 'e':\n args = row.rstrip().split(delimiter)\n args = [x.strip() for x in args]\n a, b = args[1:3]\n a, b = int(a), int(b)\n assert args[0] == 'e'\n self.edges.append((a, b))\n else:\n self.append(LayoutLine(row, delimiter=delimiter))\n\n if 3 <= len(self) <= 8:\n self.assign_colors()\n\n\nclass Shade (object):\n\n def __init__(self, ax, a, b, ymid, highlight=False, ec=\"k\", fc=\"k\",\n alpha=.2, lw=1, zorder=1):\n a1, a2 = a\n b1, b2 = b\n ax1, ay1 = a1\n ax2, ay2 = a2\n bx1, by1 = b1\n bx2, by2 = b2\n M, C4, L, CP = Path.MOVETO, Path.CURVE4, Path.LINETO, Path.CLOSEPOLY\n pathdata = \\\n [\n (M, a1),\n (C4, (ax1, ymid)),\n (C4, (bx1, ymid)),\n (C4, b1),\n (L, b2),\n (C4, (bx2, ymid)),\n (C4, (ax2, ymid)),\n (C4, a2),\n (CP, a1)\n ]\n codes, verts = zip(*pathdata)\n path = Path(verts, codes)\n if highlight:\n ec = fc = highlight\n\n pp = PathPatch(path, ec=ec, fc=fc, alpha=alpha,\n lw=lw, zorder=zorder)\n ax.add_patch(pp)\n\n\nclass Region (object):\n\n def __init__(self, ax, ext, layout, bed, scale, switch=None,\n chr_label=True, loc_label=True,\n pad=.04, vpad=.012, extra_features=None):\n x, y = layout.x, layout.y\n ratio = layout.ratio\n scale /= ratio\n self.y = y\n lr = layout.rotation\n tr = mpl.transforms.Affine2D().\\\n rotate_deg_around(x, y, lr) + ax.transAxes\n inv = ax.transAxes.inverted()\n\n start, end, si, ei, chr, orientation, span = ext\n flank = span / scale / 2\n xstart, xend = x - flank, x + flank\n self.xstart, self.xend = xstart, xend\n\n cv = lambda t: xstart + abs(t - startbp) / scale\n hidden = layout.hidden\n\n # Chromosome\n if not hidden:\n ax.plot((xstart, xend), (y, y), color=\"gray\", transform=tr, \\\n lw=2, zorder=1)\n\n self.genes = genes = bed[si: ei + 1]\n startbp, endbp = start.start, end.end\n if orientation == '-':\n startbp, endbp = endbp, startbp\n\n if switch:\n chr = switch.get(chr, chr)\n if layout.label:\n chr = layout.label\n\n label = 
\"-\".join((human_size(startbp, target=\"Mb\")[:-2],\n human_size(endbp, target=\"Mb\")))\n\n height = .012\n self.gg = {}\n # Genes\n for g in genes:\n gstart, gend = g.start, g.end\n strand = g.strand\n if strand == '-':\n gstart, gend = gend, gstart\n if orientation == '-':\n strand = \"+\" if strand == \"-\" else \"-\"\n\n x1, x2, a, b = self.get_coordinates(gstart, gend, y, cv, tr, inv)\n self.gg[g.accn] = (a, b)\n\n color = forward if strand == \"+\" else backward\n if not hidden:\n gp = Glyph(ax, x1, x2, y, height, gradient=False, fc=color, zorder=3)\n gp.set_transform(tr)\n\n # Extra features (like repeats)\n if extra_features:\n for g in extra_features:\n gstart, gend = g.start, g.end\n x1, x2, a, b = self.get_coordinates(gstart, gend, y, cv, tr, inv)\n gp = Glyph(ax, x1, x2, y, height * 3 / 4, gradient=False,\n fc='#ff7f00', zorder=2)\n gp.set_transform(tr)\n\n ha, va = layout.ha, layout.va\n\n hpad = .02\n if ha == \"left\":\n xx = xstart - hpad\n ha = \"right\"\n elif ha == \"right\":\n xx = xend + hpad\n ha = \"left\"\n else:\n xx = x\n ha = \"center\"\n\n # Tentative solution to labels stick into glyph\n magic = 40.\n cc = abs(lr) / magic if abs(lr) > magic else 1\n if va == \"top\":\n yy = y + cc * pad\n elif va == \"bottom\":\n yy = y - cc * pad\n else:\n yy = y\n\n l = np.array((xx, yy))\n trans_angle = ax.transAxes.transform_angles(np.array((lr, )),\n l.reshape((1, 2)))[0]\n lx, ly = l\n if not hidden:\n bbox = dict(boxstyle=\"round\", fc='w', ec='w', alpha=.5)\n kwargs = dict(ha=ha, va=\"center\",\n rotation=trans_angle, bbox=bbox, zorder=10)\n\n # TODO: I spent several hours on trying to make this work - with no\n # good solutions. To generate labels on multiple lines, each line\n # with a different style is difficult in matplotlib. The only way,\n # if you can tolerate an extra dot (.), is to use the recipe below.\n #chr_label = r\"\\noindent \" + markup(chr) + r\" \\\\ .\" if chr_label else None\n #loc_label = r\"\\noindent . 
\\\\ \" + label if loc_label else None\n\n chr_label = markup(chr) if chr_label else None\n loc_label = label if loc_label else None\n if chr_label:\n if loc_label:\n ax.text(lx, ly + vpad, chr_label, color=layout.color, **kwargs)\n ax.text(lx, ly - vpad, loc_label, color=\"lightslategrey\",\n size=10, **kwargs)\n else:\n ax.text(lx, ly, chr_label, color=layout.color, **kwargs)\n\n def get_coordinates(self, gstart, gend, y, cv, tr, inv):\n x1, x2 = cv(gstart), cv(gend)\n a, b = tr.transform((x1, y)), tr.transform((x2, y))\n a, b = inv.transform(a), inv.transform(b)\n return x1, x2, a, b\n\n\nclass Synteny (object):\n\n def __init__(self, fig, root, datafile, bedfile, layoutfile,\n switch=None, tree=None, extra_features=None,\n chr_label=True, loc_label=True, pad=.04):\n\n w, h = fig.get_figwidth(), fig.get_figheight()\n bed = Bed(bedfile)\n order = bed.order\n bf = BlockFile(datafile)\n self.layout = lo = Layout(layoutfile)\n switch = DictFile(switch, delimiter=\"\\t\") if switch else None\n if extra_features:\n extra_features = Bed(extra_features)\n\n exts = []\n extras = []\n for i in xrange(bf.ncols):\n ext = bf.get_extent(i, order)\n exts.append(ext)\n if extra_features:\n start, end, si, ei, chr, orientation, span = ext\n start, end = start.start, end.end # start, end coordinates\n ef = list(extra_features.extract(chr, start, end))\n\n # Pruning removes minor features with < 0.1% of the region\n ef_pruned = [x for x in ef if x.span >= span / 1000]\n print >> sys.stderr, \"Extracted {0} features \"\\\n \"({1} after pruning)\".format(len(ef), len(ef_pruned))\n extras.append(ef_pruned)\n\n maxspan = max(exts, key=lambda x: x[-1])[-1]\n scale = maxspan / .65\n\n self.gg = gg = {}\n self.rr = []\n ymids = []\n vpad = .012 * w / h\n for i in xrange(bf.ncols):\n ext = exts[i]\n ef = extras[i] if extras else None\n r = Region(root, ext, lo[i], bed, scale, switch,\n chr_label=chr_label, loc_label=loc_label,\n vpad=vpad, extra_features=ef)\n self.rr.append(r)\n # Use tid and accn to store gene positions\n gg.update(dict(((i, k), v) for k, v in r.gg.items()))\n ymids.append(r.y)\n\n for i, j in lo.edges:\n for ga, gb, h in bf.iter_pairs(i, j):\n a, b = gg[(i, ga)], gg[(j, gb)]\n ymid = (ymids[i] + ymids[j]) / 2\n Shade(root, a, b, ymid, fc=\"gainsboro\", lw=0, alpha=1)\n\n for ga, gb, h in bf.iter_pairs(i, j, highlight=True):\n a, b = gg[(i, ga)], gg[(j, gb)]\n ymid = (ymids[i] + ymids[j]) / 2\n Shade(root, a, b, ymid, alpha=1, highlight=h, zorder=2)\n\n if tree:\n from jcvi.graphics.tree import draw_tree, read_trees\n\n trees = read_trees(tree)\n ntrees = len(trees)\n logging.debug(\"A total of {0} trees imported.\".format(ntrees))\n xiv = 1. 
/ ntrees\n yiv = .3\n xstart = 0\n ystart = min(ymids) - .4\n for i in xrange(ntrees):\n ax = fig.add_axes([xstart, ystart, xiv, yiv])\n label, outgroup, tx = trees[i]\n draw_tree(ax, tx, outgroup=outgroup, rmargin=.4, leaffont=11)\n xstart += xiv\n RoundLabel(ax, .5, .3, label, fill=True, fc=\"lavender\", color=\"r\")\n\n\ndef draw_gene_legend(ax, x1, x2, ytop, d=.04, text=False, repeat=False):\n ax.plot([x1, x1 + d], [ytop, ytop], \":\", color=forward, lw=2)\n ax.plot([x1 + d], [ytop], \">\", color=forward, mec=forward)\n ax.plot([x2, x2 + d], [ytop, ytop], \":\", color=backward, lw=2)\n ax.plot([x2], [ytop], \"<\", color=backward, mec=\"g\")\n if text:\n ax.text(x1 + d / 2, ytop + d / 2, \"gene (+)\", ha=\"center\")\n ax.text(x2 + d / 2, ytop + d / 2, \"gene (-)\", ha=\"center\")\n if repeat:\n xr = (x1 + x2 + d) / 2\n Glyph(ax, xr - d / 2, xr + d / 2, ytop, .012 * 3 / 4, gradient=False,\n fc='#ff7f00', zorder=2)\n ax.text(xr, ytop + d / 2, \"repeat\", ha=\"center\")\n\n\ndef main():\n p = OptionParser(__doc__)\n p.add_option(\"--switch\",\n help=\"Rename the seqid with two-column file [default: %default]\")\n p.add_option(\"--tree\",\n help=\"Display trees on the bottom of the figure [default: %default]\")\n p.add_option(\"--extra\", help=\"Extra features in BED format\")\n opts, args, iopts = p.set_image_options(figsize=\"8x7\")\n\n if len(args) != 3:\n sys.exit(not p.print_help())\n\n datafile, bedfile, layoutfile = args\n switch = opts.switch\n tree = opts.tree\n\n pf = datafile.rsplit(\".\", 1)[0]\n fig = plt.figure(1, (iopts.w, iopts.h))\n root = fig.add_axes([0, 0, 1, 1])\n\n Synteny(fig, root, datafile, bedfile, layoutfile,\n switch=switch, tree=tree, extra_features=opts.extra)\n\n root.set_xlim(0, 1)\n root.set_ylim(0, 1)\n root.set_axis_off()\n\n image_name = pf + \".\" + iopts.format\n savefig(image_name, dpi=iopts.dpi, iopts=iopts)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.array" ] ]
aashnamsft/elastic
[ "5372d6acaf07d130ab0f0ccaf52958a7fde88902" ]
[ "test/timer/local_timer_example.py" ]
[ "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# All rights reserved.\n#\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree.\nimport logging\nimport multiprocessing as mp\nimport signal\nimport time\nimport unittest\n\nimport torch.multiprocessing as torch_mp\nimport torchelastic.timer as timer\nfrom test_utils import is_asan\n\n\nlogging.basicConfig(\n level=logging.INFO, format=\"[%(levelname)s] %(asctime)s %(module)s: %(message)s\"\n)\n\n\ndef _happy_function(rank, mp_queue):\n timer.configure(timer.LocalTimerClient(mp_queue))\n with timer.expires(after=1):\n time.sleep(0.5)\n\n\ndef _stuck_function(rank, mp_queue):\n timer.configure(timer.LocalTimerClient(mp_queue))\n with timer.expires(after=1):\n time.sleep(5)\n\n\nclass LocalTimerExample(unittest.TestCase):\n \"\"\"\n Demonstrates how to use LocalTimerServer and LocalTimerClient\n to enforce expiration of code-blocks.\n\n Since torch multiprocessing's ``start_process`` method currently\n does not take the multiprocessing context as parameter argument\n there is no way to create the mp.Queue in the correct\n context BEFORE spawning child processes. Once the ``start_process``\n API is changed in torch, then re-enable ``test_torch_mp_example``\n unittest. As of now this will SIGSEGV.\n \"\"\"\n\n @unittest.skip(\"re-enable when torch_mp.spawn() takes mp context as param\")\n def test_torch_mp_example(self):\n # in practice set the max_interval to a larger value (e.g. 60 seconds)\n ctx = mp.get_context(\"spawn\")\n mp_queue = ctx.Queue()\n server = timer.LocalTimerServer(mp_queue, max_interval=0.01)\n server.start()\n\n world_size = 8\n # all processes should complete successfully\n # since start_process does NOT take context as parameter argument yet\n # this method WILL FAIL (hence the test is disabled)\n torch_mp.start_process(\n fn=_happy_function, args=(mp_queue,), nprocs=world_size, context=ctx\n )\n\n with self.assertRaises(Exception):\n # torch.multiprocessing.spawn kills all sub-procs\n # if one of them gets killed\n torch_mp.start_process(\n fn=_stuck_function, args=(mp_queue,), nprocs=world_size, context=ctx\n )\n\n server.stop()\n\n @unittest.skip(is_asan())\n def test_example_start_method_spawn(self):\n self._run_example_with(start_method=\"spawn\")\n\n @unittest.skip(is_asan())\n def test_example_start_method_forkserver(self):\n self._run_example_with(start_method=\"forkserver\")\n\n def test_example_start_method_fork(self):\n self._run_example_with(start_method=\"fork\")\n\n def _run_example_with(self, start_method):\n spawn_ctx = mp.get_context(start_method)\n mp_queue = spawn_ctx.Queue()\n server = timer.LocalTimerServer(mp_queue, max_interval=0.01)\n server.start()\n\n world_size = 8\n processes = []\n for i in range(0, world_size):\n if i % 2 == 0:\n p = spawn_ctx.Process(target=_stuck_function, args=(i, mp_queue))\n else:\n p = spawn_ctx.Process(target=_happy_function, args=(i, mp_queue))\n p.start()\n processes.append(p)\n\n for i in range(0, world_size):\n p = processes[i]\n p.join()\n if i % 2 == 0:\n self.assertEqual(-signal.SIGKILL, p.exitcode)\n else:\n self.assertEqual(0, p.exitcode)\n\n server.stop()\n" ]
[ [ "torch.multiprocessing.start_process" ] ]
ariercole/Cambridge_COVID-19_ICU
[ "31de37868af26ee0dd16e95c56d37ba6b9966511" ]
[ "graphs.py" ]
[ "\"\"\"Graphs file.\"\"\"\n\nimport dash_core_components as dcc\nimport datetime\nimport numpy as np\nimport os\nimport pandas as pd\nimport pickle\nimport plotly.express as px\nimport plotly.graph_objs as go\nimport numpy as np\nimport os\nimport shutil\nimport statsmodels.api as sm\n\nfrom dateutil import parser\nfrom joblib import Parallel, delayed\nfrom numpy.random import gamma, lognormal, normal\nfrom statsmodels.sandbox.regression.predstd import wls_prediction_std\nfrom tqdm import tqdm\n\n\n# Global variables\nn_obs = 11 # UPDATE THIS\nn_future = 14\nn_total = n_obs + n_future\n\n\ndef load_data():\n daily_cases = pd.read_csv(os.path.join(os.pardir, 'data', 'model', 'uptodate_cases.csv'))\n\n regions = daily_cases['region']\n dates = [f'{13+i}/03' for i in range(n_total)]\n dates = dates[:19] + [f'{i}/04' for i in range(1, n_total-18)]\n\n X = sm.add_constant(np.arange(n_obs))\n X_pred = sm.add_constant(np.arange(n_total))\n\n cum_cases = np.array(daily_cases.iloc[:, 2:], dtype=np.float32)\n\n return X, X_pred, cum_cases, regions, dates\n\n\ndef _regional_prediction(X, X_pred, Y, i):\n mod = sm.OLS(np.log(Y[i]), X)\n res = mod.fit()\n\n y_pred = res.predict(X_pred)\n _, _, std_u = wls_prediction_std(res, exog=X_pred, alpha=1-0.6827) # 1 s.d.\n _, ci_l, ci_u = wls_prediction_std(res, exog=X_pred, alpha=1-0.95) # 95% CI\n\n return y_pred, std_u, ci_l, ci_u, res.params[1]\n\n\ndef regional_predictions(X, X_pred, Y):\n x_true_list = []\n y_true_list = []\n x_pred_list = []\n y_pred_list = []\n ci_l_list = []\n ci_u_list = []\n avgs_list = []\n stds_list = []\n exponent_list = []\n\n # Parallelize model fitting\n parallel = Parallel(n_jobs=-1, prefer=\"threads\")\n results = parallel(delayed(_regional_prediction)(X, X_pred, Y, i) for i in range(7))\n\n for i, (y_pred, std_u, ci_l, ci_u, exponent) in enumerate(results): # 7 regions\n avgs_list += [y_pred]\n stds_list += [std_u - y_pred]\n\n # Log\n y_pred = np.exp(y_pred)\n ci_l = np.exp(ci_l)\n ci_u = np.exp(ci_u)\n\n x_true_list += [X[:, 1]]\n y_true_list += [Y[i]]\n x_pred_list += [X_pred[:, 1]]\n y_pred_list += [y_pred]\n ci_l_list += [ci_l]\n ci_u_list += [ci_u]\n exponent_list += [exponent]\n\n return x_true_list, y_true_list, x_pred_list, y_pred_list, ci_l_list, ci_u_list, avgs_list, stds_list, exponent_list\n\n\ndef occupancy_arrays(means, stds, exponents, pct_need_icu,\n icu_delay_normal_loc=2.0, los_gamma_shape=8.0, log=True):\n means = np.stack(means)\n stds = np.stack(stds)\n n_regions = means.shape[0]\n n_days = means.shape[1]\n n_samples = 500\n arr = np.zeros((n_regions, n_days, n_samples))\n\n for k in range(n_samples):\n if log:\n new_cases = exponents[:, np.newaxis] * lognormal(means, stds)\n else:\n new_cases = normal(means, stds)\n icu_cases = pct_need_icu[:, np.newaxis] * new_cases # ICU cases = new cases each day * icu_per_case\n icu_cases = np.maximum(icu_cases, 1).astype(np.int32)\n\n for i in range(n_regions):\n for j in range(n_days):\n # Start\n delay_2_icu = normal(loc=icu_delay_normal_loc, scale=3.5, size=icu_cases[i, j])\n delay_2_icu = delay_2_icu.round().astype(np.int32)\n\n # End\n los = gamma(shape=los_gamma_shape, scale=1.0, size=icu_cases[i, j])\n los = np.maximum(los, 1).astype(np.int32)\n\n # Indices\n start_inds = j + delay_2_icu\n end_inds = np.minimum(start_inds + los, n_days-1)\n start_inds = np.maximum(start_inds, 0)\n\n for start, end in zip(start_inds, end_inds):\n if start >= n_days:\n continue\n else:\n arr[i, start:end+1, k] += 1\n\n return arr.mean(axis=2), arr.std(axis=2)\n\n\ndef 
daterange(start_date, end_date):\n for n in range(int((end_date - start_date).days)):\n yield start_date + datetime.timedelta(n)\n\n\ndef get_new_dates(dates):\n min_date_str = dates[0].split(\"/\")[1] + \"/\" + dates[0].split(\"/\")[0]\n min_date = parser.parse(min_date_str)\n today = datetime.datetime.today()\n max_date = today + datetime.timedelta(days=n_future+1)\n new_dates = []\n today_idx = None\n for i, date in enumerate(daterange(min_date, max_date)):\n if today.strftime(\"%d/%m\") == date.strftime(\"%d/%m\"):\n today_idx = i\n new_dates.append(date.strftime(\"%d/%m\"))\n return new_dates, today_idx\n\n\ndef _make_fig(x_true, y_true, x_pred, y_pred, ci_l, ci_u,\n title, ylabel, obs, dates):\n if \"patients\" in ylabel:\n y_max = max(y_pred)\n elif \"occupancy\" in ylabel:\n y_max = min(100, max(y_pred))\n else:\n pass\n\n color = \"blue\"\n trace0 = go.Scatter(\n x=x_pred,\n y=ci_l.round(2),\n line=dict(color=color),\n name=\"95% CI\",\n showlegend=False\n )\n trace1 = go.Scatter(\n x=x_pred,\n y=ci_u.round(2),\n fill='tonexty',\n name=\"95% CI\",\n line=dict(color=color)\n )\n if obs:\n trace2 = go.Scatter(\n x=x_true,\n y=y_true.round(2),\n mode='markers',\n name='Recorded',\n line=dict(color=\"red\")\n )\n trace3 = go.Scatter(\n x=x_pred,\n y=y_pred.round(2),\n mode='lines+markers',\n name='Mean',\n line=dict(color=color, width=2),\n )\n if obs:\n traces = [trace0, trace1, trace3, trace2]\n else:\n traces = [trace0, trace1, trace3]\n tickvals = list(range(0, len(dates), 3))\n # print_dates = [d if i % 3 == 0 else '' for i, d in enumerate(dates)]\n print_dates = [d for i, d in enumerate(dates) if i % 3 == 0]\n fig=dict(\n data=traces,\n layout=dict(\n showlegend=True,\n title=title,\n yaxis_title=ylabel,\n xaxis=dict(\n tickmode='array',\n tickvals=tickvals,\n ticktext=print_dates,\n showgrid=True,\n range=[min(x_true), max(x_pred)],\n ),\n yaxis=dict(\n title=ylabel,\n range=[0, max(y_pred)]\n ),\n )\n )\n\n return fig\n\n\ndef _make_figs(x_true_list, y_true_list, x_pred_list, y_pred_list, ci_l_list, ci_u_list,\n dates, regions, title, ylabel, obs=True):\n new_dates, today_idx = get_new_dates(dates)\n\n # Parallelize figure making\n parallel = Parallel(n_jobs=-1, prefer=\"threads\")\n fig_list = parallel(\n delayed(_make_fig)(\n x_true, y_true, x_pred, y_pred, ci_l, ci_u, title, ylabel, obs, new_dates)\n for x_true, y_true, x_pred, y_pred, ci_l, ci_u in zip(\n x_true_list, y_true_list, x_pred_list, y_pred_list, ci_l_list, ci_u_list))\n\n return regions, fig_list, today_idx\n\n\ndef patients_update(plot_dict):\n _, _, _, regions, dates = load_data()\n x_true, y_true, x_pred, y_pred, ci_l, ci_u = plot_dict[\"new_patients_loglinear_fit\"]\n regions, fig_list_logged, today_idx = _make_figs(x_true, y_true, x_pred, y_pred, ci_l, ci_u,\n dates, regions, \"Cumulative COVID-19 patients\", \"Number of patients\")\n\n return regions, fig_list_logged, get_new_dates(dates)\n\n\ndef occupancy_update(plot_dict, delay=10, los=8, obs=True):\n _, _, _, regions, dates = load_data()\n\n x_true, y_true, x_pred, y_pred, ci_l, ci_u = plot_dict[\"icu_patients\"][delay][los]\n regions, icu_fig_list, today_idx = _make_figs(x_true, y_true, x_pred, y_pred, ci_l, ci_u,\n dates, regions, \"ICU patients\", \"#patients\", obs=False)\n\n x_true, y_true, x_pred, y_pred, ci_l, ci_u = plot_dict[\"icu_occupancy\"][delay][los]\n regions, occ_fig_list, today_idx = _make_figs(x_true, y_true, x_pred, y_pred, ci_l, ci_u,\n dates, regions, \"% of ICU Bed Occupancy\", \"% occupancy\", obs=False)\n\n return regions, 
icu_fig_list, occ_fig_list, get_new_dates(dates)\n\n\ndef save_dict_safely(dict_, f=os.path.join('data', 'plot_dict.pkl')):\n if os.path.exists(f):\n os.remove(f)\n with open(f, 'wb') as fp:\n pickle.dump(dict_, fp)\n\n\ndef load_dict_safely(f=os.path.join('data', 'plot_dict.pkl')):\n with open(f, 'rb') as fp:\n dict_ = pickle.load(fp)\n return dict_\n\n\ndef update_backend(icu_delay_normal_locs=list(range(1, 11)), los_gamma_shapes=list(range(3, 12))):\n print('Updating backend dictionary, this may take a while...')\n print('Loading data...')\n X, X_pred, Y, _, _ = load_data()\n death_and_icu_info = pd.read_csv(os.path.join(os.pardir, 'data', 'model', 'hospitalisation_and_fatalities.csv'))\n pct_need_icu = death_and_icu_info['Critical Care Needs Rate']\n beds_info = pd.read_csv(os.path.join(os.pardir, 'data', 'model', 'ICU_beds_region.csv'))\n beds = beds_info['n_beds (2019)'].values\n\n \"\"\"\n Construct large dictionary to be indexed by the web user.\n big_dict:\n new_patients_loglinear_fit: plot_tuple\n icu_patients: icu_patients_dict\n icu_occupancy: icu_occupancy_dict\n\n icu_patients_dict:\n delay: default = [3, 4, 5, 6, 7, 8, 9, 10, 11]\n los: default = [3, 4, 5, 6, 7, 8, 9, 10, 11]\n\n Example indexing:\n - big_dict['new_patients_loglinear_fit'] --> return plot_tuple\n - big_dict['icu_occupancy'][10][8] --> return plot_tuple\n \"\"\"\n big_dict = {}\n\n # Update new patient\n print('Updating estimated new patients (LOG-LINEAR)...')\n x_true, y_true, x_pred, y_pred, ci_l, ci_u, log_means, log_stds, exponents = regional_predictions(X, X_pred, Y)\n big_dict['new_patients_loglinear_fit'] = x_true, y_true, x_pred, y_pred, ci_l, ci_u\n\n # Update ICU patient\n print('\\nUpdating ICU patients info...')\n delay_dict = {}\n for delay in icu_delay_normal_locs:\n los_dict = {}\n for los in tqdm(los_gamma_shapes):\n mu, sig = occupancy_arrays(log_means, log_stds, np.array(exponents), pct_need_icu,\n icu_delay_normal_loc=delay, los_gamma_shape=los)\n ci_l = [np.maximum(mu[i] - 1.96 * sig[i], 0) for i in range(7)]\n ci_u = [mu[i] + 1.96 * sig[i] for i in range(7)]\n\n los_dict[los] = (x_true, y_true, x_pred, mu, ci_l, ci_u)\n delay_dict[delay] = los_dict\n big_dict['icu_patients'] = delay_dict\n\n # Update ICU occupancy\n print('\\nUpdating ICU occupancy info...')\n delay_dict = {}\n for delay in icu_delay_normal_locs:\n los_dict = {}\n for los in tqdm(los_gamma_shapes):\n _, _, _, mu, ci_l, ci_u = big_dict['icu_patients'][delay][los]\n\n avg_occ = [100 * mu[i] / beds[i] for i in range(7)]\n ci_l = [np.maximum(100 * ci_l[i] / beds[i], 0) for i in range(7)]\n ci_u = [100 * ci_u[i] / beds[i] for i in range(7)]\n\n los_dict[los] = (x_true, y_true, x_pred, avg_occ, ci_l, ci_u)\n delay_dict[delay] = los_dict\n big_dict['icu_occupancy'] = delay_dict\n\n # Save\n print('Saving big dictionary to file with pickle...')\n save_dict_safely(big_dict)\n\n\ndef choroplet_plot(plot_dict, geo_data, geo_df, today=None, delay=2, los=8):\n _, _, _, y_pred, _, _ = plot_dict[\"icu_occupancy\"][delay][los]\n if today is None:\n today = n_obs\n # Index regions `today`\n y_pred = np.array(y_pred)\n geo_df['% additional demand'] = y_pred[[3, 4, 3, 2, 1, 2, 0, 5, 6], today].round(2)\n\n fig = px.choropleth_mapbox(\n geo_df,\n geojson=geo_data,\n locations='ID',\n color='% additional demand',\n color_continuous_scale=\"Portland\",\n featureidkey=\"properties.nuts118cd\",\n range_color=(0, 100),\n mapbox_style=\"carto-positron\",\n hover_data=[\"Region\", \"% additional demand\"],\n zoom=4.7,\n center={\"lat\": 53, 
\"lon\": -2},\n opacity=0.7\n )\n fig.update_layout(margin={\"r\": 0, \"t\": 0, \"l\": 0, \"b\": 0})\n\n return fig\n\n\nif __name__ == \"__main__\":\n update_backend(icu_delay_normal_locs=list(range(1, 11)), los_gamma_shapes=list(range(3, 12)))\n" ]
[ [ "numpy.random.lognormal", "numpy.log", "numpy.maximum", "numpy.minimum", "numpy.arange", "numpy.stack", "numpy.random.normal", "numpy.random.gamma", "numpy.exp", "numpy.array", "numpy.zeros" ] ]
AlexanderDavid/AutomaticExtractiveArticleSummarization
[ "8791d44dcacd9f0ac663f5788ac8ebe885409543" ]
[ "TextRank.py" ]
[ "import networkx as nx # Graphs\nfrom gensim.models.doc2vec import Doc2Vec, TaggedDocument # Sent2Vec functionality\nfrom nltk.tokenize import word_tokenize, sent_tokenize # Sentence and word tokenization\nfrom numpy import dot # Fast dot product\nimport operator # Fast touple indexing\nfrom numpy.linalg import norm # Fast vector normalization \n\ndef summarize(article, articles, do_train=False, num_sents=5):\n \"\"\"Summarize an article using TextRank\n\n Args:\n article: The document to summarize\n articles: The corpus to \"train\" on\n do_train: (False) if the model needs retrained on the corpus passed in\n num_sents: (5) Number of sentences to return as the summary\n\n Returns:\n Top n sentences that summarize the document\n\n \"\"\"\n # Initialize the model as nothing\n model = None\n \n # If the train flag is set then train the model\n if do_train:\n # Tag the data\n tagged_data = [TaggedDocument(words=word_tokenize(_d.lower()), tags=[str(i)]) for i, _d in enumerate(articles)]\n\n # Set the meta-variables\n max_epochs = 10\n vec_size = 20\n alpha = 0.025\n\n # Create the model (not trained)\n model = Doc2Vec(vector_size=vec_size,\n alpha=alpha, \n min_alpha=0.00025,\n min_count=1,\n dm =1)\n\n # Build the models vocabulary\n model.build_vocab(tagged_data)\n\n # Train the model for each epoch\n for epoch in range(max_epochs):\n model.train(tagged_data,\n total_examples=model.corpus_count,\n epochs=model.epochs)\n # decrease the learning rate\n model.alpha -= 0.0002\n # fix the learning rate, no decay\n model.min_alpha = model.alpha\n\n # Save the trained model\n model.save(\"d2v.model\")\n\n # If the train flag is not set\n else:\n # Load the model \n model = Doc2Vec.load(\"d2v.model\")\n\n # Create a new graph\n G = nx.Graph()\n \n # Split the article into sentences\n article_tok = sent_tokenize(article)\n\n # For each sentence\n for sentence in article_tok:\n # For each sentence again\n for inner in article_tok:\n # If the two sentences are not the same\n if not sentence == inner:\n # Get the vectors for each sentence\n test_data1 = word_tokenize(sentence.lower())\n test_data2 = word_tokenize(inner.lower())\n v1 = model.infer_vector(test_data1)\n v2 = model.infer_vector(test_data2)\n \n # Calculate the cosine similarity between the vectors\n cos_sim = dot(v1, v2)/(norm(v1)*norm(v2))\n \n # Add a new edge between the two nodes with the weight being the cosine similarity\n G.add_edge(sentence, inner, weight=cos_sim)\n\n # Create a new ranked array and counting variable\n ranked = []\n i = 0\n \n # Iterate through the graph\n for n, nbrs in G.adj.items():\n # Sum for the total number of neighbors\n total = 0\n for nbr, eattr in nbrs.items():\n # Append the weight to the total\n wt = nbrs[nbr][\"weight\"]\n total += wt\n # Append the order, sentence, and score to the array\n ranked.append((i, n, total))\n i += 1\n\n # Return the top n sentences sorted by the score and then order seen in the document\n top_sents = list(reversed(sorted(ranked, key=operator.itemgetter(2))))[:num_sents]\n return [x[1] for x in list(sorted(top_sents, key=operator.itemgetter(0)))]" ]
[ [ "numpy.dot", "numpy.linalg.norm" ] ]
manojgupta3051994/ga-learner-dsmp-repo
[ "5b6231d3d07f55428bc462c4a8302c7fb3e0718d" ]
[ "Stats_GA/code.py" ]
[ "# --------------\n#Header files\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\n#path of the data file- path\r\n\r\n#Code starts here \r\ndata = pd.read_csv(path)\r\ndata['Gender'].replace('-','Agender',inplace=True)\r\ngender_count = data['Gender'].value_counts()\r\ngender_count.plot(kind='bar')\r\n\n\n\n# --------------\n#Code starts here\r\nalignment = data['Alignment'].value_counts()\r\nalignment.plot(kind='pie')\r\nplt.title('Character Alignment')\n\n\n# --------------\n#Code starts here\r\nsc_df = data[['Strength','Combat']]\r\nsc_covariance = sc_df.Strength.cov(sc_df.Combat)\r\nprint (sc_covariance)\r\nsc_strength = sc_df['Strength'].std()\r\nsc_combat = sc_df['Combat'].std()\r\nx = sc_strength*sc_combat\r\nsc_pearson = sc_covariance/x\r\nprint (round(sc_pearson,2))\r\nic_df = data[['Intelligence','Combat']]\r\nic_covariance = ic_df.Intelligence.cov(ic_df.Combat)\r\nprint (ic_covariance)\r\nic_intelligence = ic_df['Intelligence'].std()\r\nic_combat = ic_df['Combat'].std()\r\ny = ic_intelligence*ic_combat\r\nic_pearson = ic_covariance/y\r\nprint (round(ic_pearson,2))\r\n\n\n\n# --------------\n#Code starts here\r\ntotal_high = data['Total'].quantile(q=0.99)\r\nsuper_best = data[data['Total']>total_high]\r\nsuper_best_names = []\r\nsuper_best_names.append(super_best['Name'])\r\nprint (super_best_names)\r\n\n\n\n# --------------\n#Code starts here\r\nfig,(ax_1,ax_2,ax_3) = plt.subplots(1,3)\r\nax_1.boxplot(data['Intelligence'])\r\nax_1.set_title('Intelligence')\r\nax_2.boxplot(data['Speed'])\r\nax_2.set_title('Speed')\r\nax_3.boxplot(data['Power'])\r\nax_3.set_title('Power')\n\n\n" ]
[ [ "pandas.read_csv", "matplotlib.pyplot.subplots", "matplotlib.pyplot.title" ] ]
david-yd-hao/Vision-RB-Robotics
[ "c9fc812d2b08291f47f046ed1b4c97967bf9f098" ]
[ "Vision/mask.py" ]
[ "import cv2\nimport numpy as np\n\ndef red_mask(img, lower=[(0,80,100), (5,255,255)], higher=[(165,80,100), (180,255,255)]):\n img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n mask1 = cv2.inRange(img_hsv, lower[0], lower[1])\n mask2 = cv2.inRange(img_hsv, higher[0], higher[1])\n mask = cv2.bitwise_or(mask1, mask2 )\n croped = cv2.bitwise_and(img, img, mask=mask)\n return croped\n\n\ndef blue_mask(img, lower=[(115,38,148)], higher=[(160, 113, 255)]):\n img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n mask = cv2.inRange(img_hsv, lower[0], higher[0])\n croped = cv2.bitwise_and(img, img, mask=mask)\n return croped\n\n\ndef white_mask(img, lower=[(79,54,123)], higher=[(215, 255, 255)]):\n img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n mask = cv2.inRange(img_hsv, lower[0], higher[0])\n croped = cv2.bitwise_and(img, img, mask=mask)\n return croped\n\ndef green_mask(img, lower = [(140, 20, 60)], higher = [(180, 60, 100)]):\n img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n mask = cv2.inRange(img_hsv, lower[0], higher[0])\n croped = cv2.bitwise_and(img, img, mask=mask)\n return croped\n\ndef anti_glare(img, lower = [(0, 0, 0)], higher = [(255, 127, 250)]):\n img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n mask = cv2.inRange(img_hsv, lower[0], higher[0])\n croped = cv2.bitwise_and(img, img, mask=mask)\n return croped\n\ndef sharpen(img):\n kernel = np.array([[0, -1, 0],\n [-1, 5,-1],\n [0, -1, 0]])\n img = cv2.filter2D(src=img, ddepth = -1, kernel = kernel)\n return img\n" ]
[ [ "numpy.array" ] ]
bahalbach/nbaproject
[ "b2336452ebefa1251dee9ff19a6f189756c5a3dd", "b2336452ebefa1251dee9ff19a6f189756c5a3dd" ]
[ "nbaproject/load_roster_br.py", "nbaproject/build_simple_rebound_model.py" ]
[ "import requests\nfrom bs4 import BeautifulSoup, Comment\nimport pandas as pd\nimport datetime\nimport unidecode\nimport time\n\nfrom nba_utils import get_team_abr, map_id\n\n\ndef load_roster_br(team_id, season_name):\n team_abr = get_team_abr(team_id, season_name)\n season_year = season_name[:2]+season_name[5:]\n url = f\"https://www.basketball-reference.com/teams/{team_abr}/{season_year}.html\"\n loaded = False\n while not loaded:\n try:\n page = requests.get(url, timeout=15)\n loaded = True\n except requests.exceptions.Timeout:\n time.sleep(5)\n\n soup = BeautifulSoup(page.text, features=\"lxml\")\n comments = soup.find_all(string=lambda text: isinstance(text, Comment))\n\n all_dfs = pd.read_html(page.text)\n for comment in comments:\n if 'table' in comment:\n try:\n all_dfs.append(pd.read_html(comment)[0])\n except:\n continue\n\n player_data = {}\n\n column_names = ['name', 'id', 'br_id', 'number', 'pos',\n 'height', 'weight', 'bday', 'country', 'exp', 'colleges']\n column_names += [\"age\", \"games\", \"games_started\", \"minutes_played\",\n \"pts\", \"fg2m\", \"fg2a\", \"fg3m\", \"fg3a\", \"ftm\", \"fta\", \"orb\", \"drb\", \"ast\", \"stl\", \"blk\", \"tov\", \"pf\"]\n\n rows = soup.find(id=\"roster\").tbody.find_all('tr')\n for row in rows:\n data = row.find_all('td')\n name = unidecode.unidecode(data[0].a.get_text())\n try:\n br_id = data[0]['data-append-csv']\n except KeyError:\n br_id = data[0].a['href'].split('/')[-1][:-5]\n id = map_id(br_id)\n number = row.find('th').get_text()\n pos = data[1].get_text()\n height = data[2].get_text()\n weight = int(data[3].get_text())\n bday = data[4].get_text()\n bday = datetime.datetime.strptime(bday, \"%B %d, %Y\")\n country = data[5].get_text()\n exp = data[6].get_text()\n exp = 0 if exp == 'R' else int(exp)\n colleges = data[7].get_text()\n player_data[name] = {'player_info': [name, id, br_id, number, pos,\n height, weight, bday, country, exp, colleges]}\n\n total_rows = soup.find(id=\"totals\").tbody.find_all('tr')\n for row in total_rows:\n data = row.find_all('td')\n name = unidecode.unidecode(data[0].get_text())\n\n age = int(data[1].get_text())\n games = int(data[2].get_text())\n games_started = int(data[3].get_text())\n minutes_played = int(data[4].get_text())\n\n pts = int(data[26].get_text())\n\n fg2m = int(data[11].get_text())\n fg2a = int(data[12].get_text())\n\n fg3m = int(data[8].get_text())\n fg3a = int(data[9].get_text())\n\n ftm = int(data[15].get_text())\n fta = int(data[16].get_text())\n\n orb = int(data[18].get_text())\n drb = int(data[19].get_text())\n ast = int(data[21].get_text())\n stl = int(data[22].get_text())\n blk = int(data[23].get_text())\n tov = int(data[24].get_text())\n pf = int(data[25].get_text())\n\n if name in player_data:\n player_data[name]['season_totals'] = [age, games, games_started, minutes_played,\n pts, fg2m, fg2a, fg3m, fg3a, ftm, fta, orb, drb, ast, stl, blk, tov, pf]\n\n salaries = {}\n for comment in comments:\n if 'salaries2' in comment:\n salary_table = BeautifulSoup(comment, features=\"lxml\").table.tbody\n\n for row in salary_table.find_all('tr'):\n data = row.find_all('td')\n try:\n br_id = data[0]['data-append-csv']\n id = map_id(br_id)\n except KeyError:\n id = 0\n try:\n salary = data[1]['csk']\n except KeyError:\n salary = 0\n salaries[id] = salary\n salaries\n\n players = []\n for player in player_data.values():\n player_array = player['player_info'] + \\\n (player['season_totals'] if 'season_totals' in player else [0]*18)\n players.append(player_array)\n\n br_df = 
pd.DataFrame(players, columns=column_names)\n\n return br_df, salaries, all_dfs\n", "from genericpath import isdir\n\n\nimport tensorflow as tf\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import OneHotEncoder, LabelBinarizer, OrdinalEncoder, StandardScaler\nfrom sklearn.compose import ColumnTransformer\nimport pandas as pd\nfrom nba_dataclasses import EventType, ResultClass, GamePossessionInfo\nfrom sklearn.model_selection import train_test_split\nfrom nba import NbaTracker\n\ncolumns = ['result_class', 'result_team', 'num_fts', 'shot_type',\n 'is_blocked', 'is_putback', 'in_penalty', 'offense_is_home', 'score_margin']\nnon_player_columns = columns[:]\nnum_rebound_inputs = 6\n\ncatagorical_attributes = ['shot_type']\nbinary_attributes = ['is_blocked', 'is_putback',\n 'in_penalty', 'offense_is_home']\nnumerical_attributes = ['score_margin']\n\n\ndef get_rebound_df_from_games(games: list[GamePossessionInfo]):\n rebounds = []\n for game in games:\n last_game_event = None\n for ge in game.game_events:\n if ge.event_type == EventType.Rebound:\n result_class = ge.result.result_class\n # if result_class == ResultClass.JUMPBALL:\n # result_team = 2\n if result_class in {\n ResultClass.OFF_REBOUND, ResultClass.FT, ResultClass.SAME_TEAM}:\n result_team = 1\n else:\n result_team = 0\n num_fts = ge.result.num_fts\n shot_type = ge.result.shot_type\n is_blocked = ge.result.is_blocked\n is_putback = last_game_event.is_putback\n in_penalty = ge.in_penalty\n offense_is_home = ge.offense_is_home\n score_margin = ge.score_margin\n\n rebound = [\n result_class,\n result_team,\n num_fts,\n shot_type,\n is_blocked,\n is_putback,\n in_penalty,\n offense_is_home,\n score_margin,\n ]\n\n rebounds.append(rebound)\n last_game_event = ge\n return pd.DataFrame(rebounds, columns=columns)\n\n\ndef build_simple_rebound_model(season, random_state=432536):\n return build_rebound_model_from_games(list(season.games.values()), name=season.name, random_state=random_state)\n\n\ndef build_multiseason_rebound_model(nbaTracker, random_state=432536):\n seasons = list(sorted(list(nbaTracker.seasons.keys())))\n name = seasons[0].split(\"-\")[0] + seasons[-1].split(\"-\")[1]\n games = []\n for season_name in seasons:\n season = nbaTracker.seasons[season_name]\n games += list(season.games.values())\n\n return build_rebound_model_from_games(games, name=name, random_state=random_state)\n\n\ndef build_rebound_model_from_games(games, name, random_state):\n train_games, test_games = train_test_split(\n games, test_size=0.1, random_state=random_state)\n\n train_rebounds = get_rebound_df_from_games(train_games)\n test_rebounds = get_rebound_df_from_games(test_games)\n\n train_X = train_rebounds.drop(\n ['result_class', 'result_team', 'num_fts'], axis=1)\n train_y = train_rebounds['result_class']\n train_is_oreb = train_rebounds['result_team']\n test_X = test_rebounds.drop(\n ['result_class', 'result_team', 'num_fts'], axis=1)\n test_y = test_rebounds['result_class']\n test_is_oreb = test_rebounds['result_team']\n\n # transform catagorical attributes to one-hot encoding\n preprocess = ColumnTransformer([(\"categorical\", OneHotEncoder(), catagorical_attributes), (\n \"binary\", 'passthrough', binary_attributes), (\"numerical\", StandardScaler(), numerical_attributes)])\n\n processed_train_X = tf.cast(\n preprocess.fit_transform(train_X), dtype=tf.float32)\n processed_test_X = tf.cast(\n preprocess.transform(test_X), dtype=tf.float32)\n\n path = f'saved_model/rebound_model{name}'\n if isdir(path):\n rebound_model = 
tf.keras.models.load_model(path)\n else:\n processed_train_y = train_y.to_numpy().reshape(-1, 1) - 1\n processed_test_y = test_y.to_numpy().reshape(-1, 1) - 1\n\n train_dataset = tf.data.Dataset.from_tensor_slices(\n (processed_train_X, (processed_train_y, train_is_oreb.to_numpy().reshape(-1, 1)))).shuffle(10000).batch(32).prefetch(1)\n test_dataset = tf.data.Dataset.from_tensor_slices(\n (processed_test_X, (processed_test_y, test_is_oreb.to_numpy().reshape(-1, 1)))).shuffle(10000).batch(32).prefetch(1)\n\n inputs = tf.keras.Input(processed_train_X.shape[1:])\n internal = tf.keras.layers.Dense(16)(inputs)\n rebound_type = tf.keras.layers.Dense(\n 10, activation='softmax')(internal)\n is_oreb = tf.keras.layers.Dense(1, activation='sigmoid')(internal)\n\n rebound_model = tf.keras.Model(\n inputs=inputs, outputs=[rebound_type, is_oreb])\n loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(\n from_logits=False)\n rebound_model.compile(\n optimizer='adam',\n loss=[loss_fn, 'binary_crossentropy'],\n metrics=['accuracy'])\n\n print(\"built model, now training\")\n # fit data\n history = rebound_model.fit(train_dataset, epochs=2)\n rebound_model.save(path)\n\n print(\"evaluating\")\n print(rebound_model.evaluate(test_dataset))\n\n return preprocess, rebound_model\n # def stack_dict(inputs, fun=tf.stack):\n # values = []\n # for key in sorted(inputs.keys()):\n # values.append(tf.cast(inputs[key], tf.float32))\n\n # return fun(values, axis=-1)\n # inputs = {}\n # for name, column in train_X.items():\n # if (name in catagorical_attributes or name in binary_attributes):\n # dtype = tf.int64\n # else:\n # dtype = tf.float32\n # inputs[name] = tf.keras.Input(shape=(), name=name, dtype=dtype)\n\n # processed = []\n # for name in binary_attributes:\n # inp = inputs[name]\n # inp = inp\n\n # rebound_model.evaluate(processed_test_X, [\n # test_y-1, test_is_oreb.to_numpy().reshape(-1, 1)], verbose=2)\n" ]
[ [ "pandas.read_html", "pandas.DataFrame" ], [ "tensorflow.keras.models.load_model", "tensorflow.keras.Input", "tensorflow.keras.losses.SparseCategoricalCrossentropy", "tensorflow.keras.layers.Dense", "sklearn.preprocessing.OneHotEncoder", "sklearn.model_selection.train_test_split", "tensorflow.keras.Model", "pandas.DataFrame", "sklearn.preprocessing.StandardScaler" ] ]
chaitjo/lstm-context-embeddings
[ "ab5894eb727ede8daa394ebe6e87735a6207f292" ]
[ "data_helpers.py" ]
[ "import numpy as np\nimport re\nimport itertools\nfrom collections import Counter\n\n\ndef clean_str(string):\n \"\"\"\n Tokenization/string cleaning\n \"\"\"\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string)\n string = re.sub(r\"\\'s\", \" \\'s\", string)\n string = re.sub(r\"\\'ve\", \" \\'ve\", string)\n string = re.sub(r\"n\\'t\", \" n\\'t\", string)\n string = re.sub(r\"\\'re\", \" \\'re\", string)\n string = re.sub(r\"\\'d\", \" \\'d\", string)\n string = re.sub(r\"\\'ll\", \" \\'ll\", string)\n string = re.sub(r\",\", \" , \", string)\n string = re.sub(r\"!\", \" ! \", string)\n string = re.sub(r\"\\(\", \" \\( \", string)\n string = re.sub(r\"\\)\", \" \\) \", string)\n string = re.sub(r\"\\?\", \" \\? \", string)\n string = re.sub(r\"\\s{2,}\", \" \", string)\n \n return string.strip().lower()\n\n\ndef load_data_and_labels():\n \"\"\"\n Loads polarity data from files, splits the data into words and generates labels.\n Returns split sentences and labels.\n \"\"\"\n \n # Load data from files\n positive_examples = list(open(\"./data/rt-polaritydata/rt-polarity.pos\", \"r\").readlines())\n positive_examples = [s.strip() for s in positive_examples]\n negative_examples = list(open(\"./data/rt-polaritydata/rt-polarity.neg\", \"r\").readlines())\n negative_examples = [s.strip() for s in negative_examples]\n \n # Split by words\n x_text = positive_examples + negative_examples\n x_text = [clean_str(sent) for sent in x_text]\n \n # Generate labels\n positive_labels = [[0, 1] for _ in positive_examples]\n negative_labels = [[1, 0] for _ in negative_examples]\n y = np.concatenate([positive_labels, negative_labels], 0)\n\n # Generate sequence lengths\n seqlen = np.array([len(sent.split(\" \")) for sent in x_text])\n \n return [x_text, y, seqlen]\n\n\ndef batch_iter(data, seqlen_data, batch_size, num_epochs, shuffle=True):\n \"\"\"\n Generates a batch iterator for a dataset.\n \"\"\"\n \n data = np.array(data)\n data_size = len(data)\n num_batches_per_epoch = int(len(data)/batch_size) + 1\n \n for epoch in range(num_epochs):\n # Shuffle the data at each epoch\n if shuffle:\n shuffle_indices = np.random.permutation(np.arange(data_size))\n shuffled_data = data[shuffle_indices]\n else:\n shuffled_data = data\n \n for batch_num in range(num_batches_per_epoch):\n start_index = batch_num * batch_size\n end_index = min((batch_num + 1) * batch_size, data_size)\n \n seqlen_batch = seqlen_data[start_index:end_index]\n\n yield shuffled_data[start_index:end_index], seqlen_batch\n #TODO: Problem with seqlens\n\n" ]
[ [ "numpy.concatenate", "numpy.arange", "numpy.array" ] ]
brettkoonce/jraph
[ "fbb3cf2a04015d4d5a878f4f0ab9fad961392618" ]
[ "jraph/examples/sat.py" ]
[ "# Copyright 2020 DeepMind Technologies Limited.\n\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# https://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nr\"\"\"2-SAT solver example.\n\nHere we train a graph neural network to solve 2-sat problems.\nhttps://en.wikipedia.org/wiki/2-satisfiability\n\nFor instance a 2 sat problem with 3 literals would look like this:\n (a or b) and (not a or c) and (not b or not c)\n\nWe represent this problem in form of a bipartite-graph, with edges\nconnecting the literal-nodes (a, b, c) with the constraint-nodes (O).\nThe corresponding graph looks like this:\n O O O\n |\\ /\\ /|\n | \\/ \\/ |\n | /\\ /\\ |\n |/ \\/ \\|\n a b c\n\nThe nodes are one-hot encoded with literal nodes as (1, 0) and constraint nodes\nas (0, 1). The edges are one-hot encoded with (1, 0) if the literal should be\ntrue and (0, 1) if the literal should be false.\n\nThe graph neural network encodes the nodes and the edges and runs multiple\nmessage passing steps by calculting message for each edge and aggregating\nall the messages of the nodes.\n\nThe training dataset consists of randomly generated 2-sat problems with 2 to 15\nliterals.\nThe test dataset consists of randomly generated 2-sat problems with 16 to 20\nliterals.\n\"\"\"\n\nimport collections\nimport logging\nimport random\n\nfrom absl import app\nimport haiku as hk\nimport jax\nimport jax.numpy as jnp\nimport jraph\nimport numpy as np\nimport optax\n\n\nProblem = collections.namedtuple(\"Problem\", (\"graph\", \"labels\", \"mask\"))\n\n\ndef get_2sat_problem(min_n_literals: int, max_n_literals: int) -> Problem:\n \"\"\"Creates bipartite-graph representing a randomly generated 2-sat problem.\n\n Args:\n min_n_literals: minimum number of literals in the 2-sat problem.\n max_n_literals: maximum number of literals in the 2-sat problem.\n\n Returns:\n bipartite-graph, node labels and node mask.\n \"\"\"\n n_literals = random.randint(min_n_literals, max_n_literals)\n n_literals_true = random.randint(1, n_literals - 1)\n n_constraints = n_literals * (n_literals - 1) // 2\n\n n_node = n_literals + n_constraints\n # 0 indicates a literal node\n # 1 indicates a constraint node.\n nodes = [0 if i < n_literals else 1 for i in range(n_node)]\n edges = []\n senders = []\n for literal_node1 in range(n_literals):\n for literal_node2 in range(literal_node1 + 1, n_literals):\n senders.append(literal_node1)\n senders.append(literal_node2)\n # 1 indicates that the literal must be true for this constraint.\n # 0 indicates that the literal must be false for this constraint.\n # I.e. 
with literals a and b, we have the following possible constraints:\n # 0, 0 -> a or b\n # 1, 0 -> not a or b\n # 0, 1 -> a or not b\n # 1, 1 -> not a or not b\n edges.append(1 if literal_node1 < n_literals_true else 0)\n edges.append(1 if literal_node2 < n_literals_true else 0)\n\n graph = jraph.GraphsTuple(\n n_node=np.asarray([n_node]),\n n_edge=np.asarray([2 * n_constraints]),\n # One-hot encoding for nodes and edges.\n nodes=np.eye(2)[nodes],\n edges=np.eye(2)[edges],\n globals=None,\n senders=np.asarray(senders),\n receivers=np.repeat(np.arange(n_constraints) + n_literals, 2))\n\n # In order to jit compile our code, we have to pad the nodes and edges of\n # the GraphsTuple to a static shape.\n max_n_constraints = max_n_literals * (max_n_literals - 1) // 2\n max_nodes = max_n_literals + max_n_constraints + 1\n max_edges = 2 * max_n_constraints\n graph = jraph.pad_with_graphs(graph, max_nodes, max_edges)\n\n # The ground truth solution for the 2-sat problem.\n labels = (np.arange(max_nodes) < n_literals_true).astype(np.int32)\n labels = np.eye(2)[labels]\n\n # For the loss calculation we create a mask for the nodes, which masks the\n # the constraint nodes and the padding nodes.\n mask = (np.arange(max_nodes) < n_literals).astype(np.int32)\n return Problem(graph=graph, labels=labels, mask=mask)\n\n\ndef network_definition(\n graph: jraph.GraphsTuple,\n num_message_passing_steps: int = 5) -> jraph.ArrayTree:\n \"\"\"Defines a graph neural network.\n\n Args:\n graph: Graphstuple the network processes.\n num_message_passing_steps: number of message passing steps.\n\n Returns:\n Decoded nodes.\n \"\"\"\n embedding = jraph.GraphMapFeatures(\n embed_edge_fn=jax.vmap(hk.Linear(output_size=16)),\n embed_node_fn=jax.vmap(hk.Linear(output_size=16)))\n graph = embedding(graph)\n\n @jax.vmap\n @jraph.concatenated_args\n def update_fn(features):\n net = hk.Sequential([\n hk.Linear(10), jax.nn.relu,\n hk.Linear(10), jax.nn.relu,\n hk.Linear(10), jax.nn.relu])\n return net(features)\n\n for _ in range(num_message_passing_steps):\n gn = jraph.InteractionNetwork(\n update_edge_fn=update_fn,\n update_node_fn=update_fn,\n include_sent_messages_in_node_update=True)\n graph = gn(graph)\n\n return hk.Linear(2)(graph.nodes)\n\n\ndef train(num_steps: int):\n \"\"\"Trains a graph neural network on a 2-sat problem.\"\"\"\n train_dataset = (2, 15)\n test_dataset = (16, 20)\n random.seed(42)\n\n network = hk.without_apply_rng(hk.transform(network_definition))\n problem = get_2sat_problem(*train_dataset)\n params = network.init(jax.random.PRNGKey(42), problem.graph)\n\n @jax.jit\n def prediction_loss(params, problem):\n decoded_nodes = network.apply(params, problem.graph)\n # We interpret the decoded nodes as a pair of logits for each node.\n log_prob = jax.nn.log_softmax(decoded_nodes) * problem.labels\n return -jnp.sum(log_prob * problem.mask[:, None]) / jnp.sum(problem.mask)\n\n opt_init, opt_update = optax.adam(2e-4)\n opt_state = opt_init(params)\n\n @jax.jit\n def update(params, opt_state, problem):\n g = jax.grad(prediction_loss)(params, problem)\n updates, opt_state = opt_update(g, opt_state)\n return optax.apply_updates(params, updates), opt_state\n\n for step in range(num_steps):\n problem = get_2sat_problem(*train_dataset)\n params, opt_state = update(params, opt_state, problem)\n if step % 1000 == 0:\n train_loss = jnp.mean(\n jnp.asarray([\n prediction_loss(params, get_2sat_problem(*train_dataset))\n for _ in range(100)\n ])).item()\n test_loss = jnp.mean(\n jnp.asarray([\n prediction_loss(params, 
get_2sat_problem(*test_dataset))\n for _ in range(100)\n ])).item()\n logging.info(\"step %r loss train %r test %r\", step, train_loss, test_loss)\n\n\ndef main(argv):\n if len(argv) > 1:\n raise app.UsageError(\"Too many command-line arguments.\")\n\n train(num_steps=10000)\n\n\nif __name__ == \"__main__\":\n app.run(main)\n" ]
[ [ "numpy.asarray", "numpy.arange", "numpy.eye" ] ]
pixelart7/hadoop
[ "82919a1e7af2ec22993f273e9ee64512a688c99b" ]
[ "hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/docker/with-cifar10-models/ubuntu-16.04/cifar10_estimator_tf_1.8.0/cifar10_main.py" ]
[ "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"ResNet model for classifying images from CIFAR-10 dataset.\n\nSupport single-host training with one or multiple devices.\n\nResNet as proposed in:\nKaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun\nDeep Residual Learning for Image Recognition. arXiv:1512.03385\n\nCIFAR-10 as in:\nhttp://www.cs.toronto.edu/~kriz/cifar.html\n\n\n\"\"\"\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport functools\nimport itertools\nimport os\n\nimport cifar10\nimport cifar10_model\nimport cifar10_utils\nimport numpy as np\nimport six\nfrom six.moves import xrange # pylint: disable=redefined-builtin\nimport tensorflow as tf\n\ntf.logging.set_verbosity(tf.logging.INFO)\n\n\ndef get_model_fn(num_gpus, variable_strategy, num_workers):\n \"\"\"Returns a function that will build the resnet model.\"\"\"\n\n def _resnet_model_fn(features, labels, mode, params):\n \"\"\"Resnet model body.\n\n Support single host, one or more GPU training. Parameter distribution can\n be either one of the following scheme.\n 1. CPU is the parameter server and manages gradient updates.\n 2. Parameters are distributed evenly across all GPUs, and the first GPU\n manages gradient updates.\n\n Args:\n features: a list of tensors, one for each tower\n labels: a list of tensors, one for each tower\n mode: ModeKeys.TRAIN or EVAL\n params: Hyperparameters suitable for tuning\n Returns:\n A EstimatorSpec object.\n \"\"\"\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n weight_decay = params.weight_decay\n momentum = params.momentum\n\n tower_features = features\n tower_labels = labels\n tower_losses = []\n tower_gradvars = []\n tower_preds = []\n\n # channels first (NCHW) is normally optimal on GPU and channels last (NHWC)\n # on CPU. 
The exception is Intel MKL on CPU which is optimal with\n # channels_last.\n data_format = params.data_format\n if not data_format:\n if num_gpus == 0:\n data_format = 'channels_last'\n else:\n data_format = 'channels_first'\n\n if num_gpus == 0:\n num_devices = 1\n device_type = 'cpu'\n else:\n num_devices = num_gpus\n device_type = 'gpu'\n\n for i in range(num_devices):\n worker_device = '/{}:{}'.format(device_type, i)\n if variable_strategy == 'CPU':\n device_setter = cifar10_utils.local_device_setter(\n worker_device=worker_device)\n elif variable_strategy == 'GPU':\n device_setter = cifar10_utils.local_device_setter(\n ps_device_type='gpu',\n worker_device=worker_device,\n ps_strategy=tf.contrib.training.GreedyLoadBalancingStrategy(\n num_gpus, tf.contrib.training.byte_size_load_fn))\n with tf.variable_scope('resnet', reuse=bool(i != 0)):\n with tf.name_scope('tower_%d' % i) as name_scope:\n with tf.device(device_setter):\n loss, gradvars, preds = _tower_fn(\n is_training, weight_decay, tower_features[i], tower_labels[i],\n data_format, params.num_layers, params.batch_norm_decay,\n params.batch_norm_epsilon)\n tower_losses.append(loss)\n tower_gradvars.append(gradvars)\n tower_preds.append(preds)\n if i == 0:\n # Only trigger batch_norm moving mean and variance update from\n # the 1st tower. Ideally, we should grab the updates from all\n # towers but these stats accumulate extremely fast so we can\n # ignore the other stats from the other towers without\n # significant detriment.\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS,\n name_scope)\n\n # Now compute global loss and gradients.\n gradvars = []\n with tf.name_scope('gradient_averaging'):\n all_grads = {}\n for grad, var in itertools.chain(*tower_gradvars):\n if grad is not None:\n all_grads.setdefault(var, []).append(grad)\n for var, grads in six.iteritems(all_grads):\n # Average gradients on the same device as the variables\n # to which they apply.\n with tf.device(var.device):\n if len(grads) == 1:\n avg_grad = grads[0]\n else:\n avg_grad = tf.multiply(tf.add_n(grads), 1. 
/ len(grads))\n gradvars.append((avg_grad, var))\n\n # Device that runs the ops to apply global gradient updates.\n consolidation_device = '/gpu:0' if variable_strategy == 'GPU' else '/cpu:0'\n with tf.device(consolidation_device):\n # Suggested learning rate scheduling from\n # https://github.com/ppwwyyxx/tensorpack/blob/master/examples/ResNet/cifar10-resnet.py#L155\n num_batches_per_epoch = cifar10.Cifar10DataSet.num_examples_per_epoch(\n 'train') // (params.train_batch_size * num_workers)\n boundaries = [\n num_batches_per_epoch * x\n for x in np.array([82, 123, 300], dtype=np.int64)\n ]\n staged_lr = [params.learning_rate * x for x in [1, 0.1, 0.01, 0.002]]\n\n learning_rate = tf.train.piecewise_constant(tf.train.get_global_step(),\n boundaries, staged_lr)\n\n loss = tf.reduce_mean(tower_losses, name='loss')\n\n examples_sec_hook = cifar10_utils.ExamplesPerSecondHook(\n params.train_batch_size, every_n_steps=10)\n\n tensors_to_log = {'learning_rate': learning_rate, 'loss': loss}\n\n logging_hook = tf.train.LoggingTensorHook(\n tensors=tensors_to_log, every_n_iter=100)\n\n train_hooks = [logging_hook, examples_sec_hook]\n\n optimizer = tf.train.MomentumOptimizer(\n learning_rate=learning_rate, momentum=momentum)\n\n if params.sync:\n optimizer = tf.train.SyncReplicasOptimizer(\n optimizer, replicas_to_aggregate=num_workers)\n sync_replicas_hook = optimizer.make_session_run_hook(params.is_chief)\n train_hooks.append(sync_replicas_hook)\n\n # Create single grouped train op\n train_op = [\n optimizer.apply_gradients(\n gradvars, global_step=tf.train.get_global_step())\n ]\n train_op.extend(update_ops)\n train_op = tf.group(*train_op)\n\n predictions = {\n 'classes':\n tf.concat([p['classes'] for p in tower_preds], axis=0),\n 'probabilities':\n tf.concat([p['probabilities'] for p in tower_preds], axis=0)\n }\n stacked_labels = tf.concat(labels, axis=0)\n metrics = {\n 'accuracy':\n tf.metrics.accuracy(stacked_labels, predictions['classes'])\n }\n\n return tf.estimator.EstimatorSpec(\n mode=mode,\n predictions=predictions,\n loss=loss,\n train_op=train_op,\n training_hooks=train_hooks,\n eval_metric_ops=metrics)\n\n return _resnet_model_fn\n\n\ndef _tower_fn(is_training, weight_decay, feature, label, data_format,\n num_layers, batch_norm_decay, batch_norm_epsilon):\n \"\"\"Build computation tower (Resnet).\n\n Args:\n is_training: true if is training graph.\n weight_decay: weight regularization strength, a float.\n feature: a Tensor.\n label: a Tensor.\n data_format: channels_last (NHWC) or channels_first (NCHW).\n num_layers: number of layers, an int.\n batch_norm_decay: decay for batch normalization, a float.\n batch_norm_epsilon: epsilon for batch normalization, a float.\n\n Returns:\n A tuple with the loss for the tower, the gradients and parameters, and\n predictions.\n\n \"\"\"\n model = cifar10_model.ResNetCifar10(\n num_layers,\n batch_norm_decay=batch_norm_decay,\n batch_norm_epsilon=batch_norm_epsilon,\n is_training=is_training,\n data_format=data_format)\n logits = model.forward_pass(feature, input_data_format='channels_last')\n tower_pred = {\n 'classes': tf.argmax(input=logits, axis=1),\n 'probabilities': tf.nn.softmax(logits)\n }\n\n tower_loss = tf.losses.sparse_softmax_cross_entropy(\n logits=logits, labels=label)\n tower_loss = tf.reduce_mean(tower_loss)\n\n model_params = tf.trainable_variables()\n tower_loss += weight_decay * tf.add_n(\n [tf.nn.l2_loss(v) for v in model_params])\n\n tower_grad = tf.gradients(tower_loss, model_params)\n\n return tower_loss, zip(tower_grad, 
model_params), tower_pred\n\n\ndef input_fn(data_dir,\n subset,\n num_shards,\n batch_size,\n use_distortion_for_training=True):\n \"\"\"Create input graph for model.\n\n Args:\n data_dir: Directory where TFRecords representing the dataset are located.\n subset: one of 'train', 'validate' and 'eval'.\n num_shards: num of towers participating in data-parallel training.\n batch_size: total batch size for training to be divided by the number of\n shards.\n use_distortion_for_training: True to use distortions.\n Returns:\n two lists of tensors for features and labels, each of num_shards length.\n \"\"\"\n with tf.device('/cpu:0'):\n use_distortion = subset == 'train' and use_distortion_for_training\n dataset = cifar10.Cifar10DataSet(data_dir, subset, use_distortion)\n image_batch, label_batch = dataset.make_batch(batch_size)\n if num_shards <= 1:\n # No GPU available or only 1 GPU.\n return [image_batch], [label_batch]\n\n # Note that passing num=batch_size is safe here, even though\n # dataset.batch(batch_size) can, in some cases, return fewer than batch_size\n # examples. This is because it does so only when repeating for a limited\n # number of epochs, but our dataset repeats forever.\n image_batch = tf.unstack(image_batch, num=batch_size, axis=0)\n label_batch = tf.unstack(label_batch, num=batch_size, axis=0)\n feature_shards = [[] for i in range(num_shards)]\n label_shards = [[] for i in range(num_shards)]\n for i in xrange(batch_size):\n idx = i % num_shards\n feature_shards[idx].append(image_batch[i])\n label_shards[idx].append(label_batch[i])\n feature_shards = [tf.parallel_stack(x) for x in feature_shards]\n label_shards = [tf.parallel_stack(x) for x in label_shards]\n return feature_shards, label_shards\n\n\ndef get_experiment_fn(data_dir,\n num_gpus,\n variable_strategy,\n use_distortion_for_training=True):\n \"\"\"Returns an Experiment function.\n\n Experiments perform training on several workers in parallel,\n in other words experiments know how to invoke train and eval in a sensible\n fashion for distributed training. Arguments passed directly to this\n function are not tunable, all other arguments should be passed within\n tf.HParams, passed to the enclosed function.\n\n Args:\n data_dir: str. Location of the data for input_fns.\n num_gpus: int. Number of GPUs on each worker.\n variable_strategy: String. CPU to use CPU as the parameter server\n and GPU to use the GPUs as the parameter server.\n use_distortion_for_training: bool. 
See cifar10.Cifar10DataSet.\n Returns:\n A function (tf.estimator.RunConfig, tf.contrib.training.HParams) ->\n tf.contrib.learn.Experiment.\n\n Suitable for use by tf.contrib.learn.learn_runner, which will run various\n methods on Experiment (train, evaluate) based on information\n about the current runner in `run_config`.\n \"\"\"\n\n def _experiment_fn(run_config, hparams):\n \"\"\"Returns an Experiment.\"\"\"\n # Create estimator.\n train_input_fn = functools.partial(\n input_fn,\n data_dir,\n subset='train',\n num_shards=num_gpus,\n batch_size=hparams.train_batch_size,\n use_distortion_for_training=use_distortion_for_training)\n\n eval_input_fn = functools.partial(\n input_fn,\n data_dir,\n subset='eval',\n batch_size=hparams.eval_batch_size,\n num_shards=num_gpus)\n\n num_eval_examples = cifar10.Cifar10DataSet.num_examples_per_epoch('eval')\n if num_eval_examples % hparams.eval_batch_size != 0:\n raise ValueError(\n 'validation set size must be multiple of eval_batch_size')\n\n train_steps = hparams.train_steps\n eval_steps = num_eval_examples // hparams.eval_batch_size\n\n classifier = tf.estimator.Estimator(\n model_fn=get_model_fn(num_gpus, variable_strategy,\n run_config.num_worker_replicas or 1),\n config=run_config,\n params=hparams)\n\n # Create experiment.\n return tf.contrib.learn.Experiment(\n classifier,\n train_input_fn=train_input_fn,\n eval_input_fn=eval_input_fn,\n train_steps=train_steps,\n eval_steps=eval_steps)\n\n return _experiment_fn\n\n\ndef main(job_dir, data_dir, num_gpus, variable_strategy,\n use_distortion_for_training, log_device_placement, num_intra_threads,\n **hparams):\n # The env variable is on deprecation path, default is set to off.\n os.environ['TF_SYNC_ON_FINISH'] = '0'\n os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'\n\n # Session configuration.\n sess_config = tf.ConfigProto(\n allow_soft_placement=True,\n log_device_placement=log_device_placement,\n intra_op_parallelism_threads=num_intra_threads,\n gpu_options=tf.GPUOptions(force_gpu_compatible=True))\n\n config = cifar10_utils.RunConfig(\n session_config=sess_config, model_dir=job_dir)\n tf.contrib.learn.learn_runner.run(\n get_experiment_fn(data_dir, num_gpus, variable_strategy,\n use_distortion_for_training),\n run_config=config,\n hparams=tf.contrib.training.HParams(\n is_chief=config.is_chief,\n **hparams))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--data-dir',\n type=str,\n required=True,\n help='The directory where the CIFAR-10 input data is stored.')\n parser.add_argument(\n '--job-dir',\n type=str,\n required=True,\n help='The directory where the model will be stored.')\n parser.add_argument(\n '--variable-strategy',\n choices=['CPU', 'GPU'],\n type=str,\n default='CPU',\n help='Where to locate variable operations')\n parser.add_argument(\n '--num-gpus',\n type=int,\n default=1,\n help='The number of gpus used. 
Uses only CPU if set to 0.')\n parser.add_argument(\n '--num-layers',\n type=int,\n default=44,\n help='The number of layers of the model.')\n parser.add_argument(\n '--train-steps',\n type=int,\n default=80000,\n help='The number of steps to use for training.')\n parser.add_argument(\n '--train-batch-size',\n type=int,\n default=128,\n help='Batch size for training.')\n parser.add_argument(\n '--eval-batch-size',\n type=int,\n default=100,\n help='Batch size for validation.')\n parser.add_argument(\n '--momentum',\n type=float,\n default=0.9,\n help='Momentum for MomentumOptimizer.')\n parser.add_argument(\n '--weight-decay',\n type=float,\n default=2e-4,\n help='Weight decay for convolutions.')\n parser.add_argument(\n '--learning-rate',\n type=float,\n default=0.1,\n help=\"\"\"\\\n This is the inital learning rate value. The learning rate will decrease\n during training. For more details check the model_fn implementation in\n this file.\\\n \"\"\")\n parser.add_argument(\n '--use-distortion-for-training',\n type=bool,\n default=True,\n help='If doing image distortion for training.')\n parser.add_argument(\n '--sync',\n action='store_true',\n default=False,\n help=\"\"\"\\\n If present when running in a distributed environment will run on sync mode.\\\n \"\"\")\n parser.add_argument(\n '--num-intra-threads',\n type=int,\n default=0,\n help=\"\"\"\\\n Number of threads to use for intra-op parallelism. When training on CPU\n set to 0 to have the system pick the appropriate number or alternatively\n set it to the number of physical CPU cores.\\\n \"\"\")\n parser.add_argument(\n '--num-inter-threads',\n type=int,\n default=0,\n help=\"\"\"\\\n Number of threads to use for inter-op parallelism. If set to 0, the\n system will pick an appropriate number.\\\n \"\"\")\n parser.add_argument(\n '--data-format',\n type=str,\n default=None,\n help=\"\"\"\\\n If not set, the data format best for the training device is used.\n Allowed values: channels_first (NCHW) channels_last (NHWC).\\\n \"\"\")\n parser.add_argument(\n '--log-device-placement',\n action='store_true',\n default=False,\n help='Whether to log device placement.')\n parser.add_argument(\n '--batch-norm-decay',\n type=float,\n default=0.997,\n help='Decay for batch norm.')\n parser.add_argument(\n '--batch-norm-epsilon',\n type=float,\n default=1e-5,\n help='Epsilon for batch norm.')\n args = parser.parse_args()\n\n if args.num_gpus > 0:\n assert tf.test.is_gpu_available(), \"Requested GPUs but none found.\"\n if args.num_gpus < 0:\n raise ValueError(\n 'Invalid GPU count: \\\"--num-gpus\\\" must be 0 or a positive integer.')\n if args.num_gpus == 0 and args.variable_strategy == 'GPU':\n raise ValueError('num-gpus=0, CPU must be used as parameter server. Set'\n '--variable-strategy=CPU.')\n if (args.num_layers - 2) % 6 != 0:\n raise ValueError('Invalid --num-layers parameter.')\n if args.num_gpus != 0 and args.train_batch_size % args.num_gpus != 0:\n raise ValueError('--train-batch-size must be multiple of --num-gpus.')\n if args.num_gpus != 0 and args.eval_batch_size % args.num_gpus != 0:\n raise ValueError('--eval-batch-size must be multiple of --num-gpus.')\n\n main(**vars(args))\n" ]
[ [ "tensorflow.device", "tensorflow.train.LoggingTensorHook", "tensorflow.concat", "tensorflow.metrics.accuracy", "tensorflow.contrib.training.GreedyLoadBalancingStrategy", "tensorflow.contrib.learn.Experiment", "tensorflow.nn.l2_loss", "tensorflow.GPUOptions", "tensorflow.group", "tensorflow.add_n", "tensorflow.get_collection", "tensorflow.gradients", "tensorflow.train.get_global_step", "tensorflow.train.MomentumOptimizer", "tensorflow.logging.set_verbosity", "tensorflow.name_scope", "tensorflow.trainable_variables", "tensorflow.argmax", "tensorflow.losses.sparse_softmax_cross_entropy", "tensorflow.unstack", "numpy.array", "tensorflow.contrib.training.HParams", "tensorflow.parallel_stack", "tensorflow.nn.softmax", "tensorflow.reduce_mean", "tensorflow.train.SyncReplicasOptimizer", "tensorflow.estimator.EstimatorSpec", "tensorflow.test.is_gpu_available" ] ]
newlawrence/Poliastro
[ "9cc569ed4421f1e05d69de5df260ea919fa83a30" ]
[ "src/poliastro/tests/test_coordinates.py" ]
[ "import astropy.units as u\nfrom astropy import time\nfrom astropy.tests.helper import assert_quantity_allclose\nfrom numpy.linalg import norm\n\nfrom poliastro import coordinates, bodies\nfrom poliastro.examples import molniya\nfrom poliastro.twobody.orbit import Orbit\n\n\n# Note that function are tested using astropy current builtin ephemeris.\n# Horizons uses JPL ephemeris DE431, so expected values are hardcoded,\n# instead of being obtained using Horizons.\ndef test_body_centered_to_icrs_transformation():\n\n vexpress_r_venus = [-2.707041558060933E+03, 1.112962479175306E+04, -3.792944408664889E+04] * u.km\n vexpress_v_venus = [-2.045118200275925E-01, 7.978578482960554E-01, 2.664944903217139E+00] * u.km / u.s\n\n expected_r = [-3.47202219448080286E+07, 9.16853879708216339E+07, 4.34117810525591150E+07] * u.km\n expected_v = [-3.34053728321152121E+01, -1.16604776013667291E+01, -2.39943678872506838E-01] * u.km / u.s\n\n r, v = coordinates.body_centered_to_icrs(vexpress_r_venus, vexpress_v_venus, bodies.Venus,\n time.Time(\"2014-08-23 00:00\", scale='tdb'))\n\n assert_quantity_allclose(r, expected_r)\n assert_quantity_allclose(v, expected_v)\n\n\ndef test_icrs_to_body_centered_transformation():\n vexpress_r_icrs = [-3.472125578094885E+07, 9.168528034176737E+07, 4.341160627674723E+07] * u.km\n vexpress_v_icrs = [-3.340574196483147E+01, -1.165974037637970E+01, -2.395829145441408E-01] * u.km / u.s\n\n expected_r = [-3.74486105008138566e+03, 1.10085874027602295e+04, -3.80681106516677464e+04] * u.km\n expected_v = [-2.04845025352488774e-01, 7.98692896032012989e-01, 2.66498465286454023e+00] * u.km / u.s\n\n r, v = coordinates.icrs_to_body_centered(vexpress_r_icrs, vexpress_v_icrs, bodies.Venus,\n time.Time(\"2014-08-23 00:00\", scale='tdb'))\n\n assert_quantity_allclose(r, expected_r)\n assert_quantity_allclose(v, expected_v)\n\n\ndef test_inertial_body_centered_to_pqw():\n molniya_r_peri, molniya_v_peri = coordinates.inertial_body_centered_to_pqw(molniya.r, molniya.v, bodies.Earth)\n\n molniya_peri = Orbit.from_vectors(bodies.Earth, molniya_r_peri, molniya_v_peri, molniya.epoch)\n\n assert_quantity_allclose(molniya_peri.e_vec[-2:], [0, 0], atol=1e-12)\n assert_quantity_allclose(norm(molniya_peri.e_vec), norm(molniya.e_vec))\n" ]
[ [ "numpy.linalg.norm" ] ]
guy1992l/functions
[ "ecc7bc18501d377d10d2b40edccf5c4b256a79d3" ]
[ "sql_to_file/sql_to_file.py" ]
[ "# Generated by nuclio.export.NuclioExporter\n\nimport pandas as pd\nimport pyhive\nfrom sqlalchemy.engine import create_engine\nfrom mlrun.execution import MLClientCtx\n\n\ndef sql_to_file(\n context: MLClientCtx,\n sql_query: str,\n database_url: str,\n file_ext: str = \"parquet\",\n) -> None:\n \"\"\"SQL Ingest - Ingest data using SQL query\n\n :param context: the function context\n :param sql_query: the sql query used to retrieve the data\n :param database_url: database connection URL\n :param file_ext: (\"parquet\") format for result file\n \"\"\"\n\n engine = create_engine(database_url)\n df = pd.read_sql(sql_query, engine)\n\n context.log_dataset(\n \"query result\",\n df=df,\n format=file_ext,\n artifact_path=context.artifact_subpath(\"data\"),\n )\n" ]
[ [ "pandas.read_sql" ] ]
Othinus099/interior
[ "5c3eaa59722d71374422d34f42342c659d7ce8b0" ]
[ "projects/UniDet/unidet/modeling/roi_heads/split_roi_heads.py" ]
[ "import json\nimport torch\nfrom torch import nn\nfrom torch.autograd.function import Function\nimport torch.nn.functional as F\nimport numpy as np\n\nfrom detectron2.modeling.roi_heads.fast_rcnn import fast_rcnn_inference\nfrom detectron2.modeling.roi_heads.roi_heads import ROI_HEADS_REGISTRY, StandardROIHeads\nfrom detectron2.modeling.roi_heads.cascade_rcnn import _ScaleGradient\nfrom detectron2.modeling.box_regression import Box2BoxTransform\nfrom .multi_dataset_fast_rcnn import MultiDatasetFastRCNNOutputLayers\nfrom .custom_roi_heads import CustomCascadeROIHeads\n\nfrom detectron2.utils.events import get_event_storage\n\n@ROI_HEADS_REGISTRY.register()\nclass MultiDatasetCascadeROIHeads(CustomCascadeROIHeads):\n @classmethod\n def _init_box_head(self, cfg, input_shape):\n ret = super()._init_box_head(cfg, input_shape)\n del ret['box_predictors']\n self.dataset_names = cfg.MULTI_DATASET.DATASETS\n cascade_bbox_reg_weights = cfg.MODEL.ROI_BOX_CASCADE_HEAD.BBOX_REG_WEIGHTS\n box_predictors = []\n for box_head, bbox_reg_weights in zip(ret['box_heads'], cascade_bbox_reg_weights):\n box_predictors.append(\n MultiDatasetFastRCNNOutputLayers(\n cfg,\n cfg.MULTI_DATASET.NUM_CLASSES,\n box_head.output_shape,\n box2box_transform=Box2BoxTransform(weights=bbox_reg_weights),\n )\n )\n ret['box_predictors'] = box_predictors\n\n self.unify_label_test = cfg.MULTI_DATASET.UNIFY_LABEL_TEST\n if self.unify_label_test:\n unified_label_data = json.load(\n open(cfg.MULTI_DATASET.UNIFIED_LABEL_FILE, 'r'))\n label_map = unified_label_data['label_map']\n self.label_map = {\n d: torch.tensor(x).long().to(torch.device(cfg.MODEL.DEVICE)) \\\n for d, x in label_map.items()}\n self.unified_num_class = len(set().union(\n *[label_map[d] for d in label_map]))\n # add background class\n self.label_map = {d: torch.cat([\n self.label_map[d], \n self.label_map[d].new_tensor([self.unified_num_class])]) for d in label_map}\n self.class_count = torch.zeros(self.unified_num_class + 1).float().to(\n torch.device(cfg.MODEL.DEVICE))\n for d in self.label_map:\n self.class_count[self.label_map[d]] = \\\n self.class_count[self.label_map[d]] + 1\n\n self.dump_cls_score = cfg.DUMP_CLS_SCORE\n if self.dump_cls_score:\n self.dump_num_img = cfg.DUMP_NUM_IMG\n self.dump_num_per_img = cfg.DUMP_NUM_PER_IMG\n self.class_scores = []\n return ret\n\n def forward(self, images, features, proposals, targets=None, eval_dataset=-1):\n if self.training:\n proposals = self.label_and_sample_proposals(proposals, targets)\n dataset_sources = [target._dataset_source for target in targets]\n else:\n dataset_sources = [eval_dataset for _ in range(len(images))]\n assert len(set(dataset_sources)) == 1, dataset_sources\n dataset_source = dataset_sources[0]\n del images\n\n if self.training:\n losses = self._forward_box(features, proposals, targets, dataset_source)\n losses.update(self._forward_mask(features, proposals))\n losses.update(self._forward_keypoint(features, proposals))\n return proposals, losses\n else:\n pred_instances = self._forward_box(\n features, proposals, dataset_source=dataset_source)\n pred_instances = self.forward_with_given_boxes(features, pred_instances)\n return pred_instances, {}\n\n def _forward_box(self, features, proposals, targets=None, dataset_source=-1):\n features = [features[f] for f in self.box_in_features]\n head_outputs = [] # (predictor, predictions, proposals)\n prev_pred_boxes = None\n image_sizes = [x.image_size for x in proposals]\n for k in range(self.num_cascade_stages):\n if k > 0:\n # The output boxes of the 
previous stage are the input proposals of the next stage\n proposals = self._create_proposals_from_boxes(\n prev_pred_boxes, image_sizes\n )\n if self.training:\n proposals = self._match_and_label_boxes(proposals, k, targets)\n predictions = self._run_stage(features, proposals, k, dataset_source)\n prev_pred_boxes = self.box_predictor[k].predict_boxes(predictions, proposals)\n head_outputs.append((self.box_predictor[k], predictions, proposals))\n\n if self.training:\n losses = {}\n storage = get_event_storage()\n for stage, (predictor, predictions, proposals) in enumerate(head_outputs):\n with storage.name_scope(\"{}_stage{}\".format(\n self.dataset_names[dataset_source], stage)):\n stage_losses = predictor.losses(\n predictions, proposals, dataset_source)\n losses.update({\"{}_{}_stage{}\".format(\n self.dataset_names[dataset_source], \n k, stage): v for k, v in stage_losses.items()})\n return losses\n else:\n # Each is a list[Tensor] of length #image. Each tensor is Ri x (K+1)\n scores_per_stage = [h[0].predict_probs(h[1], h[2]) for h in head_outputs]\n\n # Average the scores across heads\n scores = [\n sum(list(scores_per_image)) * (1.0 / self.num_cascade_stages)\n for scores_per_image in zip(*scores_per_stage)\n ]\n predictor, predictions, proposals = head_outputs[-1]\n boxes = predictor.predict_boxes(predictions, proposals)\n pred_instances, _ = fast_rcnn_inference(\n boxes,\n scores,\n image_sizes,\n predictor.test_score_thresh,\n predictor.test_nms_thresh,\n predictor.test_topk_per_image,\n )\n return pred_instances\n\n def _run_stage(self, features, proposals, stage, dataset_source):\n \"\"\"\n support dataset_source\n \"\"\"\n box_features = self.box_pooler(features, [x.proposal_boxes for x in proposals])\n box_features = _ScaleGradient.apply(box_features, 1.0 / self.num_cascade_stages)\n box_features = self.box_head[stage](box_features)\n\n if self.unify_label_test and not self.training:\n pred_class_logits_all, pred_proposal_deltas = self.box_predictor[stage](\n box_features, -1)\n unified_score = pred_proposal_deltas.new_zeros(\n (pred_class_logits_all[0].shape[0], self.unified_num_class + 1))\n for i, d in enumerate(self.dataset_names):\n pred_class_score = pred_class_logits_all[i]\n unified_score[:, self.label_map[d]] = \\\n unified_score[:, self.label_map[d]] + pred_class_score\n unified_score = unified_score / self.class_count\n if dataset_source in self.dataset_names:\n # on training datasets\n pred_class_logits = \\\n unified_score[:, self.label_map[self.dataset_names[dataset_source]]]\n else:\n pred_class_logits = unified_score\n # B x (#U + 1)\n else:\n pred_class_logits, pred_proposal_deltas = self.box_predictor[stage](\n box_features, dataset_source if type(dataset_source) != type('') else -1)\n if not self.training and (dataset_source == -1 or type(dataset_source) == type('')):\n fg = torch.cat(\n [x[:, :-1] for x in pred_class_logits], dim=1)\n bg = torch.cat(\n [x[:, -1:] for x in pred_class_logits], dim=1).mean(dim=1)\n pred_class_logits = torch.cat([fg, bg[:, None]], dim=1)\n # B x (sum C + 1)\n\n if self.dump_cls_score:\n if not self.unify_label_test:\n pred_class_logits_all, _ = self.box_predictor[stage](\n box_features, -1)\n if len(self.class_scores) < self.dump_num_img and stage == 2:\n self.class_scores.append(\n [x[:self.dump_num_per_img].detach().cpu().numpy() \\\n for x in pred_class_logits_all])\n\n return pred_class_logits, pred_proposal_deltas\n" ]
[ [ "torch.device", "torch.tensor", "torch.zeros", "torch.cat" ] ]
rogeroyer/df_singularity_plan_zhaoshang
[ "dab27cfaa039b1fda380409e56d11b453194230f" ]
[ "train_gbdt_model.py" ]
[ "# -*- coding:utf-8 -*-\r\n\r\nimport random\r\nfrom divide_data import *\r\nfrom sklearn.ensemble import GradientBoostingClassifier\r\nfrom sklearn.metrics import roc_auc_score\r\nfrom extract_features import extract_feature\r\nfrom extract_features import extract_one_hot_feature\r\nfrom extract_features import extract_evt_lbl_features\r\nfrom sklearn.model_selection import train_test_split\r\n\r\n\r\ndef train_xgb_module(store_features=False, store_result=False, feature_select=False, num_round=300):\r\n if store_features is True:\r\n '''feature'''\r\n train_feature = extract_feature(train_agg, train_log)\r\n test_feature = extract_feature(test_agg, test_log)\r\n print('extract features successfully!')\r\n # '''word2vec feature'''\r\n # train_feature = train_feature.merge(extract_evt_lbl_features(train_log), on='USRID', how='left')\r\n # test_feature = test_feature.merge(extract_evt_lbl_features(test_log), on='USRID', how='left')\r\n # print('extract word2vec features successfully!')\r\n '''EVT_LBL one hot feature'''\r\n train_feature = train_feature.merge(extract_one_hot_feature(train_log), on='USRID', how='left')\r\n test_feature = test_feature.merge(extract_one_hot_feature(test_log), on='USRID', how='left')\r\n print('extract one hot features successfully!')\r\n '''store'''\r\n train_feature = train_feature.merge(train_flg, on='USRID', how='left')\r\n train_feature.to_csv(path + 'train_feature.csv', encoding='utf-8', index=None)\r\n test_feature.to_csv(path + 'test_feature.csv', encoding='utf-8', index=None)\r\n print('store features successfully!')\r\n\r\n # '''add cluster features'''\r\n # train_feature = pd.read_csv(path + 'train_feature.csv', encoding='utf-8', low_memory=False)\r\n # test_feature = pd.read_csv(path + 'test_feature.csv', encoding='utf-8', low_memory=False)\r\n # train_cluster = pd.read_csv(path + 'train_cluster.csv', encoding='utf-8', low_memory=False)\r\n # test_cluster = pd.read_csv(path + 'test_cluster.csv', encoding='utf-8', low_memory=False)\r\n # train_feature = train_feature.merge(train_cluster, on='USRID', how='left')\r\n # test_feature = test_feature.merge(test_cluster, on='USRID', how='left')\r\n else:\r\n train_feature = pd.read_csv(path + 'train_feature.csv', encoding='utf-8', low_memory=False)\r\n test_feature = pd.read_csv(path + 'test_feature.csv', encoding='utf-8', low_memory=False)\r\n # '''cluster relative'''\r\n # train_feature = pd.read_csv(path + 'train_feature_filled.csv', encoding='utf-8', low_memory=False)\r\n # test_feature = pd.read_csv(path + 'test_feature_filled.csv', encoding='utf-8', low_memory=False)\r\n # train_feature = train_feature.drop(['cluster_label', 'center_distance'], axis=1)\r\n # test_feature = test_feature.drop(['cluster_label', 'center_distance'], axis=1)\r\n print('read features successfully!')\r\n\r\n '''no log table'''\r\n # train_feature = train_feature[train_feature['evt_lbl_cnt'].isnull()]\r\n # # pos_feature = train_feature[train_feature['FLAG'] == 1]\r\n # # neg_feature = train_feature[train_feature['FLAG'] == 0]\r\n # # '''instance sample'''\r\n # # neg_feature = neg_feature.sample(frac=0.098, replace=True, random_state=88)\r\n # # train_feature = pos_feature.append(neg_feature)\r\n # # '''shuffle rows'''\r\n # # index = [i for i in range(train_feature.shape[0])]\r\n # # random.shuffle(index)\r\n # # train_feature = train_feature.set_index([index]).sort_index()\r\n #\r\n # test_feature = test_feature[test_feature['evt_lbl_cnt'].isnull()]\r\n # names = ['V' + str(index) for index in range(1, 31, 1)] + 
['USRID']\r\n # train_feature = train_feature[names + ['FLAG']]\r\n # test_feature = test_feature[names]\r\n\r\n '''have log table'''\r\n # train_feature = train_feature[train_feature['evt_lbl_cnt'].notnull()]\r\n # test_feature = test_feature[test_feature['evt_lbl_cnt'].notnull()]\r\n # train_feature = train_feature.drop(['first_len_rank', 'second_len_rank', 'three_len_rank', 'evt_lbl_cnt_len_rank', 'evt_lbl_cnt_len_reverse', 'first_len_rank_reverse', 'second_len_rank_reverse', 'three_len_rank_reverse'], axis=1)\r\n # test_feature = test_feature.drop(['first_len_rank', 'second_len_rank', 'three_len_rank', 'evt_lbl_cnt_len_rank', 'evt_lbl_cnt_len_reverse', 'first_len_rank_reverse', 'second_len_rank_reverse', 'three_len_rank_reverse'], axis=1)\r\n\r\n '''fill nan with 0'''\r\n train_feature = train_feature.fillna(0)\r\n test_feature = test_feature.fillna(0)\r\n\r\n x_train, x_test, y_train, y_test = train_test_split(train_feature.drop(['USRID', 'FLAG'], axis=1), train_feature[['FLAG']], test_size=.2, random_state=88)\r\n\r\n if feature_select is True:\r\n # features_name = ['V1', 'V3', 'V6', 'V7', 'V9', 'V10', 'V11', 'V13', 'V15', 'V16', 'V19', 'V22', 'V23', 'V25', 'V27', 'V28', 'V29', 'V30', 'day_set_len', 'tch_typ_set_len', 'tch_typ0', 'tch_typ2', 'tch_typ0_rate', 'tch_typ2_rate', '1', '3', '6', '8', '9', '10', '13', '14', '18', '19', '21', '22', '23', '25', '26', '30', 'days_mean', 'days_min', 'days_max', 'days_var', 'days_median', 'days_day_var', 'days_day_median', 'days_day_skew', 'days_hour_mean', 'days_hour_min', 'days_hour_max', 'days_hour_skew', 'hour_max', 'hour_var', 'hour_skew', 'evt_lbl_cnt_max', 'first_of_max', 'first_of_min', 'first_of_median', 'second_of_max', 'second_of_min', 'second_of_median', 'three_of_median', 'first_max', 'first_min', 'second_min', 'three_max', 'three_median']\r\n # features len:68 300:0.87087545758 400:0.87081954925 500:0.870075481655 #\r\n\r\n features_name = ['V1', 'V2', 'V3', 'V4', 'V5', 'V6', 'V7', 'V9', 'V12', 'V13', 'V16', 'V19', 'V21', 'V23', 'V24', 'V26', 'V28', 'V29', 'evt_lbl_cnt', 'evt_lbl_cnt_every_day', 'every_evt_lbl_cnt', 'tch_typ_set_len', 'tch_typ0', 'tch_typ0_rate', '2', '6', '7', '9', '10', '11', '12', '18', '20', '22', '24', '25', '28', 'days_mean', 'days_min', 'days_var', 'days_skew', 'continue_days', 'days_day_min', 'days_day_median', 'days_day_kurtosis', 'days_hour_max', 'days_hour_var', 'days_hour_kurtosis', 'hour_min', 'hour_max', 'hour_var', 'hour_median', 'evt_lbl_cnt_max', 'second_of_max', 'second_of_min', 'second_of_median', 'three_of_max', 'first_max', 'second_max', 'second_min', 'second_median', 'three_max', 'three_min']\r\n\r\n x_train = x_train[features_name]\r\n x_test = x_test[features_name]\r\n\r\n print('I\\'m training validate module.')\r\n module = GradientBoostingClassifier(\r\n n_estimators=num_round,\r\n learning_rate=0.05,\r\n random_state=2018,\r\n max_depth=5,\r\n subsample=0.7,\r\n )\r\n module.fit(x_train, y_train['FLAG'].ravel())\r\n result = module.predict_proba(x_test)[:, 1]\r\n score = roc_auc_score(y_test, result)\r\n print('validate auc:', score)\r\n\r\n if store_result is True:\r\n\r\n '''label set'''\r\n train_label = train_feature[['FLAG']]\r\n '''pure feature'''\r\n train_feature = train_feature.drop(['USRID', 'FLAG'], axis=1)\r\n test_user = test_feature[['USRID']]\r\n test_feature = test_feature.drop(['USRID'], axis=1)\r\n\r\n if feature_select is True:\r\n train_feature = train_feature[features_name]\r\n test_feature = test_feature[features_name]\r\n\r\n print('I\\'m training final 
module.')\r\n module_two = GradientBoostingClassifier(\r\n n_estimators=num_round,\r\n learning_rate=0.05,\r\n random_state=2018,\r\n max_depth=5,\r\n subsample=0.7,\r\n )\r\n\r\n module_two.fit(train_feature, train_label['FLAG'].ravel())\r\n result = module_two.predict_proba(test_feature)[:, 1]\r\n pd.set_option('chained', None) # remove warning #\r\n test_user['RST'] = [index for index in result]\r\n print(test_user)\r\n '''store result'''\r\n time_string = time.strftime('_%Y%m%d%H%M%S', time.localtime(time.time()))\r\n file_name = 'result_b' + time_string + '.csv'\r\n test_user.to_csv(path + file_name, index=None, encoding='utf-8', sep='\\t')\r\n print('result stored successfully!')\r\n print('program is over!')\r\n\r\n\r\nif __name__ == '__main__':\r\n start_time = time.clock()\r\n train_xgb_module(store_features=False, store_result=False, feature_select=False, num_round=1000)\r\n end_time = time.clock()\r\n print('program spend time:', end_time - start_time, ' sec')\r\n\r\n" ]
[ [ "sklearn.metrics.roc_auc_score", "sklearn.ensemble.GradientBoostingClassifier" ] ]
MertEfeSevim/legendaryPokemonClassification
[ "17452d8d5192b5e527ae0e5e1df139e5f00cde4a" ]
[ "classifier.py" ]
[ "import pandas as pd\nimport numpy as np\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import linear_model\n\npokemon_csv = pd.read_csv('Pokemon.csv')\n\ndf = pd.DataFrame(pokemon_csv, columns = ['Name', 'Type 1', 'Type 2', 'Total', 'HP','Attack','Defense','Sp. Atk','Sp. Def','Speed','Generation','Legendary'])\n\n\n#print(df.apply(lambda x: x.count()))\n\ndf = df.drop(['Name'],axis=1)\ndf = df.dropna(subset=['Type 2'])\n\n\ni = 0\nj = 0\nuniqueItem = dict()\nuniqueItem2 = dict()\n\nfor item in df['Type 1']:\n if item not in uniqueItem:\n uniqueItem[str(item)] = i\n i+=1\n\nfor type in df['Type 1']:\n if type in uniqueItem:\n df = df.replace({type:uniqueItem.get(type)})\n\n\nfor item in df['Type 2']:\n if item not in uniqueItem2:\n uniqueItem2[str(item)] = j\n j+=1\n\nfor type in df['Type 2']:\n if type in uniqueItem2:\n df = df.replace({type:uniqueItem2.get(type)})\n\n\nX = np.array(df.iloc[:,0:-1])\nY = np.array([[df['Legendary']]])\n\nY = Y.reshape(414)\n\n#Naive bayes starts here\nX_train, X_test, y_train, y_test = train_test_split(X,Y, test_size=0.2, random_state = 10)\n\nmodel = GaussianNB()\nmodel.fit(X_train, y_train)\n\ny_pred = model.predict(X_test)\nprint(\"Accuracy score of Naive Bayes: \",accuracy_score(y_test,y_pred))\n\n#SGD Starts here\n\nclf = linear_model.SGDClassifier()\nclf.fit(X, Y)\n\ny_pred = clf.predict(X_test)\nprint(\"Accuracy score of SDG: \",accuracy_score(y_test,y_pred))\n\n" ]
[ [ "pandas.read_csv", "sklearn.naive_bayes.GaussianNB", "sklearn.model_selection.train_test_split", "pandas.DataFrame", "numpy.array", "sklearn.linear_model.SGDClassifier", "sklearn.metrics.accuracy_score" ] ]
yimuw/yimu-blog
[ "280ab2eca1fa48602d1695d69366842ea40debda" ]
[ "random/my_git/my_git.py" ]
[ "import os\nimport hashlib\nimport pickle\nimport argparse\nimport sys\nimport shutil\nimport copy\n\njoin = os.path.join\n\n\nclass TreeNode:\n def __init__(self, id, blobs=[], parents=[]):\n self.id = id\n self.blobs = blobs\n self.parents = parents\n self.children = []\n\n\nclass Blob:\n def __init__(self, file_path):\n self.file_path = file_path\n self.file_content = open(self.file_path, 'r').read()\n self.hash_val = hashlib.sha1(\n self.file_content.encode('utf-8')).hexdigest()\n\n\nclass MyGit:\n class States:\n def __init__(self):\n self.head = None\n self.tracked_files = set()\n\n def __init__(self, dir='.'):\n self.git_dir = 'my_git'\n self.blobs_dir = join(dir, self.git_dir, 'blobs')\n self.nodes_dir = join(dir, self.git_dir, 'nodes')\n self.states_path = join(dir, self.git_dir, 'states')\n self.states = self.States()\n self.root_name = 'ROOT'\n\n def init(self):\n if os.path.exists(self.git_dir):\n print('git dir exist!')\n return\n os.mkdir(self.git_dir)\n os.mkdir(self.blobs_dir)\n os.mkdir(self.nodes_dir)\n\n root = TreeNode(self.root_name)\n\n self.states.head = root.id\n\n self.save_node(root)\n self.save_states()\n\n print('init my_git repo!')\n\n def load_states(self):\n self.states = pickle.load(open(join(self.git_dir, 'states'), 'rb'))\n\n def save_states(self):\n pickle.dump(self.states, open(join(self.git_dir, 'states'), 'wb'))\n\n def load_node(self, node_id):\n return pickle.load(open(join(self.nodes_dir, node_id), 'rb'))\n\n def save_node(self, node):\n pickle.dump(node, open(join(self.nodes_dir, node.id), 'wb'))\n\n def load_blob(self, blob_hash):\n return pickle.load(open(join(self.blobs_dir, blob_hash), 'rb'))\n\n def save_blob(self, blob):\n pickle.dump(blob, open(join(self.blobs_dir, blob.hash_val), 'wb'))\n\n def status(self):\n self.load_states()\n print('HEAD: ', self.states.head)\n print('tracked files:', self.states.tracked_files)\n for f in self.states.tracked_files:\n print('content of f:', f)\n print(open(f, 'r').read())\n\n def add(self, files):\n self.load_states()\n for f in files:\n if os.path.exists(f):\n self.states.tracked_files.add(f)\n else:\n print(f, 'does not exist!')\n self.save_states()\n\n def commit(self, message, verbose=True):\n self.load_states()\n print('current HEAD:', self.states.head)\n\n has_change = False\n\n all_blob = []\n for f in self.states.tracked_files:\n blob = Blob(f)\n all_blob.append(blob.hash_val)\n if blob.hash_val not in os.listdir(self.blobs_dir):\n has_change = True\n self.save_blob(blob)\n\n # using hash to detect changes\n if has_change == False:\n print('nothing to commit!')\n return\n\n new_node = TreeNode(message, blobs=all_blob,\n parents=[self.states.head])\n\n # update the children of current head node\n head_node = self.load_node(self.states.head)\n head_node.children.append(new_node.id)\n self.save_node(head_node)\n\n # head point to new node\n self.states.head = new_node.id\n self.save_states()\n\n # save the new node\n self.save_node(new_node)\n if verbose:\n print('commit success! 
')\n print('new HEAD:', self.states.head)\n\n def __merge_commit(self, p1, p2):\n merge_node = TreeNode(\n 'merge-{}-{}'.format(p1[:6], p2[:6]), parents=[p1, p2])\n merge_node.hash_val = 'merge-{}-{}'.format(p1[:6], p2[:6])\n\n # update the children of p1, p2 node\n p1_node = self.load_node(p1)\n p1_node.children.append(merge_node.hash_val)\n self.save_node(p1_node)\n\n p2_node = self.load_node(p2)\n p2_node.children.append(merge_node.hash_val)\n self.save_node(p2_node)\n\n # head point to new node\n self.states.head = merge_node.hash_val\n self.save_states()\n\n # save the new node\n self.save_node(merge_node)\n\n print('merged commit success! ')\n print('new HEAD:', self.states.head)\n\n def checkout(self, node_id, verbose=True):\n if node_id not in os.listdir(self.nodes_dir):\n print('commit doesn not exist')\n return\n\n self.load_states()\n\n for f in self.states.tracked_files:\n os.remove(f)\n self.states.tracked_files = set()\n\n node = self.load_node(node_id)\n for blob_hash in node.blobs:\n blob = self.load_blob(blob_hash)\n self.states.tracked_files.add(blob.file_path)\n with open(blob.file_path, 'w') as file:\n file.write(blob.file_content)\n self.states.head = node_id\n self.save_states()\n\n if verbose:\n print('checkout success!')\n print('new HEAD:', self.states.head)\n\n def __lowest_common_ancester_path(self, node1_id, node2_id):\n print('finding LCA path between', node1_id, node2_id)\n\n self.lca = None\n\n def recursion(node_id):\n node = self.load_node(node_id)\n\n value = 0\n for c in node.children:\n value += recursion(c)\n\n value += 1 if node.id == node1_id or node.id == node2_id else 0\n\n if self.lca is None and value == 2:\n self.lca = node.id\n return value\n recursion(self.root_name)\n\n return self.lca\n\n def __detect_diff3_conflict(self, path):\n return '<<<<<<<' in open(path, 'r').read()\n\n def merge(self, node_id):\n if node_id not in os.listdir(self.nodes_dir):\n print('commit doesn not exist')\n return\n\n self.load_states()\n lca = self.__lowest_common_ancester_path(self.states.head, node_id)\n print('LCA node:', lca)\n\n if lca == node_id or lca == self.states.head:\n print('fastforward...')\n self.checkout(node_id)\n return\n\n merge_id = 'merge-{}-{}'.format(self.states.head, node_id)\n merge_dir = join(self.git_dir, merge_id)\n source_dir = join(merge_dir, 'source')\n target_dir = join(merge_dir, 'target')\n lca_dir = join(merge_dir, 'lca')\n\n if os.path.exists(merge_dir):\n shutil.rmtree(merge_dir)\n os.mkdir(merge_dir)\n os.mkdir(source_dir)\n os.mkdir(target_dir)\n os.mkdir(lca_dir)\n\n def snapshot(dir):\n for f in self.states.tracked_files:\n shutil.copyfile(f, join(dir, f))\n\n cur_head = self.states.head\n\n snapshot(source_dir)\n source_files = copy.deepcopy(self.states.tracked_files)\n\n self.checkout(lca, verbose=False)\n snapshot(lca_dir)\n lca_files = copy.deepcopy(self.states.tracked_files)\n\n self.checkout(node_id, verbose=False)\n snapshot(target_dir)\n target_files = copy.deepcopy(self.states.tracked_files)\n\n self.checkout(cur_head, verbose=False)\n\n all_files = target_files.union(source_files).union(lca_files)\n\n for f in all_files:\n print('...merging...', f)\n sf = join(source_dir, f)\n tf = join(target_dir, f)\n lf = join(lca_dir, f)\n\n # 1. f in lca, source and target.\n # do a diff3\n if f in source_files and f in target_files and f in lca_files:\n os.system('diff3 {} {} {} -m > {}'.format(sf, lf, tf, f))\n self.states.tracked_files.add(f)\n print('3 way merge')\n if self.__detect_diff3_conflict(f):\n print(\n '!!!! 
Merge conflict for {}!!!! please address in another commit!'.format(f))\n # 2. f in source and lca, but not target.\n # take source\n elif f in source_files and f in lca_files:\n shutil.copyfile(sf, f)\n self.states.tracked_files.add(f)\n print('new change from source. take from source')\n # 3. f in target and lca, but not source.\n # take target\n elif f in target_files and f in lca_files:\n shutil.copyfile(tf, f)\n self.states.tracked_files.add(f)\n print('new change from target. take from target')\n # 4. f in source or (in source and target).\n # new file. take source\n elif f in source_files:\n shutil.copyfile(sf, f)\n self.states.tracked_files.add(f)\n print('new file. take from source')\n # 5. f only in target.\n # new file. take target\n elif f in target_files:\n shutil.copyfile(tf, f)\n self.states.tracked_files.add(f)\n print('new file. take from target')\n # 6. f only in lca.\n # file is removed. Do nothing\n elif f in lca_files:\n print('file is removed')\n else:\n print('I worte a bug!')\n\n # create a merge commit which has 2 parents\n self.__merge_commit(self.states.head, node_id)\n\n def log(self):\n def tree_recursion(node_id):\n if node_id is None:\n return\n\n node = self.load_node(node_id)\n\n print('======================')\n print('node id/message: ', node.id)\n print('blobs:', node.blobs)\n print('children: ', node.children)\n print('parents: ', node.parents)\n print('======================')\n\n for p in node.parents:\n tree_recursion(p)\n\n self.load_states()\n tree_recursion(self.states.head)\n\n def gitk(self):\n import networkx as nx\n from matplotlib import pyplot as plt\n\n self.load_states()\n\n def node_info_str(node):\n return '{}'.format(node.id)\n\n def get_edges(node_hash):\n node = self.load_node(node_hash)\n\n edges = [(node_info_str(node), node_info_str(self.load_node(c)))\n for c in node.children]\n for c in node.children:\n edges += get_edges(c)\n return edges\n\n graph = nx.DiGraph()\n\n edges = get_edges(self.root_name)\n edges.append(('HEAD', node_info_str(self.load_node(self.states.head))))\n\n graph.add_edges_from(edges)\n plt.tight_layout()\n nx.draw_networkx(graph, arrows=True)\n plt.show()\n\n\nif __name__ == '__main__':\n git = MyGit()\n\n argparser = argparse.ArgumentParser()\n argsubparsers = argparser.add_subparsers(title='Commands', dest='command')\n argsubparsers.required = True\n\n argsubparsers.add_parser('init')\n argsubparsers.add_parser('status')\n argsubparsers.add_parser('log')\n argsubparsers.add_parser('test')\n argsubparsers.add_parser('gitk')\n\n argsp = argsubparsers.add_parser('add')\n argsp.add_argument('files', nargs='+')\n\n argsp = argsubparsers.add_parser('merge')\n argsp.add_argument('id',\n help='target commit')\n\n argsp = argsubparsers.add_parser('commit')\n argsp.add_argument('message',\n help='file to commit')\n\n argsp = argsubparsers.add_parser('checkout')\n argsp.add_argument('id',\n help='id is a hash or a branch')\n\n args = argparser.parse_args(sys.argv[1:])\n\n if args.command == 'status':\n git.status()\n elif args.command == 'checkout':\n git.checkout(args.id)\n elif args.command == 'commit':\n git.commit(args.message)\n elif args.command == 'init':\n git.init()\n elif args.command == 'log':\n git.log()\n elif args.command == 'gitk':\n git.gitk()\n elif args.command == 'merge':\n git.merge(args.id)\n elif args.command == 'add':\n git.add(args.files)\n" ]
[ [ "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.show" ] ]
resgen/skills-ml
[ "5fa1fc975892230931b002fe5cac2579b242cd03" ]
[ "tests/test_storage.py" ]
[ "from skills_ml.storage import open_sesame, ModelStorage, S3Store, FSStore, PersistedJSONDict, ProxyObjectWithStorage, SerializedByStorage\nfrom skills_ml.algorithms.preprocessing import IterablePipeline\nfrom skills_ml.algorithms import nlp\n\nfrom skills_utils.s3 import upload, list_files\n\nimport joblib\n\nfrom functools import partial\nfrom moto import mock_s3\nimport tempfile\nimport mock\nimport os\nimport unittest\nimport s3fs\nimport json\nimport numpy as np\nimport dill as pickle\npickle.settings['byref'] = True\n\nclass FakeModel(object):\n def __init__(self, val):\n self.val = val\n self.model_name = 'fake_model'\n\n def infer_vector(self, doc_words):\n return [1, 2, 3, 4]\n\n@mock_s3\nclass TestS3Storage(unittest.TestCase):\n def test_s3store(self):\n import boto3\n client = boto3.client('s3')\n client.create_bucket(Bucket='fake-open-skills', ACL='public-read-write')\n s3 = s3fs.S3FileSystem()\n\n storage = S3Store(path=f\"s3://fake-open-skills/model_cache\")\n assert not s3.exists(storage.path) == True\n\n model = FakeModel('val')\n model_pickled = pickle.dumps(model)\n storage.write(model_pickled, 'for_testing.model')\n\n assert storage.exists(\"for_testing.model\")\n\n with storage.open(\"for_testing_compressed.model\", \"wb\") as f:\n joblib.dump(model, f, compress=True)\n\n assert storage.exists(\"for_testing_compressed.model\")\n\n\n with open_sesame(\"s3://fake-open-skills/model_cache/for_testing_compressed.model\", \"rb\") as f:\n model_loaded = joblib.load(f)\n assert model.val == model_loaded.val\n\n\n model_loaded = storage.load('for_testing.model')\n model_loaded = pickle.loads(model_loaded)\n assert model_loaded.val == 'val'\n\n fake_lookup = {'1': 1, '2': 2, '3': 3}\n fake_lookup_bytes = json.dumps(fake_lookup).encode()\n storage.write(fake_lookup_bytes, 'for_testing.json')\n assert storage.exists(\"for_testing.json\")\n\n fake_lookup_loaded = json.loads(storage.load('for_testing.json').decode())\n assert fake_lookup == fake_lookup_loaded\n\n\n storage.delete('for_testing.model')\n assert not storage.exists(\"for_testing.model\")\n\n\nclass TestFSStorage(unittest.TestCase):\n def test_fsstore(self):\n with tempfile.TemporaryDirectory() as tmpdir:\n storage = FSStore(tmpdir)\n model = FakeModel('val')\n model_pickled = pickle.dumps(model)\n storage.write(model_pickled, 'for_testing.model')\n assert os.path.isfile(os.path.join(tmpdir, 'for_testing.model')) == storage.exists('for_testing.model') == True\n\n with storage.open(\"for_testing_compressed.model\", \"wb\") as f:\n joblib.dump(model, f, compress=True)\n\n assert storage.exists(\"for_testing_compressed.model\")\n\n with open_sesame(os.path.join(tmpdir, \"for_testing_compressed.model\"), \"rb\") as f:\n model_loaded = joblib.load(f)\n assert model.val == model_loaded.val\n\n model_loaded = storage.load('for_testing.model')\n model_loaded = pickle.loads(model_loaded)\n assert model_loaded.val == 'val'\n\n storage.delete('for_testing.model')\n assert os.path.isfile(os.path.join(tmpdir, 'for_testing.model')) == storage.exists('for_testing.model') == False\n\n\nclass TestPersistedJSONDict(unittest.TestCase):\n def test_fsstore(self):\n with tempfile.TemporaryDirectory() as tmpdir:\n storage = FSStore(tmpdir)\n\n # 1. 
Ensure that a new file is correctly created and saved to\n storage_one = PersistedJSONDict(storage, 'test.json')\n storage_one['key1'] = 'value1'\n storage_one['key2'] = {'nestedkey2': 'value2'}\n storage_one.save()\n assert json.load(open(os.path.join(tmpdir, 'test.json')))\\\n == {'key1': 'value1', 'key2': {'nestedkey2': 'value2'}}\n\n # 2. Ensure that an existing file is correctly read, updated, and saved to\n storage_two = PersistedJSONDict(storage, 'test.json')\n assert 'key1' in storage_two\n assert storage_two['key1'] == 'value1'\n storage_two['key3'] = 'value3'\n storage_two.save()\n assert json.load(open(os.path.join(tmpdir, 'test.json')))\\\n == {'key1': 'value1', 'key2': {'nestedkey2': 'value2'}, 'key3': 'value3'}\n\n # 3. Ensure that, in the same thread, updating and svaing an old one gets new chagnes too\n storage_one['key4'] = 'value4'\n storage_one.save()\n assert json.load(open(os.path.join(tmpdir, 'test.json')))\\\n == {'key1': 'value1', 'key2': {'nestedkey2': 'value2'}, 'key3': 'value3', 'key4': 'value4'}\n\n # 4. test autosave - this will be the fourth update of this object\n storage_one.SAVE_EVERY_N_UPDATES = 4\n storage_one['key5'] = 'value5'\n assert json.load(open(os.path.join(tmpdir, 'test.json')))\\\n == {'key1': 'value1', 'key2': {'nestedkey2': 'value2'}, 'key3': 'value3', 'key4': 'value4', 'key5': 'value5'}\n\n # 5. test length checking\n assert len(storage_one) == 5\n\n # 6. test iteration\n assert sorted(\n [(key, value) for key, value in storage_one.items()],\n key=lambda x: x[0]\n ) == [\n ('key1', 'value1'),\n ('key2', {'nestedkey2': 'value2'}),\n ('key3', 'value3'),\n ('key4', 'value4'),\n ('key5', 'value5')\n\n ]\n\n @mock_s3\n def test_s3store(self):\n import boto3\n client = boto3.client('s3')\n client.create_bucket(Bucket='fake-open-skills', ACL='public-read-write')\n storage = S3Store(path=f\"s3://fake-open-skills/apath\")\n\n # 1. Ensure that a new file is correctly created and saved to\n storage_one = PersistedJSONDict(storage, 'test.json')\n storage_one['key1'] = 'value1'\n storage_one['key2'] = {'nestedkey2': 'value2'}\n storage_one.save()\n loaded = json.loads(storage.load('test.json').decode())\n assert loaded == {'key1': 'value1', 'key2': {'nestedkey2': 'value2'}}\n\n # 2. Ensure that an existing file is correctly read, updated, and saved to\n storage_two = PersistedJSONDict(storage, 'test.json')\n assert 'key1' in storage_two\n assert storage_two['key1'] == 'value1'\n storage_two['key3'] = 'value3'\n storage_two.save()\n loaded = json.loads(storage.load('test.json').decode())\n assert loaded == {'key1': 'value1', 'key2': {'nestedkey2': 'value2'}, 'key3': 'value3'}\n\n # 3. Ensure that, in the same thread, updating and svaing an old one gets new chagnes too\n storage_one['key4'] = 'value4'\n storage_one.save()\n loaded = json.loads(storage.load('test.json').decode())\n assert loaded == {'key1': 'value1', 'key2': {'nestedkey2': 'value2'}, 'key3': 'value3', 'key4': 'value4'}\n\n # 4. test autosave - this will be the fourth update of this object\n storage_one.SAVE_EVERY_N_UPDATES = 4\n storage_one['key5'] = 'value5'\n loaded = json.loads(storage.load('test.json').decode())\n assert loaded == {'key1': 'value1', 'key2': {'nestedkey2': 'value2'}, 'key3': 'value3', 'key4': 'value4', 'key5': 'value5'}\n\n # 5. test length checking\n assert len(storage_one) == 5\n\n # 6. 
test iteration\n assert sorted(\n [(key, value) for key, value in storage_one.items()],\n key=lambda x: x[0]\n ) == [\n ('key1', 'value1'),\n ('key2', {'nestedkey2': 'value2'}),\n ('key3', 'value3'),\n ('key4', 'value4'),\n ('key5', 'value5')\n\n ]\n\n\nclass TestModelStorage(unittest.TestCase):\n @mock.patch('os.getcwd')\n def test_model_storage(self, mock_getcwd):\n with tempfile.TemporaryDirectory() as td:\n mock_getcwd.return_value = td\n ms = ModelStorage(FSStore(td))\n fake = FakeModel(1)\n ms.save_model(fake, 'test.model')\n assert set(os.listdir(os.getcwd())) == set(['test.model'])\n new_model = ms.load_model('test.model')\n assert new_model.val == fake.val\n\n\nclass TestSerializedByStorage(unittest.TestCase):\n @mock_s3\n def test_pickle_s3(self):\n import boto3\n client = boto3.client('s3')\n client.create_bucket(Bucket='fake-open-skills', ACL='public-read-write')\n s3 = S3Store('fake-open-skills/models')\n model_storage = ModelStorage(storage=s3)\n fake = FakeModel('fake')\n model_storage.save_model(fake, fake.model_name)\n\n s_fake = SerializedByStorage(fake, s3, fake.model_name)\n s3.write(pickle.dumps(s_fake), 'fake.pickle')\n fake_unpickled = pickle.loads(s3.load('fake.pickle'))\n # make sure the fake model wasn't pickled but the reference\n assert fake_unpickled._model == None\n assert fake_unpickled.storage.path == s3.path\n assert fake_unpickled.val == fake.val\n\n # if the object to be pickled doesn't have storage attribute and didn't provide the storage\n # to SerializedByStorage, it will be serialized normally\n s_fake = SerializedByStorage(model=fake, model_name=fake.model_name)\n s3.write(pickle.dumps(s_fake), 'fake.pickle')\n fake_unpickled = pickle.loads(s3.load('fake.pickle'))\n assert fake_unpickled._model != None\n\n def test_delegation(self):\n fake = FakeModel('fake')\n s_fake = SerializedByStorage(model=fake, model_name=fake.model_name)\n assert fake.val == s_fake.val\n\n @mock_s3\n def test_with_iterable_pipelin(self):\n import boto3\n client=boto3.client('s3')\n client.create_bucket(Bucket='fake-open-skills', ACL='public-read-write')\n s3 = S3Store('fake-open-skills/models')\n model_storage = ModelStorage(storage=s3)\n fake = FakeModel('fake')\n\n model_storage.save_model(fake, fake.model_name)\n vectorize_for_pipeline = partial(nlp.vectorize, embedding_model=SerializedByStorage(storage=s3, model_name=fake.model_name, model=fake))\n pipe = IterablePipeline(vectorize_for_pipeline)\n\n pipe_unpickled = pickle.loads(pickle.dumps(pipe))\n # make sure the fake model wasn't pickled but the reference\n assert pipe_unpickled.functions[-1].keywords['embedding_model']._model == None\n assert pipe_unpickled.functions[-1].keywords['embedding_model'].storage.path == s3.path\n # The model will be loaded when it's needed\n assert list(pipe_unpickled([1])) == [[1, 2, 3, 4]]\n\n\nclass TestProxyObject(unittest.TestCase):\n def test_delegation(self):\n fake = FakeModel('fake')\n s_fake = ProxyObjectWithStorage(model_name=fake.model_name, model_obj=fake)\n assert fake.val == s_fake.val\n\n @mock_s3\n def test_save_load(self):\n import boto3\n client=boto3.client('s3')\n client.create_bucket(Bucket='fake-open-skills', ACL='public-read-write')\n s3 = S3Store('fake-open-skills')\n model_storage = ModelStorage(storage=s3)\n fake = FakeModel('fake')\n\n model_storage.save_model(fake, fake.model_name)\n proxy_fake = ProxyObjectWithStorage(model_obj=fake, storage=s3, model_name=fake.model_name)\n\n assert proxy_fake.storage == s3\n\n proxy_fake_unpickled = 
pickle.loads(pickle.dumps(proxy_fake))\n assert proxy_fake_unpickled.val == proxy_fake.val\n\n model_storage.save_model(proxy_fake, 'proxy_'+ proxy_fake.model_name)\n proxy_fake_loaded= model_storage.load_model('proxy_'+ proxy_fake.model_name)\n\n assert proxy_fake_loaded.val == proxy_fake.val == fake.val\n\n @mock_s3\n def test_with_iterable_pipeline(self):\n import boto3\n client=boto3.client('s3')\n client.create_bucket(Bucket='fake-open-skills', ACL='public-read-write')\n s3 = S3Store('fake-open-skills')\n model_storage = ModelStorage(s3)\n\n proxy_fake = ProxyObjectWithStorage(model_obj=FakeModel('fake'), storage=s3, model_name='fake')\n model_storage.save_model(proxy_fake, proxy_fake.model_name)\n\n vectorize_for_pipeline = partial(nlp.vectorize, embedding_model=SerializedByStorage(model=proxy_fake, model_name=proxy_fake.model_name))\n pipe = IterablePipeline(vectorize_for_pipeline)\n\n s3.write(pickle.dumps(pipe), 'fake.pipe')\n pipe_unpickled = pickle.loads(s3.load('fake.pipe'))\n\n assert list(pipe_unpickled([1])) == [[1, 2, 3, 4]]\n\n @mock_s3\n def test_with_grid_search(self):\n import boto3\n client=boto3.client('s3')\n client.create_bucket(Bucket='fake-open-skills', ACL='public-read-write')\n s3 = S3Store('fake-open-skills')\n model_storage = ModelStorage(s3)\n\n from sklearn.ensemble import RandomForestClassifier\n from sklearn.model_selection import GridSearchCV\n\n gs = GridSearchCV(RandomForestClassifier(), {})\n proxy_gs = ProxyObjectWithStorage(model_obj=gs, storage=s3, model_name='rf.grid')\n\n X = np.random.rand(20, 2)\n y = np.random.randint(2, size=20)\n\n proxy_gs.fit(X, y)\n model_storage.save_model(proxy_gs, 'rf.grid')\n\n loaded_proxy_gs = model_storage.load_model('rf.grid')\n\n assert loaded_proxy_gs.storage.path == s3.path\n assert proxy_gs.predict([[5, 6]]) == gs.predict([[5, 6]])\n\n" ]
[ [ "numpy.random.rand", "sklearn.ensemble.RandomForestClassifier", "numpy.random.randint" ] ]
vineetjnair9/GridCal
[ "5b63cbae45cbe176b015e5e99164a593f450fe71" ]
[ "src/GridCal/Engine/Core/snapshot_opf_data.py" ]
[ "# This file is part of GridCal.\n#\n# GridCal is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# GridCal is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with GridCal. If not, see <http://www.gnu.org/licenses/>.\n\nimport numpy as np\nimport scipy.sparse as sp\nfrom typing import List\n\nfrom GridCal.Engine.basic_structures import Logger\nimport GridCal.Engine.Core.topology as tp\nfrom GridCal.Engine.Core.multi_circuit import MultiCircuit\nfrom GridCal.Engine.basic_structures import BranchImpedanceMode\nfrom GridCal.Engine.basic_structures import BusMode\nfrom GridCal.Engine.Core.common_functions import compile_types\n\n\nclass OpfSnapshotCircuit:\n\n def __init__(self, nbus, nline, ntr, nvsc, nhvdc, nload, ngen, nbatt, nshunt, nstagen, sbase,\n apply_temperature=False, branch_tolerance_mode: BranchImpedanceMode = BranchImpedanceMode.Specified):\n \"\"\"\n\n :param nbus: number of buses\n :param nline: number of lines\n :param ntr: number of transformers\n :param nvsc:\n :param nhvdc:\n :param nload:\n :param ngen:\n :param nbatt:\n :param nshunt:\n \"\"\"\n\n self.nbus = nbus\n self.nline = nline\n self.ntr = ntr\n self.nvsc = nvsc\n self.nhvdc = nhvdc\n self.nload = nload\n self.ngen = ngen\n self.nbatt = nbatt\n self.nshunt = nshunt\n self.nstagen = nstagen\n\n self.Sbase = sbase\n\n self.apply_temperature = apply_temperature\n self.branch_tolerance_mode = branch_tolerance_mode\n\n # bus ----------------------------------------------------------------------------------------------------------\n self.bus_names = np.empty(nbus, dtype=object)\n self.bus_active = np.ones(nbus, dtype=int)\n self.Vbus = np.ones(nbus, dtype=complex)\n self.bus_types = np.empty(nbus, dtype=int)\n self.bus_installed_power = np.zeros(nbus, dtype=float)\n\n # branch common ------------------------------------------------------------------------------------------------\n self.nbr = nline + ntr + nvsc # exclude the HVDC model since it is not a real branch\n\n self.branch_names = np.empty(self.nbr, dtype=object)\n self.branch_active = np.zeros(self.nbr, dtype=int)\n self.F = np.zeros(self.nbr, dtype=int) # indices of the \"from\" buses\n self.T = np.zeros(self.nbr, dtype=int) # indices of the \"to\" buses\n self.branch_rates = np.zeros(self.nbr, dtype=float)\n self.branch_cost = np.zeros(self.nbr, dtype=float)\n self.branch_R = np.zeros(self.nbr, dtype=float)\n self.branch_X = np.zeros(self.nbr, dtype=float)\n self.C_branch_bus_f = sp.lil_matrix((self.nbr, nbus), dtype=int) # connectivity branch with their \"from\" bus\n self.C_branch_bus_t = sp.lil_matrix((self.nbr, nbus), dtype=int) # connectivity branch with their \"to\" bus\n\n # lines --------------------------------------------------------------------------------------------------------\n self.line_names = np.zeros(nline, dtype=object)\n self.line_R = np.zeros(nline, dtype=float)\n self.line_X = np.zeros(nline, dtype=float)\n self.line_B = np.zeros(nline, dtype=float)\n self.line_temp_base = np.zeros(nline, dtype=float)\n self.line_temp_oper = np.zeros(nline, dtype=float)\n self.line_alpha = np.zeros(nline, 
dtype=float)\n self.line_impedance_tolerance = np.zeros(nline, dtype=float)\n\n self.C_line_bus = sp.lil_matrix((nline, nbus), dtype=int) # this ons is just for splitting islands\n\n # transformer 2W + 3W ------------------------------------------------------------------------------------------\n self.tr_names = np.zeros(ntr, dtype=object)\n self.tr_R = np.zeros(ntr, dtype=float)\n self.tr_X = np.zeros(ntr, dtype=float)\n self.tr_G = np.zeros(ntr, dtype=float)\n self.tr_B = np.zeros(ntr)\n\n self.tr_tap_f = np.ones(ntr) # tap generated by the difference in nominal voltage at the form side\n self.tr_tap_t = np.ones(ntr) # tap generated by the difference in nominal voltage at the to side\n self.tr_tap_mod = np.ones(ntr) # normal tap module\n self.tr_tap_ang = np.zeros(ntr) # normal tap angle\n\n self.C_tr_bus = sp.lil_matrix((ntr, nbus), dtype=int) # this ons is just for splitting islands\n\n # hvdc line ----------------------------------------------------------------------------------------------------\n self.hvdc_names = np.zeros(nhvdc, dtype=object)\n self.hvdc_active = np.zeros(nhvdc, dtype=bool)\n self.hvdc_rate = np.zeros(nhvdc, dtype=float)\n\n self.hvdc_Pf = np.zeros(nhvdc)\n self.hvdc_Pt = np.zeros(nhvdc)\n\n self.C_hvdc_bus_f = sp.lil_matrix((nhvdc, nbus), dtype=int) # this ons is just for splitting islands\n self.C_hvdc_bus_t = sp.lil_matrix((nhvdc, nbus), dtype=int) # this ons is just for splitting islands\n\n # vsc converter ------------------------------------------------------------------------------------------------\n self.vsc_names = np.zeros(nvsc, dtype=object)\n self.vsc_R1 = np.zeros(nvsc)\n self.vsc_X1 = np.zeros(nvsc)\n self.vsc_Gsw = np.zeros(nvsc)\n self.vsc_Beq = np.zeros(nvsc)\n self.vsc_m = np.zeros(nvsc)\n self.vsc_theta = np.zeros(nvsc)\n\n self.C_vsc_bus = sp.lil_matrix((nvsc, nbus), dtype=int) # this ons is just for splitting islands\n\n # load ---------------------------------------------------------------------------------------------------------\n self.load_names = np.empty(nload, dtype=object)\n self.load_active = np.zeros(nload, dtype=bool)\n self.load_s = np.zeros(nload, dtype=complex)\n self.load_cost = np.zeros(nload)\n\n self.C_bus_load = sp.lil_matrix((nbus, nload), dtype=int)\n\n # static generators --------------------------------------------------------------------------------------------\n self.static_generator_names = np.empty(nstagen, dtype=object)\n self.static_generator_active = np.zeros(nstagen, dtype=bool)\n self.static_generator_s = np.zeros(nstagen, dtype=complex)\n\n self.C_bus_static_generator = sp.lil_matrix((nbus, nstagen), dtype=int)\n\n # battery ------------------------------------------------------------------------------------------------------\n self.battery_names = np.empty(nbatt, dtype=object)\n self.battery_active = np.zeros(nbatt, dtype=bool)\n self.battery_controllable = np.zeros(nbatt, dtype=bool)\n self.battery_dispatchable = np.zeros(nbatt, dtype=bool)\n self.battery_installed_p = np.zeros(nbatt)\n self.battery_p = np.zeros(nbatt)\n self.battery_pf = np.zeros(nbatt)\n self.battery_v = np.zeros(nbatt)\n self.battery_pmin = np.zeros(nbatt)\n self.battery_pmax = np.zeros(nbatt)\n self.battery_cost = np.zeros(nbatt)\n\n self.C_bus_batt = sp.lil_matrix((nbus, nbatt), dtype=int)\n\n # generator ----------------------------------------------------------------------------------------------------\n self.generator_names = np.empty(ngen, dtype=object)\n self.generator_active = np.zeros(ngen, dtype=bool)\n 
self.generator_controllable = np.zeros(ngen, dtype=bool)\n self.generator_dispatchable = np.zeros(ngen, dtype=bool)\n self.generator_installed_p = np.zeros(ngen)\n\n self.generator_p = np.zeros(ngen)\n self.generator_pf = np.zeros(ngen)\n self.generator_v = np.zeros(ngen)\n self.generator_pmin = np.zeros(ngen)\n self.generator_pmax = np.zeros(ngen)\n self.generator_cost = np.zeros(ngen)\n\n self.C_bus_gen = sp.lil_matrix((nbus, ngen), dtype=int)\n\n # shunt --------------------------------------------------------------------------------------------------------\n self.shunt_names = np.empty(nshunt, dtype=object)\n self.shunt_active = np.zeros(nshunt, dtype=bool)\n self.shunt_admittance = np.zeros(nshunt, dtype=complex)\n\n self.C_bus_shunt = sp.lil_matrix((nbus, nshunt), dtype=int)\n\n # --------------------------------------------------------------------------------------------------------------\n\n self.Sbus = np.zeros(self.nbus, dtype=complex)\n self.Ibus = np.zeros(self.nbus, dtype=complex)\n self.Yshunt_from_devices = np.zeros(self.nbus, dtype=complex)\n\n self.Ybus = None\n self.Yf = None\n self.Yt = None\n self.Yseries = None\n self.Yshunt = None\n\n self.original_bus_idx = np.arange(self.nbus)\n self.original_branch_idx = np.arange(self.nbr)\n self.original_tr_idx = np.arange(self.ntr)\n self.original_gen_idx = np.arange(self.ngen)\n self.original_bat_idx = np.arange(self.nbatt)\n\n self.pq = list()\n self.pv = list()\n self.vd = list()\n self.pqpv = list()\n\n def consolidate(self):\n \"\"\"\n Consolidates the information of this object\n :return:\n \"\"\"\n self.C_branch_bus_f = self.C_branch_bus_f.tocsc()\n self.C_branch_bus_t = self.C_branch_bus_t.tocsc()\n\n self.C_line_bus = self.C_line_bus.tocsc()\n self.C_tr_bus = self.C_tr_bus.tocsc()\n self.C_hvdc_bus_f = self.C_hvdc_bus_f.tocsc()\n self.C_hvdc_bus_t = self.C_hvdc_bus_t.tocsc()\n self.C_vsc_bus = self.C_vsc_bus.tocsc()\n\n self.C_bus_load = self.C_bus_load.tocsr()\n self.C_bus_batt = self.C_bus_batt.tocsr()\n self.C_bus_gen = self.C_bus_gen.tocsr()\n self.C_bus_shunt = self.C_bus_shunt.tocsr()\n self.C_bus_static_generator = self.C_bus_static_generator.tocsr()\n\n self.bus_installed_power = self.C_bus_gen * self.generator_installed_p\n self.bus_installed_power += self.C_bus_batt * self.battery_installed_p\n\n def R_corrected(self):\n \"\"\"\n Returns temperature corrected resistances (numpy array) based on a formula\n provided by: NFPA 70-2005, National Electrical Code, Table 8, footnote #2; and\n https://en.wikipedia.org/wiki/Electrical_resistivity_and_conductivity#Linear_approximation\n (version of 2019-01-03 at 15:20 EST).\n \"\"\"\n return self.line_R * (1.0 + self.line_alpha * (self.line_temp_oper - self.line_temp_base))\n\n def compute_admittance_matrices(self):\n \"\"\"\n Compute the admittance matrices\n :return: Ybus, Yseries, Yshunt\n \"\"\"\n # form the connectivity matrices with the states applied -------------------------------------------------------\n br_states_diag = sp.diags(self.branch_active)\n Cf = br_states_diag * self.C_branch_bus_f\n Ct = br_states_diag * self.C_branch_bus_t\n\n # Declare the empty primitives ---------------------------------------------------------------------------------\n\n # The composition order is and will be: Pi model, HVDC, VSC\n Ytt = np.empty(self.nbr, dtype=complex)\n Yff = np.empty(self.nbr, dtype=complex)\n Yft = np.empty(self.nbr, dtype=complex)\n Ytf = np.empty(self.nbr, dtype=complex)\n\n # Branch primitives in vector form, for Yseries\n Ytts = np.empty(self.nbr, 
dtype=complex)\n Yffs = np.empty(self.nbr, dtype=complex)\n Yfts = np.empty(self.nbr, dtype=complex)\n Ytfs = np.empty(self.nbr, dtype=complex)\n\n ysh_br = np.empty(self.nbr, dtype=complex)\n\n # line ---------------------------------------------------------------------------------------------------------\n a = 0\n b = self.nline\n\n # use the specified of the temperature-corrected resistance\n if self.apply_temperature:\n line_R = self.R_corrected()\n else:\n line_R = self.line_R\n\n # modify the branches impedance with the lower, upper tolerance values\n if self.branch_tolerance_mode == BranchImpedanceMode.Lower:\n line_R *= (1 - self.line_impedance_tolerance / 100.0)\n elif self.branch_tolerance_mode == BranchImpedanceMode.Upper:\n line_R *= (1 + self.line_impedance_tolerance / 100.0)\n\n Ys_line = 1.0 / (line_R + 1.0j * self.line_X)\n Ysh_line = 1.0j * self.line_B\n Ys_line2 = Ys_line + Ysh_line / 2.0\n\n # branch primitives in vector form for Ybus\n Ytt[a:b] = Ys_line2\n Yff[a:b] = Ys_line2\n Yft[a:b] = - Ys_line\n Ytf[a:b] = - Ys_line\n\n # branch primitives in vector form, for Yseries\n Ytts[a:b] = Ys_line\n Yffs[a:b] = Ys_line\n Yfts[a:b] = - Ys_line\n Ytfs[a:b] = - Ys_line\n ysh_br[a:b] = Ysh_line / 2.0\n\n # transformer models -------------------------------------------------------------------------------------------\n\n a = self.nline\n b = a + self.ntr\n\n Ys_tr = 1.0 / (self.tr_R + 1.0j * self.tr_X)\n Ysh_tr = 1.0j * self.tr_B\n Ys_tr2 = Ys_tr + Ysh_tr / 2.0\n tap = self.tr_tap_mod * np.exp(1.0j * self.tr_tap_ang)\n\n # branch primitives in vector form for Ybus\n Ytt[a:b] = Ys_tr2 / (self.tr_tap_t * self.tr_tap_t)\n Yff[a:b] = Ys_tr2 / (self.tr_tap_f * self.tr_tap_f * tap * np.conj(tap))\n Yft[a:b] = - Ys_tr / (self.tr_tap_f * self.tr_tap_t * np.conj(tap))\n Ytf[a:b] = - Ys_tr / (self.tr_tap_t * self.tr_tap_f * tap)\n\n # branch primitives in vector form, for Yseries\n Ytts[a:b] = Ys_tr\n Yffs[a:b] = Ys_tr / (tap * np.conj(tap))\n Yfts[a:b] = - Ys_tr / np.conj(tap)\n Ytfs[a:b] = - Ys_tr / tap\n ysh_br[a:b] = Ysh_tr / 2.0\n\n # VSC MODEL ----------------------------------------------------------------------------------------------------\n a = self.nline + self.ntr\n b = a + self.nvsc\n\n Y_vsc = 1.0 / (self.vsc_R1 + 1.0j * self.vsc_X1) # Y1\n Yff[a:b] = Y_vsc\n Yft[a:b] = -self.vsc_m * np.exp(1.0j * self.vsc_theta) * Y_vsc\n Ytf[a:b] = -self.vsc_m * np.exp(-1.0j * self.vsc_theta) * Y_vsc\n Ytt[a:b] = self.vsc_Gsw + self.vsc_m * self.vsc_m * (Y_vsc + 1.0j * self.vsc_Beq)\n\n Yffs[a:b] = Y_vsc\n Yfts[a:b] = -self.vsc_m * np.exp(1.0j * self.vsc_theta) * Y_vsc\n Ytfs[a:b] = -self.vsc_m * np.exp(-1.0j * self.vsc_theta) * Y_vsc\n Ytts[a:b] = self.vsc_m * self.vsc_m * (Y_vsc + 1.0j)\n\n # HVDC LINE MODEL ----------------------------------------------------------------------------------------------\n # does not apply since the HVDC-line model is the simplistic 2-generator model\n\n # SHUNT --------------------------------------------------------------------------------------------------------\n self.Yshunt_from_devices = self.C_bus_shunt * (self.shunt_admittance * self.shunt_active / self.Sbase)\n\n # form the admittance matrices ---------------------------------------------------------------------------------\n self.Yf = sp.diags(Yff) * Cf + sp.diags(Yft) * Ct\n self.Yt = sp.diags(Ytf) * Cf + sp.diags(Ytt) * Ct\n self.Ybus = sp.csc_matrix(Cf.T * self.Yf + Ct.T * self.Yt) + sp.diags(self.Yshunt_from_devices)\n\n # form the admittance matrices of the series and shunt elements 
------------------------------------------------\n Yfs = sp.diags(Yffs) * Cf + sp.diags(Yfts) * Ct\n Yts = sp.diags(Ytfs) * Cf + sp.diags(Ytts) * Ct\n self.Yseries = sp.csc_matrix(Cf.T * Yfs + Ct.T * Yts)\n\n self.Yshunt = Cf.T * ysh_br + Ct.T * ysh_br + self.Yshunt_from_devices\n\n def get_generator_injections(self):\n \"\"\"\n Compute the active and reactive power of non-controlled generators (assuming all)\n :return:\n \"\"\"\n pf2 = np.power(self.generator_pf, 2.0)\n pf_sign = (self.generator_pf + 1e-20) / np.abs(self.generator_pf + 1e-20)\n Q = pf_sign * self.generator_p * np.sqrt((1.0 - pf2) / (pf2 + 1e-20))\n return self.generator_p + 1.0j * Q\n\n def get_battery_injections(self):\n \"\"\"\n Compute the active and reactive power of non-controlled batteries (assuming all)\n :return:\n \"\"\"\n pf2 = np.power(self.battery_pf, 2.0)\n pf_sign = (self.battery_pf + 1e-20) / np.abs(self.battery_pf + 1e-20)\n Q = pf_sign * self.battery_p * np.sqrt((1.0 - pf2) / (pf2 + 1e-20))\n return self.battery_p + 1.0j * Q\n\n def compute_injections(self):\n \"\"\"\n Compute the power\n :return: nothing, the results are stored in the class\n \"\"\"\n self.Sbus = - self.C_bus_load * (self.load_s * self.load_active) # MW\n\n # generators\n self.Sbus += self.C_bus_gen * (self.get_generator_injections() * self.generator_active)\n\n # battery\n self.Sbus += self.C_bus_batt * (self.get_battery_injections() * self.battery_active)\n\n # HVDC forced power\n if self.nhvdc:\n self.Sbus += (self.hvdc_active * self.hvdc_Pf) * self.C_hvdc_bus_f\n self.Sbus += (self.hvdc_active * self.hvdc_Pt) * self.C_hvdc_bus_t\n\n self.Sbus /= self.Sbase\n\n def consolidate(self):\n \"\"\"\n Computes the parameters given the filled-in information\n :return:\n \"\"\"\n self.compute_injections()\n\n self.vd, self.pq, self.pv, self.pqpv = compile_types(Sbus=self.Sbus, types=self.bus_types)\n\n self.compute_admittance_matrices()\n\n\ndef get_opf_island(self, bus_idx) -> \"OpfSnapshotCircuit\":\n \"\"\"\n Get the island corresponding to the given buses\n :param bus_idx: array of bus indices\n :return: SnapshotCircuit\n \"\"\"\n\n # find the indices of the devices of the island\n line_idx = tp.get_elements_of_the_island(self.C_line_bus, bus_idx)\n tr_idx = tp.get_elements_of_the_island(self.C_tr_bus, bus_idx)\n vsc_idx = tp.get_elements_of_the_island(self.C_vsc_bus, bus_idx)\n hvdc_idx = tp.get_elements_of_the_island(self.C_hvdc_bus_f + self.C_hvdc_bus_t, bus_idx)\n br_idx = tp.get_elements_of_the_island(self.C_branch_bus_f + self.C_branch_bus_t, bus_idx)\n\n load_idx = tp.get_elements_of_the_island(self.C_bus_load.T, bus_idx)\n stagen_idx = tp.get_elements_of_the_island(self.C_bus_static_generator.T, bus_idx)\n gen_idx = tp.get_elements_of_the_island(self.C_bus_gen.T, bus_idx)\n batt_idx = tp.get_elements_of_the_island(self.C_bus_batt.T, bus_idx)\n shunt_idx = tp.get_elements_of_the_island(self.C_bus_shunt.T, bus_idx)\n\n nc = OpfSnapshotCircuit(nbus=len(bus_idx),\n nline=len(line_idx),\n ntr=len(tr_idx),\n nvsc=len(vsc_idx),\n nhvdc=len(hvdc_idx),\n nload=len(load_idx),\n ngen=len(gen_idx),\n nbatt=len(batt_idx),\n nshunt=len(shunt_idx),\n nstagen=len(stagen_idx),\n sbase=self.Sbase,\n apply_temperature=self.apply_temperature,\n branch_tolerance_mode=self.branch_tolerance_mode)\n\n nc.original_bus_idx = bus_idx\n nc.original_branch_idx = br_idx\n\n nc.original_tr_idx = tr_idx\n nc.original_gen_idx = gen_idx\n nc.original_bat_idx = batt_idx\n\n # bus 
----------------------------------------------------------------------------------------------------------\n nc.bus_names = self.bus_names[bus_idx]\n nc.bus_active = self.bus_active[bus_idx]\n nc.Vbus = self.Vbus[bus_idx]\n nc.bus_types = self.bus_types[bus_idx]\n\n # branch common ------------------------------------------------------------------------------------------------\n nc.branch_names = self.branch_names[br_idx]\n nc.branch_active = self.branch_active[br_idx]\n nc.F = self.F[br_idx]\n nc.T = self.T[br_idx]\n nc.branch_rates = self.branch_rates[br_idx]\n nc.branch_cost = self.branch_cost[br_idx]\n nc.branch_R = self.branch_R[br_idx]\n nc.branch_X = self.branch_X[br_idx]\n nc.C_branch_bus_f = self.C_branch_bus_f[np.ix_(br_idx, bus_idx)]\n nc.C_branch_bus_t = self.C_branch_bus_t[np.ix_(br_idx, bus_idx)]\n\n # lines --------------------------------------------------------------------------------------------------------\n nc.line_names = self.line_names[line_idx]\n nc.line_R = self.line_R[line_idx]\n nc.line_X = self.line_X[line_idx]\n nc.line_B = self.line_B[line_idx]\n nc.line_temp_base = self.line_temp_base[line_idx]\n nc.line_temp_oper = self.line_temp_oper[line_idx]\n nc.line_alpha = self.line_alpha[line_idx]\n nc.line_impedance_tolerance = self.line_impedance_tolerance[line_idx]\n\n nc.C_line_bus = self.C_line_bus[np.ix_(line_idx, bus_idx)]\n\n # transformer 2W + 3W ------------------------------------------------------------------------------------------\n nc.tr_names = self.tr_names[tr_idx]\n nc.tr_R = self.tr_R[tr_idx]\n nc.tr_X = self.tr_X[tr_idx]\n nc.tr_G = self.tr_G[tr_idx]\n nc.tr_B = self.tr_B[tr_idx]\n\n nc.tr_tap_f = self.tr_tap_f[tr_idx]\n nc.tr_tap_t = self.tr_tap_t[tr_idx]\n nc.tr_tap_mod = self.tr_tap_mod[tr_idx]\n nc.tr_tap_ang = self.tr_tap_ang[tr_idx]\n\n nc.C_tr_bus = self.C_tr_bus[np.ix_(tr_idx, bus_idx)]\n\n # hvdc line ----------------------------------------------------------------------------------------------------\n nc.hvdc_names = self.hvdc_names[hvdc_idx]\n nc.hvdc_active = self.hvdc_active[hvdc_idx]\n nc.hvdc_rate = self.hvdc_rate[hvdc_idx]\n\n nc.hvdc_Pf = self.hvdc_Pf[hvdc_idx]\n nc.hvdc_Pt = self.hvdc_Pt[hvdc_idx]\n\n nc.C_hvdc_bus_f = self.C_hvdc_bus_f[np.ix_(hvdc_idx, bus_idx)]\n nc.C_hvdc_bus_t = self.C_hvdc_bus_t[np.ix_(hvdc_idx, bus_idx)]\n\n # vsc converter ------------------------------------------------------------------------------------------------\n nc.vsc_names = self.vsc_names[vsc_idx]\n nc.vsc_R1 = self.vsc_R1[vsc_idx]\n nc.vsc_X1 = self.vsc_X1[vsc_idx]\n nc.vsc_Gsw = self.vsc_Gsw[vsc_idx]\n nc.vsc_Beq = self.vsc_Beq[vsc_idx]\n nc.vsc_m = self.vsc_m[vsc_idx]\n nc.vsc_theta = self.vsc_theta[vsc_idx]\n\n nc.C_vsc_bus = self.C_vsc_bus[np.ix_(vsc_idx, bus_idx)]\n\n # load ---------------------------------------------------------------------------------------------------------\n nc.load_names = self.load_names[load_idx]\n nc.load_active = self.load_active[load_idx]\n nc.load_s = self.load_s[load_idx]\n nc.load_cost = self.load_cost[load_idx]\n\n nc.C_bus_load = self.C_bus_load[np.ix_(bus_idx, load_idx)]\n\n # static generators --------------------------------------------------------------------------------------------\n nc.static_generator_names = self.static_generator_names[stagen_idx]\n nc.static_generator_active = self.static_generator_active[stagen_idx]\n nc.static_generator_s = self.static_generator_s[stagen_idx]\n\n nc.C_bus_static_generator = self.C_bus_static_generator[np.ix_(bus_idx, stagen_idx)]\n\n # battery 
------------------------------------------------------------------------------------------------------\n nc.battery_names = self.battery_names[batt_idx]\n nc.battery_active = self.battery_active[batt_idx]\n nc.battery_controllable = self.battery_controllable[batt_idx]\n nc.battery_dispatchable = self.battery_dispatchable[batt_idx]\n nc.battery_cost = self.battery_cost[batt_idx]\n nc.battery_p = self.battery_p[batt_idx]\n nc.battery_pf = self.battery_pf[batt_idx]\n nc.battery_v = self.battery_v[batt_idx]\n nc.battery_pmin = self.battery_pmin[batt_idx]\n nc.battery_pmax = self.battery_pmax[batt_idx]\n\n nc.C_bus_batt = self.C_bus_batt[np.ix_(bus_idx, batt_idx)]\n\n # generator ----------------------------------------------------------------------------------------------------\n nc.generator_names = self.generator_names[gen_idx]\n nc.generator_active = self.generator_active[gen_idx]\n nc.generator_controllable = self.generator_controllable[gen_idx]\n nc.generator_dispatchable = self.generator_dispatchable[gen_idx]\n nc.generator_cost = self.generator_cost[gen_idx]\n nc.generator_p = self.generator_p[gen_idx]\n nc.generator_pf = self.generator_pf[gen_idx]\n nc.generator_v = self.generator_v[gen_idx]\n nc.generator_pmin = self.generator_pmin[gen_idx]\n nc.generator_pmax = self.generator_pmax[gen_idx]\n\n nc.C_bus_gen = self.C_bus_gen[np.ix_(bus_idx, gen_idx)]\n\n # shunt --------------------------------------------------------------------------------------------------------\n nc.shunt_names = self.shunt_names[shunt_idx]\n nc.shunt_active = self.shunt_active[shunt_idx]\n nc.shunt_admittance = self.shunt_admittance[shunt_idx]\n\n nc.C_bus_shunt = self.C_bus_shunt[np.ix_(bus_idx, shunt_idx)]\n\n return nc\n\n\ndef split_into_opf_islands(numeric_circuit: OpfSnapshotCircuit, ignore_single_node_islands=False) -> List[OpfSnapshotCircuit]:\n \"\"\"\n Split circuit into islands\n :param numeric_circuit: NumericCircuit instance\n :param ignore_single_node_islands: ignore islands composed of only one bus\n :return: List[NumericCircuit]\n \"\"\"\n\n # compute the adjacency matrix\n A = tp.get_adjacency_matrix(C_branch_bus_f=numeric_circuit.C_branch_bus_f,\n C_branch_bus_t=numeric_circuit.C_branch_bus_t,\n branch_active=numeric_circuit.branch_active,\n bus_active=numeric_circuit.bus_active)\n\n # find the matching islands\n idx_islands = tp.find_islands(A)\n\n if len(idx_islands) == 1:\n numeric_circuit.consolidate() # compute the internal magnitudes\n return [numeric_circuit]\n\n else:\n\n circuit_islands = list() # type: List[OpfSnapshotCircuit]\n\n for bus_idx in idx_islands:\n\n if ignore_single_node_islands:\n\n if len(bus_idx) > 1:\n island = get_opf_island(numeric_circuit, bus_idx)\n island.consolidate() # compute the internal magnitudes\n circuit_islands.append(island)\n\n else:\n island = get_opf_island(numeric_circuit, bus_idx)\n island.consolidate() # compute the internal magnitudes\n circuit_islands.append(island)\n\n return circuit_islands\n\n\ndef compile_snapshot_opf_circuit(circuit: MultiCircuit, apply_temperature=False,\n branch_tolerance_mode=BranchImpedanceMode.Specified) -> OpfSnapshotCircuit:\n \"\"\"\n Compile the information of a circuit and generate the pertinent power flow islands\n :param circuit: Circuit instance\n :param apply_temperature:\n :param branch_tolerance_mode:\n :return: list of NumericIslands\n \"\"\"\n\n logger = Logger()\n\n bus_dictionary = dict()\n\n # Element count\n nbus = len(circuit.buses)\n nload = 0\n ngen = 0\n n_batt = 0\n nshunt = 0\n nstagen = 0\n for 
bus in circuit.buses:\n nload += len(bus.loads)\n ngen += len(bus.controlled_generators)\n n_batt += len(bus.batteries)\n nshunt += len(bus.shunts)\n nstagen += len(bus.static_generators)\n\n nline = len(circuit.lines)\n ntr2w = len(circuit.transformers2w)\n nvsc = len(circuit.vsc_converters)\n nhvdc = len(circuit.hvdc_lines)\n\n # declare the numerical circuit\n nc = OpfSnapshotCircuit(nbus=nbus,\n nline=nline,\n ntr=ntr2w,\n nvsc=nvsc,\n nhvdc=nhvdc,\n nload=nload,\n ngen=ngen,\n nbatt=n_batt,\n nshunt=nshunt,\n nstagen=nstagen,\n sbase=circuit.Sbase,\n apply_temperature=apply_temperature,\n branch_tolerance_mode=branch_tolerance_mode\n )\n\n # buses and it's connected elements (loads, generators, etc...)\n i_ld = 0\n i_gen = 0\n i_batt = 0\n i_sh = 0\n i_stagen = 0\n for i, bus in enumerate(circuit.buses):\n\n # bus parameters\n nc.bus_names[i] = bus.name\n nc.bus_active[i] = bus.active\n nc.bus_types[i] = bus.determine_bus_type().value\n\n # Add buses dictionary entry\n bus_dictionary[bus] = i\n\n for elm in bus.loads:\n nc.load_names[i_ld] = elm.name\n nc.load_active[i_ld] = elm.active\n nc.load_s[i_ld] = complex(elm.P, elm.Q)\n nc.load_cost[i_ld] = elm.Cost\n nc.C_bus_load[i, i_ld] = 1\n i_ld += 1\n\n for elm in bus.static_generators:\n nc.static_generator_names[i_stagen] = elm.name\n nc.static_generator_active[i_stagen] = elm.active\n nc.static_generator_s[i_stagen] = complex(elm.P, elm.Q)\n nc.C_bus_static_generator[i, i_stagen] = 1\n i_stagen += 1\n\n for elm in bus.controlled_generators:\n nc.generator_names[i_gen] = elm.name\n nc.generator_cost[i_gen] = elm.Cost\n nc.generator_pf[i_gen] = elm.Pf\n nc.generator_v[i_gen] = elm.Vset\n nc.generator_pmin[i_gen] = elm.Pmin\n nc.generator_pmax[i_gen] = elm.Pmax\n nc.generator_active[i_gen] = elm.active\n nc.generator_controllable[i_gen] = elm.is_controlled\n nc.generator_p[i_gen] = elm.P\n nc.generator_installed_p[i_gen] = elm.Snom\n\n nc.C_bus_gen[i, i_gen] = 1\n\n if nc.Vbus[i].real == 1.0:\n nc.Vbus[i] = complex(elm.Vset, 0)\n elif elm.Vset != nc.Vbus[i]:\n logger.append('Different set points at ' + bus.name + ': ' + str(elm.Vset) + ' !=' + str(nc.Vbus[i]))\n i_gen += 1\n\n for elm in bus.batteries:\n nc.battery_names[i_batt] = elm.name\n nc.battery_cost[i_batt] = elm.Cost\n nc.battery_p[i_batt] = elm.P\n nc.battery_pf[i_batt] = elm.Pf\n nc.battery_v[i_batt] = elm.Vset\n nc.battery_pmin[i_batt] = elm.Pmin\n nc.battery_pmax[i_batt] = elm.Pmax\n nc.battery_active[i_batt] = elm.active\n nc.battery_controllable[i_batt] = elm.is_controlled\n nc.battery_installed_p[i_batt] = elm.Snom\n\n nc.C_bus_batt[i, i_batt] = 1\n\n if nc.Vbus[i].real == 1.0:\n nc.Vbus[i] = complex(elm.Vset, 0)\n elif elm.Vset != nc.Vbus[i]:\n logger.append('Different set points at ' + bus.name + ': ' + str(elm.Vset) + ' !=' + str(nc.Vbus[i]))\n\n i_batt += 1\n\n for elm in bus.shunts:\n nc.shunt_names[i_sh] = elm.name\n nc.shunt_active[i_sh] = elm.active\n nc.shunt_admittance[i_sh] = complex(elm.G, elm.B)\n\n nc.C_bus_shunt[i, i_sh] = 1\n i_sh += 1\n\n # Compile the lines\n for i, elm in enumerate(circuit.lines):\n # generic stuff\n nc.branch_names[i] = elm.name\n nc.branch_active[i] = elm.active\n nc.branch_rates[i] = elm.rate\n\n nc.branch_R[i] = elm.R\n nc.branch_X[i] = elm.X\n nc.branch_cost[i] = elm.Cost\n\n f = bus_dictionary[elm.bus_from]\n t = bus_dictionary[elm.bus_to]\n nc.C_branch_bus_f[i, f] = 1\n nc.C_branch_bus_t[i, t] = 1\n nc.F[i] = f\n nc.T[i] = t\n\n # impedance\n nc.line_names[i] = elm.name\n nc.line_R[i] = elm.R\n nc.line_X[i] = elm.X\n 
nc.line_B[i] = elm.B\n nc.C_line_bus[i, f] = 1\n nc.C_line_bus[i, t] = 1\n\n # 2-winding transformers\n for i, elm in enumerate(circuit.transformers2w):\n ii = i + nline\n\n # generic stuff\n f = bus_dictionary[elm.bus_from]\n t = bus_dictionary[elm.bus_to]\n\n nc.branch_R[i] = elm.R\n nc.branch_X[i] = elm.X\n nc.branch_cost[i] = elm.Cost\n\n nc.branch_names[ii] = elm.name\n nc.branch_active[ii] = elm.active\n nc.branch_rates[ii] = elm.rate\n nc.C_branch_bus_f[ii, f] = 1\n nc.C_branch_bus_t[ii, t] = 1\n nc.F[ii] = f\n nc.T[ii] = t\n\n # impedance\n nc.tr_names[i] = elm.name\n nc.tr_R[i] = elm.R\n nc.tr_X[i] = elm.X\n nc.tr_G[i] = elm.G\n nc.tr_B[i] = elm.B\n\n nc.C_tr_bus[i, f] = 1\n nc.C_tr_bus[i, t] = 1\n\n # tap changer\n nc.tr_tap_mod[i] = elm.tap_module\n nc.tr_tap_ang[i] = elm.angle\n\n # virtual taps for transformers where the connection voltage is off\n nc.tr_tap_f[i], nc.tr_tap_t[i] = elm.get_virtual_taps()\n\n # VSC\n for i, elm in enumerate(circuit.vsc_converters):\n ii = i + nline + ntr2w\n\n # generic stuff\n f = bus_dictionary[elm.bus_from]\n t = bus_dictionary[elm.bus_to]\n\n nc.branch_R[i] = elm.R1\n nc.branch_X[i] = elm.X1\n nc.branch_cost[i] = elm.Cost\n\n nc.branch_names[ii] = elm.name\n nc.branch_active[ii] = elm.active\n nc.branch_rates[ii] = elm.rate\n nc.C_branch_bus_f[ii, f] = 1\n nc.C_branch_bus_t[ii, t] = 1\n nc.F[ii] = f\n nc.T[ii] = t\n\n # vsc values\n nc.vsc_names[i] = elm.name\n nc.vsc_R1[i] = elm.R1\n nc.vsc_X1[i] = elm.X1\n nc.vsc_Gsw[i] = elm.Gsw\n nc.vsc_Beq[i] = elm.Beq\n nc.vsc_m[i] = elm.m\n nc.vsc_theta[i] = elm.theta\n\n nc.C_vsc_bus[i, f] = 1\n nc.C_vsc_bus[i, t] = 1\n\n # HVDC\n for i, elm in enumerate(circuit.hvdc_lines):\n ii = i + nline + ntr2w + nvsc\n\n # generic stuff\n f = bus_dictionary[elm.bus_from]\n t = bus_dictionary[elm.bus_to]\n\n # hvdc values\n nc.hvdc_names[i] = elm.name\n nc.hvdc_active[i] = elm.active\n nc.hvdc_rate[i] = elm.rate\n\n nc.hvdc_Pf[i], nc.hvdc_Pt[i] = elm.get_from_and_to_power()\n\n # hack the bus types to believe they are PV\n nc.bus_types[f] = BusMode.PV.value\n nc.bus_types[t] = BusMode.PV.value\n\n # the the bus-hvdc line connectivity\n nc.C_hvdc_bus_f[i, f] = 1\n nc.C_hvdc_bus_t[i, t] = 1\n\n # consolidate the information\n nc.consolidate()\n\n return nc\n\n" ]
[ [ "scipy.sparse.csc_matrix", "numpy.ix_", "numpy.conj", "numpy.abs", "numpy.power", "numpy.sqrt", "numpy.arange", "scipy.sparse.diags", "numpy.ones", "numpy.exp", "numpy.zeros", "numpy.empty", "scipy.sparse.lil_matrix" ] ]
lucaspbastos/soundata
[ "8b60852debea7cdc49e2c6853b033a29e503b67c" ]
[ "soundata/datasets/fsdnoisy18k.py" ]
[ "\"\"\"FSDnoisy18K Dataset Loader\n\n.. admonition:: Dataset Info\n :class: dropdown\n\n *Created By:*\n Eduardo Fonseca, Mercedes Collado, Manoj Plakal, Daniel P. W. Ellis, Frederic Font, Xavier Favory, Xavier Serra.\n Music Technology Group, Universitat Pompeu Fabra (Barcelona). Version 1.0\n\n *Description:*\n FSDnoisy18k is an audio dataset collected with the aim of fostering the investigation of label noise in sound\n event classification. It contains 42.5 hours of audio across 20 sound classes, including a small amount of\n manually-labeled data and a larger quantity of real-world noisy data.\n\n What follows is a summary of the most basic aspects of FSDnoisy18k. For a complete description of FSDnoisy18k,\n make sure to check:\n\n * The FSDnoisy18k companion site: http://www.eduardofonseca.net/FSDnoisy18k/\n * The description provided in Section 2 of our ICASSP 2019 paper\n\n FSDnoisy18k is an audio dataset collected with the aim of fostering the investigation of label noise in sound\n event classification. It contains 42.5 hours of audio across 20 sound classes, including a small amount of\n manually-labeled data and a larger quantity of real-world noisy data.\n\n The source of audio content is Freesound—a sound sharing site created an maintained by the Music Technology Group\n hosting over 400,000 clips uploaded by its community of users, who additionally provide some basic metadata\n (e.g., tags, and title). The 20 classes of FSDnoisy18k are drawn from the AudioSet Ontology and are selected based\n on data availability as well as on their suitability to allow the study of label noise.\n The 20 classes are: \"Acoustic guitar\", \"Bass guitar\", \"Clapping\", \"Coin (dropping)\", \"Crash cymbal\", \"Dishes,\n pots, and pans\", \"Engine\", \"Fart\", \"Fire\", \"Fireworks\", \"Glass\", \"Hi-hat\", \"Piano\", \"Rain\", \"Slam\", \"Squeak\",\n \"Tearing\", \"Walk, footsteps\", \"Wind\", and \"Writing\". FSDnoisy18k was created with the Freesound Annotator,\n which is a platform for the collaborative creation of open audio datasets.\n\n We defined a clean portion of the dataset consisting of correct and complete labels. The remaining portion is\n referred to as the noisy portion. Each clip in the dataset has a single ground truth label (singly-labeled data).\n\n The clean portion of the data consists of audio clips whose labels are rated as present in the clip and\n predominant (almost all with full inter-annotator agreement), meaning that the label is correct and, in most\n cases, there is no additional acoustic material other than the labeled class. A few clips may contain some\n additional sound events, but they occur in the background and do not belong to any of the 20 target classes.\n This is more common for some classes that rarely occur alone, e.g., “Fire”, “Glass”, “Wind” or “Walk, footsteps”.\n\n The noisy portion of the data consists of audio clips that received no human validation. In this case, they are\n categorized on the basis of the user-provided tags in Freesound. 
Hence, the noisy portion features a certain\n amount of label noise.\n \n *Included files and statistics:*\n * FSDnoisy18k contains 18,532 audio clips (42.5h) unequally distributed in the 20 aforementioned classes drawn from the AudioSet Ontology.\n * The audio clips are provided as uncompressed PCM 16 bit, 44.1 kHz, mono audio files.\n * The audio clips are of variable length ranging from 300ms to 30s, and each clip has a single ground truth label (singly-labeled data).\n * The dataset is split into a test set and a train set. The test set is drawn entirely from the clean portion, while the remainder of data forms the train set.\n * The train set is composed of 17,585 clips (41.1h) unequally distributed among the 20 classes. It features a clean subset and a noisy subset. In terms of number of clips their proportion is 10%/90%, whereas in terms of duration the proportion is slightly more extreme (6%/94%). The per-class percentage of clean data within the train set is also imbalanced, ranging from 6.1% to 22.4%. The number of audio clips per class ranges from 51 to 170, and from 250 to 1000 in the clean and noisy subsets, respectively. Further, a noisy small subset is defined, which includes an amount of (noisy) data comparable (in terms of duration) to that of the clean subset.\n * The test set is composed of 947 clips (1.4h) that belong to the clean portion of the data. Its class distribution is similar to that of the clean subset of the train set. The number of per-class audio clips in the test set ranges from 30 to 72. The test set enables a multi-class classification problem.\n * FSDnoisy18k is an expandable dataset that features a per-class varying degree of types and amount of label noise. The dataset allows investigation of label noise as well as other approaches, from semi-supervised learning, e.g., self-training to learning with minimal supervision.\n\n\n *Additional code:*\n We've released the code for our ICASSP 2019 paper at https://github.com/edufonseca/icassp19. The framework\n comprises all the basic stages: feature extraction, training, inference and evaluation. After loading the\n FSDnoisy18k dataset, log-mel energies are computed and a CNN baseline is trained and evaluated. The code also\n allows to test four noise-robust loss functions. Please check our paper for more details.\n\n *Label noise characteristics:*\n FSDnoisy18k features real label noise that is representative of audio data retrieved from the web,\n particularly from Freesound. The analysis of a per-class, random, 15% of the noisy portion of FSDnoisy18k\n revealed that roughly 40% of the analyzed labels are correct and complete, whereas 60% of the labels show\n some type of label noise. Please check the FSDnoisy18k companion site for a detailed characterization of\n the label noise in the dataset, including a taxonomy of label noise for singly-labeled data as well as a\n per-class description of the label noise.\n \n *Relevant links:*\n * Source code for our preprint: https://github.com/edufonseca/icassp19\n * Freesound Annotator: https://annotator.freesound.org/\n * Freesound: https://freesound.org\n * Eduardo Fonseca’s personal website: http://www.eduardofonseca.net/\n\n\n *Please Acknowledge FSDnoisy18K in Academic Research:*\n If you use the FSDnoisy18K Dataset please cite the following paper:\n\n * Eduardo Fonseca, Manoj Plakal, Daniel P. W. 
Ellis, Frederic Font, Xavier Favory, and Xavier Serra, “Learning Sound Event Classifiers from Web Audio with Noisy Labels”, arXiv preprint arXiv:1901.01189, 2019\n \n This work is partially supported by the European Union’s Horizon 2020 research and innovation programme\n under grant agreement No 688382 AudioCommons. Eduardo Fonseca is also sponsored by a Google Faculty Research\n Award 2017. We thank everyone who contributed to FSDnoisy18k with annotations.\n\n\n *License:*\n FSDnoisy18k has licenses at two different levels, as explained next. All sounds in Freesound are released\n under Creative Commons (CC) licenses, and each audio clip has its own license as defined by the audio clip\n uploader in Freesound. In particular, all Freesound clips included in FSDnoisy18k are released under either\n CC-BY or CC0. For attribution purposes and to facilitate attribution of these files to third parties, we\n include a relation of audio clips and their corresponding license in the LICENSE-INDIVIDUAL-CLIPS file\n downloaded with the dataset.\n\n In addition, FSDnoisy18k as a whole is the result of a curation process and it has an additional license.\n FSDnoisy18k is released under CC-BY. This license is specified in the LICENSE-DATASET file downloaded with\n the dataset.\n\n\n *Feedback:*\n For further questions, please contact eduardo.fonseca@upf.edu, or join the freesound-annotator Google Group.\n\n\"\"\"\n\nimport os\nfrom typing import BinaryIO, Optional, Tuple\n\nimport librosa\nimport csv\nimport numpy as np\n\nfrom soundata import download_utils, jams_utils, core, annotations, io\n\n\nBIBTEX = \"\"\"\n@misc{fonseca2019learning,\n title={Learning Sound Event Classifiers from Web Audio with Noisy Labels},\n author={Eduardo Fonseca and Manoj Plakal and Daniel P. W. Ellis and Frederic Font and Xavier Favory and Xavier Serra},\n year={2019},\n eprint={1901.01189},\n archivePrefix={arXiv},\n primaryClass={cs.SD}\n}\n\"\"\"\nREMOTES = {\n \"audio_train\": download_utils.RemoteFileMetadata(\n filename=\"FSDnoisy18k.audio_train.zip\",\n url=\"https://zenodo.org/record/2529934/files/FSDnoisy18k.audio_train.zip?download=1\",\n checksum=\"34dc1d34ca44622af5bf439ceb6f0d55\",\n ),\n \"audio_test\": download_utils.RemoteFileMetadata(\n filename=\"FSDnoisy18k.audio_test.zip\",\n url=\"https://zenodo.org/record/2529934/files/FSDnoisy18k.audio_test.zip?download=1\",\n checksum=\"1ac73d70b4ef3f81900d98c261a832de\",\n ),\n \"docs\": download_utils.RemoteFileMetadata(\n filename=\"FSDnoisy18k.doc.zip\",\n url=\"https://zenodo.org/record/2529934/files/FSDnoisy18k.doc.zip?download=1\",\n checksum=\"093a1ca185ec341ca4eac14215e7f96b\",\n ),\n \"metadata\": download_utils.RemoteFileMetadata(\n filename=\"FSDnoisy18k.meta.zip\",\n url=\"https://zenodo.org/record/2529934/files/FSDnoisy18k.meta.zip?download=1\",\n checksum=\"96e27a4a63b7a2870522ddcedb5d8296\",\n ),\n}\n\nLICENSE_INFO = \"\"\"\nPlease note that FSDnoisy18k has licenses at two different levels. All sounds in Freesound are released\nunder Creative Commons (CC) licenses, and each audio clip has its own license as defined by the audio clip\nuploader in Freesound. In particular, all Freesound clips included in FSDnoisy18k are released under either\nCC-BY or CC0. 
For attribution purposes and to facilitate attribution of these files to third parties, we\ninclude a relation of audio clips and their corresponding license in the LICENSE-INDIVIDUAL-CLIPS file\ndownloaded with the dataset.\n\"\"\"\n\n\nclass Clip(core.Clip):\n \"\"\"FSDnoisy18K Clip class\n\n Args:\n clip_id (str): id of the clip\n\n Attributes:\n audio (np.ndarray, float): path to the audio file\n aso_id (str): the id of the corresponding category as per the AudioSet Ontology\n audio_path (str): path to the audio file\n clip_id (str): clip id\n manually_verified (int): flag to indicate whether the clip belongs to the clean portion (1), or to the noisy portion (0) of the train set\n noisy_small (int): flag to indicate whether the clip belongs to the noisy_small portion (1) of the train set\n split (str): flag to indicate whether the clip belongs the train or test split\n tag (soundata.annotations.Tags): tag (label) of the clip + confidence\n \"\"\"\n\n def __init__(self, clip_id, data_home, dataset_name, index, metadata):\n super().__init__(clip_id, data_home, dataset_name, index, metadata)\n\n self.audio_path = self.get_path(\"audio\")\n\n @property\n def audio(self) -> Optional[Tuple[np.ndarray, float]]:\n \"\"\"The clip's audio\n\n Returns:\n * np.ndarray - audio signal\n * float - sample rate\n\n \"\"\"\n return load_audio(self.audio_path)\n\n @property\n def tags(self):\n \"\"\"The clip's tags.\n\n Returns:\n * annotations.Tags - tag (label) of the clip + confidence\n\n \"\"\"\n return annotations.Tags(\n [self._clip_metadata.get(\"tag\")], \"open\", np.array([1.0])\n )\n\n @property\n def aso_id(self):\n \"\"\"The clip's Audioset ontology ID.\n\n Returns:\n * str - the id of the corresponding category as per the AudioSet Ontology\n\n \"\"\"\n return self._clip_metadata.get(\"aso_id\")\n\n @property\n def manually_verified(self):\n \"\"\"The clip's manually annotated flag.\n\n Returns:\n * int - flag to indicate whether the clip belongs to the clean portion (1), or to the noisy portion (0) of the train set\n\n \"\"\"\n return self._clip_metadata.get(\"manually_verified\")\n\n @property\n def noisy_small(self):\n \"\"\"The clip's noisy flag.\n\n Returns:\n * int - flag to indicate whether the clip belongs to the noisy_small portion (1) of the train set\n\n \"\"\"\n return self._clip_metadata.get(\"noisy_small\")\n\n @property\n def split(self):\n \"\"\"The clip's split.\n\n Returns:\n * str - flag to indicate whether the clip belongs the train or test split\n\n \"\"\"\n return self._clip_metadata.get(\"split\")\n\n def to_jams(self):\n \"\"\"Get the clip's data in jams format\n\n Returns:\n jams.JAMS: the clip's data in jams format\n\n \"\"\"\n return jams_utils.jams_converter(\n audio_path=self.audio_path, tags=self.tags, metadata=self._clip_metadata\n )\n\n\n@io.coerce_to_bytes_io\ndef load_audio(fhandle: BinaryIO, sr=None) -> Tuple[np.ndarray, float]:\n \"\"\"Load a FSDnoisy18K audio file.\n\n Args:\n fhandle (str or file-like): File-like object or path to audio file\n sr (int or None): sample rate for loaded audio, 44100 Hz by default.\n If different from file's sample rate it will be resampled on load.\n Use None to load the file using its original sample rate (sample rate\n varies from file to file).\n\n Returns:\n * np.ndarray - the mono audio signal\n * float - The sample rate of the audio file\n\n \"\"\"\n audio, sr = librosa.load(fhandle, sr=sr, mono=True)\n return audio, sr\n\n\n@core.docstring_inherit(core.Dataset)\nclass Dataset(core.Dataset):\n \"\"\"\n The FSDnoisy18K 
dataset\n \"\"\"\n\n def __init__(self, data_home=None):\n super().__init__(\n data_home,\n name=\"fsdnoisy18k\",\n clip_class=Clip,\n bibtex=BIBTEX,\n remotes=REMOTES,\n license_info=LICENSE_INFO,\n )\n\n @core.copy_docs(load_audio)\n def load_audio(self, *args, **kwargs):\n return load_audio(*args, **kwargs)\n\n @core.cached_property\n def _metadata(self):\n\n metadata_train_path = os.path.join(\n self.data_home, \"FSDnoisy18k.meta\", \"train.csv\"\n )\n metadata_test_path = os.path.join(\n self.data_home, \"FSDnoisy18k.meta\", \"test.csv\"\n )\n\n if not os.path.exists(metadata_train_path):\n raise FileNotFoundError(\n \"Train metadata not found. Did you run .download()?\"\n )\n if not os.path.exists(metadata_test_path):\n raise FileNotFoundError(\"Test metadata not found. Did you run .download()?\")\n\n metadata_index = {}\n\n with open(metadata_train_path, \"r\") as f:\n reader = csv.reader(f, delimiter=\",\")\n next(reader)\n for row in reader:\n metadata_index[row[0].replace(\".wav\", \"\")] = {\n \"split\": \"train\",\n \"tag\": row[1],\n \"aso_id\": str(row[2]),\n \"manually_verified\": int(row[3]),\n \"noisy_small\": int(row[4]),\n }\n\n with open(metadata_test_path, \"r\") as f:\n reader = csv.reader(f, delimiter=\",\")\n next(reader)\n for row in reader:\n metadata_index[row[0].replace(\".wav\", \"\")] = {\n \"split\": \"test\",\n \"tag\": row[1],\n \"aso_id\": str(row[2]),\n }\n\n return metadata_index\n" ]
[ [ "numpy.array" ] ]
Alejandro-Valdes/fetal-brain-segmentation
[ "09f7ecea1b0935f96a2bc7b014092439285fea69" ]
[ "models/experimental_models/unet_resnet_upconv_se.py" ]
[ "from losses import *\nfrom keras.models import Model\nfrom keras.optimizers import RMSprop, Adam, SGD\nfrom keras.losses import binary_crossentropy\nfrom keras import backend as K\nfrom keras import layers\nimport numpy as np\n\nimport tensorflow as tf\n\n\ndef down_conv(init, nb_filter, se_version):\n x = layers.Conv2D(nb_filter, (3, 3), padding='same', activation='relu',\n kernel_initializer = 'he_normal')(init)\n x = layers.BatchNormalization()(x)\n\n if se_version:\n x = squeeze_excite_block(x)\n\n\n x = layers.MaxPooling2D(pool_size=(2, 2), padding='same')(x)\n\n return x\n\ndef up_conv(init, skip, nb_filter, se_version):\n x = layers.Conv2DTranspose(nb_filter, 2, strides=(2,2),\n activation='relu', kernel_initializer='he_normal')(init)\n x = layers.BatchNormalization()(x)\n\n if se_version:\n x = squeeze_excite_block(x)\n\n x = layers.concatenate([x, skip], axis=3)\n return x\n\ndef res_block(init, nb_filter, se_version):\n x = layers.Conv2D(nb_filter, (3, 3), padding='same', activation='relu',\n kernel_initializer = 'he_normal')(init)\n x = layers.BatchNormalization()(x)\n\n x = layers.Conv2D(nb_filter, (3, 3), padding='same', activation='relu',\n kernel_initializer = 'he_normal')(x)\n x = layers.BatchNormalization()(x)\n\n if se_version:\n x = squeeze_excite_block(x)\n\n x = layers.concatenate([init, x], axis=3)\n return x\n\ndef squeeze_excite_block(input, ratio=16):\n init = input\n channel_axis = -1\n filters = init._keras_shape[channel_axis]\n se_shape = (1, 1, filters)\n\n se = layers.GlobalAveragePooling2D()(init)\n se = layers.Reshape(se_shape)(se)\n se = layers.Dense(filters // ratio, activation='relu')(se)\n se = layers.Dense(filters, activation='sigmoid')(se)\n\n x = layers.multiply([init, se])\n return x\n\n\ndef create_model(input_shape, se_version):\n inputs = layers.Input(shape=input_shape)\n i = 0\n\n #0\n x = down_conv(inputs, 32, se_version)\n x0 = res_block(x, 32, se_version)\n\n #1\n x = down_conv(x0, 64, se_version)\n x1 = res_block(x, 64, se_version)\n\n #2\n x = down_conv(x1, 128, se_version)\n x2 = res_block(x, 128, se_version)\n\n #3\n x = down_conv(x2, 256, se_version)\n x3 = res_block(x, 256, se_version)\n x3 = layers.Dropout(0.5)(x3)\n\n #--------------- center ------------\n x = down_conv(x3, 512, se_version)\n x = res_block(x, 512, se_version)\n x = layers.Dropout(0.5)(x)\n #--------------- center ------------\n\n #3\n x = up_conv(x, x3, 256, se_version)\n x = res_block(x, 256, se_version)\n\n #2\n x = up_conv(x, x2, 128, se_version)\n x = res_block(x, 128, se_version)\n\n #1\n x = up_conv(x, x1, 64, se_version)\n x = res_block(x, 64, se_version)\n\n #0\n x = up_conv(x, x0, 32, se_version)\n x = res_block(x, 32, se_version)\n\n x = up_conv(x, inputs, 16, se_version)\n x = res_block(x, 16, se_version)\n\n classify = layers.Conv2D(1, (1, 1), activation='sigmoid')(x)\n model = Model(inputs=inputs, outputs=classify)\n\n model.compile(optimizer = Adam(lr = 1e-4),\n loss = binary_crossentropy,\n metrics = [dice_coef])\n\n return model\n\ndef getUnetResUpconv(se_version=False):\n\n tf.reset_default_graph()\n sess = tf.Session()\n K.clear_session()\n\n model = create_model((256,256,1), se_version)\n #print(model.summary())\n return model\n" ]
[ [ "tensorflow.reset_default_graph", "tensorflow.Session" ] ]
sanixa/gan-leaks-custom
[ "f2efd8c8f4d267dd728bf00c8936d6f04a63736e" ]
[ "attack_models/tools/eval_roc.py" ]
[ "import numpy as np\nimport os\nimport argparse\nimport matplotlib.pyplot as plt\nfrom sklearn import metrics\n\n\n############################################################################\n# visualization functions\n############################################################################\ndef plot_roc(pos_results, neg_results):\n labels = np.concatenate((np.zeros((len(neg_results),)), np.ones((len(pos_results),))))\n results = np.concatenate((neg_results, pos_results))\n\n ##--------------\n acc_thres = np.sort(results)[int(len(results)/2)]\n pred_labels = [1 if x > acc_thres else 0 for x in results]\n acc = metrics.accuracy_score(labels, pred_labels)\n tn, fp, fn, tp = metrics.confusion_matrix(labels, pred_labels).ravel()\n tpr_thres = tp/ (tp+fn)\n fpr_thres = fp/ (fp+tn)\n\n fpr, tpr, threshold = metrics.roc_curve(labels, results, pos_label=1)\n auc = metrics.roc_auc_score(labels, results)\n ap = metrics.average_precision_score(labels, results)\n return fpr, tpr, threshold, auc, ap, acc, tpr_thres, fpr_thres\n\n\ndef plot_hist(pos_dist, neg_dist, save_file):\n plt.figure()\n plt.hist(pos_dist, bins=100, alpha=0.5, weights=np.zeros_like(pos_dist) + 1. / pos_dist.size, label='positive')\n plt.hist(neg_dist, bins=100, alpha=0.5, weights=np.zeros_like(neg_dist) + 1. / neg_dist.size, label='negative')\n plt.legend(loc='upper right')\n plt.tight_layout()\n plt.xlabel('distance')\n plt.ylabel('normalized frequency')\n plt.savefig(save_file)\n plt.close()\n\n\n#############################################################################################################\n# get the arguments\n#############################################################################################################\ndef parse_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument('--result_load_dir', '-ldir', type=str, default=None,\n help='directory of the attack result')\n parser.add_argument('--attack_type', type=str, choices=['fbb', 'pbb', 'wb'],\n help='type of the attack')\n parser.add_argument('--reference_load_dir', '-rdir', default=None,\n help='directory for the reference model result (optional)')\n parser.add_argument('--save_dir', '-sdir', type=str, default=None,\n help='directory for saving the evaluation results (optional)')\n return parser.parse_args()\n\n\n#############################################################################################################\n# main\n#############################################################################################################\ndef main():\n args = parse_arguments()\n attack_type = args.attack_type\n result_load_dir = args.result_load_dir\n reference_load_dir = args.reference_load_dir\n save_dir = args.save_dir\n result_load_dir = result_load_dir.strip(\"\\r\")\n print(f\"atk_type:{args.attack_type}\\nldir:{result_load_dir}\")\n if attack_type == 'fbb':\n pos_loss = np.load(result_load_dir+'/pos_loss.npy')[:, 0]\n neg_loss = np.load(result_load_dir+'/neg_loss.npy')[:, 0]\n else:\n pos_loss = np.load(result_load_dir+ '/pos_loss.npy').flatten()\n neg_loss = np.load(result_load_dir+'/neg_loss.npy').flatten()\n\n ### plot roc curve\n fpr, tpr, threshold, auc, ap, acc, tpr_thres, fpr_thres = plot_roc(-pos_loss, -neg_loss)\n plt.plot(fpr, tpr, label='%s attack, auc=%.3f, ap=%.3f' % (attack_type, auc, ap))\n\n print(\"The accuracy value of %s attack is: %.3f \" % (attack_type, acc))\n print(\"The tpr_thres/fpr_thres value of %s attack is: %.3f and %.3f\" % (attack_type, tpr_thres, fpr_thres))\n print(\"The AUC ROC value of 
%s attack is: %.3f \" % (attack_type, auc))\n\n ################################################################\n # attack calibration\n ################################################################\n if reference_load_dir is not None:\n pos_ref = np.load(os.path.join(reference_load_dir, 'pos_loss.npy'))\n neg_ref = np.load(os.path.join(reference_load_dir, 'neg_loss.npy'))\n\n num_pos_samples = np.minimum(len(pos_loss), len(pos_ref))\n num_neg_samples = np.minimum(len(neg_loss), len(neg_ref))\n\n try:\n pos_calibrate = pos_loss[:num_pos_samples] - pos_ref[:num_pos_samples]\n neg_calibrate = neg_loss[:num_neg_samples] - neg_ref[:num_neg_samples]\n\n except:\n pos_calibrate = pos_loss[:num_pos_samples] - pos_ref[:num_pos_samples, 0]\n neg_calibrate = neg_loss[:num_neg_samples] - neg_ref[:num_neg_samples, 0]\n\n fpr, tpr, threshold, auc, ap = plot_roc(-pos_calibrate, -neg_calibrate)\n plt.plot(fpr, tpr, label='calibrated %s attack, auc=%.3f, ap=%.3f' % (attack_type, auc, ap))\n \n print(\"The AUC ROC value of calibrated %s attack is: %.3f \" % (attack_type, auc))\n\n plt.legend(loc='lower right')\n plt.xlabel('false positive')\n plt.ylabel('true positive')\n plt.title('ROC curve')\n\n if save_dir is not None:\n plt.savefig(os.path.join(save_dir, 'roc.png'))\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "sklearn.metrics.roc_auc_score", "matplotlib.pyplot.legend", "sklearn.metrics.confusion_matrix", "numpy.concatenate", "matplotlib.pyplot.plot", "numpy.zeros_like", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.close", "numpy.load", "matplotlib.pyplot.figure", "matplotlib.pyplot.title", "sklearn.metrics.roc_curve", "matplotlib.pyplot.savefig", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "numpy.sort", "sklearn.metrics.average_precision_score", "matplotlib.pyplot.xlabel", "sklearn.metrics.accuracy_score" ] ]
WNoxchi/fastai_docs
[ "7a53964531a67c193c19235ed135c4793e65256c" ]
[ "dev_course/dl2/exp/nb_01.py" ]
[ "\n#################################################\n### THIS FILE WAS AUTOGENERATED! DO NOT EDIT! ###\n#################################################\n# file to edit: dev_nb/01_matmul_workbook.ipynb\n\nfrom exp.nb_00 import *\nimport operator\n\ndef test(a,b,cmp,cname=None):\n if cname is None: cname=cmp.__name__\n assert cmp(a,b),f\"{cname}:\\n{a}\\n{b}\"\n\ndef test_eq(a,b): test(a,b,operator.eq,'==')\n\nfrom pathlib import Path\nfrom IPython.core.debugger import set_trace\nfrom fastai import datasets\nimport pickle, gzip, math, torch, matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom torch import tensor\n\nMNIST_URL='http://deeplearning.net/data/mnist/mnist.pkl'\n\ndef near(a,b): return torch.allclose(a, b, rtol=1e-3, atol=1e-5)\ndef test_near(a,b): test(a,b,near)" ]
[ [ "torch.allclose" ] ]
lensonp/hwaves
[ "0f566d8380515a5e64a77d72466900d1676a93ee" ]
[ "tests/test_hwf_density.py" ]
[ "import os\n\nimport numpy as np\n\nfrom hwaves.hwf_density import cartesian_density, pack_cartesian_data, write_cartesian\nfrom hwaves.hwf import radial_wf_integral\n\ntests_path = os.path.join(os.getcwd(),'tests')\n\nnx = 30\nny = 30\nnz = 30\ndx = 0.1\ndy = 0.1\ndz = 0.1\n\ndef test_cartesian_density():\n\n x, y, z, PV = cartesian_density(1,0,0,nx,ny,nz,dx,dy,dz)\n ijk_xyz_PV = pack_cartesian_data(x,y,z,PV)\n fpath = os.path.join(tests_path,'cartesian_density_1s.dat')\n write_cartesian(ijk_xyz_PV,fpath)\n #assert(os.path.exists(fpath))\n\ndef test_wf_integrals():\n r_A = np.linspace(0.,50.,1000)\n for n in range(1,4):\n for l in range(0,n):\n integ = radial_wf_integral(n,l,r_A)\n print('density integral for n={},l={}: {}'.format(n,l,integ))\n\n" ]
[ [ "numpy.linspace" ] ]
stuartcrobinson/ImageAI
[ "654f91b55fad98752f650a4fd7b26d3edfd64175" ]
[ "examples/object_detectionSTUART_croppingVideoFrames.py" ]
[ "import json\nimport os\nimport statistics as s\nimport time\nfrom os import listdir\n\nimport cv2\nfrom scipy.spatial import distance\n\nfrom imageai.Detection import ObjectDetection\n\nexecution_path = os.getcwd()\n\ndetectorY = ObjectDetection()\ndetectorY.setModelTypeAsYOLOv3()\ndetectorY.setModelPath(\"/Users/stuartrobinson/repos/computervision/ImageAI/gitignore/yolo.h5\")\ndetectorY.loadModel()\n\n\ndef translateCropCoordToOrig(xy, cropCoords):\n return [xy[0] + cropCoords[0], xy[1] + cropCoords[1]]\n\n\ndef distanceBetween(box_points, ballXY):\n print(\"in distanceBetween, \", box_points, \", \", ballXY)\n boxX = s.mean([box_points[0], box_points[2]])\n boxY = s.mean([box_points[1], box_points[3]])\n return distance.euclidean((boxX, boxY), (ballXY[0], ballXY[1]))\n\n\ndef translateCropBoxPointsToOrig(XYs, cropCoords):\n return translateCropCoordToOrig([XYs[0], XYs[1]], cropCoords)+ translateCropCoordToOrig([XYs[2], XYs[3]], cropCoords)\n\n\ndef getTennisBallCoordinates(detections, currCropCoords, prevNonZeroBallXY):\n # 'box_points': array([ 47, 125, 68, 166]),\n balls = list(filter(lambda x: x['name'] == 'sports ball', detections))\n if prevNonZeroBallXY != [0, 0]:\n for ball in balls:\n ball['distance'] = distanceBetween(translateCropBoxPointsToOrig(ball['box_points'], currCropCoords), prevNonZeroBallXY)\n pprint.pprint(balls)\n if len(balls) == 0:\n return [0, 0]\n if prevNonZeroBallXY != [0, 0]:\n balls = list(filter(lambda j: j['distance'] < 150, balls))\n if len(balls) == 0:\n return [0, 0]\n themax = max(balls, key=lambda k: k['percentage_probability'])\n ballCoords = themax['box_points']\n x = s.mean([ballCoords[0], ballCoords[2]])\n y = s.mean([ballCoords[1], ballCoords[3]])\n # x = currCropCoords[0] + x\n # y = currCropCoords[1] + y\n return translateCropCoordToOrig([x, y], currCropCoords)\n\n\ndef saveDetections(detections, file):\n with open(file, 'w') as outfile:\n json.dump(json.dump(detections), outfile)\n\n\ndef saveBall(ballXY, file):\n with open(file, 'w') as outfile:\n json.dump(json.loads(str(ballXY)), outfile)\n\n\nimport pprint\n\n\ndef findBall(im, prevBallXY, file, outDir, radius, prevNonZeroCropCoords, prevNonZeroBallXY, name):\n print(\"radius:\", radius)\n currCropCoords = None\n imCrop = None\n if prevNonZeroBallXY == [0, 0]:\n currCropCoords = [0, 0, 0, 0]\n imCrop = im.copy()\n else:\n if radius < 0:\n x1 = prevNonZeroCropCoords[0]\n y1 = prevNonZeroCropCoords[1]\n x2 = prevNonZeroCropCoords[2]\n y2 = prevNonZeroCropCoords[3]\n currCropCoords = [x1, y1, x2, y2]\n imCrop = im[y1:y2, x1:x2, :]\n else:\n x1 = max(0, prevNonZeroBallXY[0] - radius)\n y1 = max(0, prevNonZeroBallXY[1] - radius)\n x2 = min(im.shape[1], x1 + radius * 2)\n y2 = min(im.shape[0], y1 + radius * 2)\n currCropCoords = [x1, y1, x2, y2]\n imCrop = im[y1:y2, x1:x2, :]\n #\n print(\"currCropCoords\", currCropCoords)\n # print(\"im shape\", imCrop.shape)\n print(outDir, file)\n start_timeY = time.time()\n os.makedirs(outDir, exist_ok=True)\n detectionsY = detectorY.detectObjectsFromImage(\n input_image=imCrop,\n input_type='array',\n output_image_path=os.path.join(outDir, file + '_' + str(name) + \".jpg\"),\n minimum_percentage_probability=0)\n print(\"\\ntookY\", time.time() - start_timeY)\n ballXY = getTennisBallCoordinates(detectionsY, currCropCoords, prevNonZeroBallXY)\n print(\"ballXY\", ballXY)\n print(\"-----------\")\n return detectionsY, currCropCoords, ballXY\n\n\n# inputDir = 
\"/Users/stuartrobinson/repos/computervision/andre_aigassi/images/tennis_video/frames/raw/backhand\"\ninputDir = \"/Users/stuartrobinson/repos/computervision/andre_aigassi/images/tennis_video/frames/raw/19sec\"\n\noutDir = os.path.join(execution_path, \"gitignore\", \"19sec_crop\")\nos.makedirs(outDir, exist_ok=True)\n\nonlyfiles = [f for f in listdir(inputDir) if f.endswith('.png')]\nonlyfiles.sort()\n\nprevBallXY = [0, 0] # tennisBall\nprevNonZeroBallXY = [0, 0] # tennisBall\ncurrCropCoords = [0, 0, 0, 0] #\nprevNonZeroCropCoords = [0, 0, 0, 0] #\n\ncount = 0\nfor file in onlyfiles:\n count += 1\n # if count < 100:\n # continue\n # if 19 < count < 47:\n # continue\n # # if count < 47:\n # # continue\n # if count > 113:\n # break\n print(\"\\n------------------------------------------------------------------------\")\n filePath = os.path.join(inputDir, file)\n print(\"file:\", filePath)\n print(\"prevBallXY\", prevBallXY)\n print(\"prevNonZeroCropCoords\", prevNonZeroCropCoords)\n print(\"prevNonZeroBallXY\", prevNonZeroBallXY)\n im = cv2.imread(filePath, cv2.IMREAD_COLOR) # , cv2.IMREAD_GRAYSCALE)\n im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)\n detectionsY, currCropCoords, ballXY = findBall(im, prevBallXY, file, outDir, 100, prevNonZeroCropCoords, prevNonZeroBallXY, 1)\n if ballXY == [0, 0]:\n detectionsY, currCropCoords, ballXY = findBall(im, prevBallXY, file, outDir, 150, prevNonZeroCropCoords, prevNonZeroBallXY, 2)\n if ballXY == [0, 0]:\n detectionsY, currCropCoords, ballXY = findBall(im, prevBallXY, file, outDir, 250, prevNonZeroCropCoords, prevNonZeroBallXY, 3)\n # if ballXY == [0, 0]:\n # detectionsY, currCropCoords, ballXY = findBall(im, prevBallXY, file, outDir, 500, prevNonZeroCropCoords, prevNonZeroBallXY, 3)\n # if ballXY == [0, 0]:\n # detectionsY, currCropCoords, ballXY = findBall(im, prevBallXY, file, outDir, -1, prevNonZeroCropCoords, prevNonZeroBallXY, 4)\n if currCropCoords != [0, 0, 0, 0]:\n prevNonZeroCropCoords = currCropCoords\n saveBall(ballXY, os.path.join(outDir, file + \".json\"))\n prevBallXY = ballXY\n if ballXY != [0, 0]:\n prevNonZeroBallXY = ballXY\n\nprint('\\nstarting\\n')\n\n#TODO 486_3 to 487 has the wrong zoom\n\n#why 477_3 never pick up ball? or 481? 485 - caught wrong ball" ]
[ [ "scipy.spatial.distance.euclidean" ] ]
AITuringSAS/images_for_codecommit
[ "e2d383fbcaead9024f9d26751ee0aa3d39688258" ]
[ "efficientdet_aituring/automl/efficientdet/utils.py" ]
[ "# Copyright 2020 Google Research. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Common utils.\"\"\"\nimport contextlib\nimport os\nfrom typing import Text, Tuple, Union\nfrom absl import logging\nimport numpy as np\nimport tensorflow.compat.v1 as tf\nimport tensorflow.compat.v2 as tf2\nfrom tensorflow.python.eager import tape as tape_lib # pylint:disable=g-direct-tensorflow-import\nfrom tensorflow.python.tpu import tpu_function # pylint:disable=g-direct-tensorflow-import\n# pylint: disable=logging-format-interpolation\n\n\ndef srelu_fn(x):\n \"\"\"Smooth relu: a smooth version of relu.\"\"\"\n with tf.name_scope('srelu'):\n beta = tf.Variable(20.0, name='srelu_beta', dtype=tf.float32)**2\n beta = tf.cast(beta**2, x.dtype)\n safe_log = tf.math.log(tf.where(x > 0., beta * x + 1., tf.ones_like(x)))\n return tf.where((x > 0.), x - (1. / beta) * safe_log, tf.zeros_like(x))\n\n\ndef activation_fn(features: tf.Tensor, act_type: Text):\n \"\"\"Customized non-linear activation type.\"\"\"\n if act_type in ('silu', 'swish'):\n return tf.nn.swish(features)\n elif act_type == 'swish_native':\n return features * tf.sigmoid(features)\n elif act_type == 'hswish':\n return features * tf.nn.relu6(features + 3) / 6\n elif act_type == 'relu':\n return tf.nn.relu(features)\n elif act_type == 'relu6':\n return tf.nn.relu6(features)\n elif act_type == 'mish':\n return features * tf.math.tanh(tf.math.softplus(features))\n elif act_type == 'srelu':\n return srelu_fn(features)\n else:\n raise ValueError('Unsupported act_type {}'.format(act_type))\n\n\ndef cross_replica_mean(t, num_shards_per_group=None):\n \"\"\"Calculates the average value of input tensor across TPU replicas.\"\"\"\n num_shards = tpu_function.get_tpu_context().number_of_shards\n if not num_shards:\n return t\n\n if not num_shards_per_group:\n return tf.tpu.cross_replica_sum(t) / tf.cast(num_shards, t.dtype)\n\n group_assignment = None\n if num_shards_per_group > 1:\n if num_shards % num_shards_per_group != 0:\n raise ValueError('num_shards: %d mod shards_per_group: %d, should be 0' %\n (num_shards, num_shards_per_group))\n num_groups = num_shards // num_shards_per_group\n group_assignment = [[\n x for x in range(num_shards) if x // num_shards_per_group == y\n ] for y in range(num_groups)]\n return tf.tpu.cross_replica_sum(t, group_assignment) / tf.cast(\n num_shards_per_group, t.dtype)\n\n\ndef get_ema_vars():\n \"\"\"Get all exponential moving average (ema) variables.\"\"\"\n ema_vars = (\n tf.trainable_variables() +\n tf.get_collection(tf.GraphKeys.MOVING_AVERAGE_VARIABLES))\n for v in tf.global_variables():\n # We maintain mva for batch norm moving mean and variance as well.\n if 'moving_mean' in v.name or 'moving_variance' in v.name:\n ema_vars.append(v)\n return list(set(ema_vars))\n\n\ndef get_ckpt_var_map(ckpt_path, ckpt_scope, var_scope, skip_mismatch=None):\n \"\"\"Get a var map for restoring from pretrained checkpoints.\n\n 
Args:\n ckpt_path: string. A pretrained checkpoint path.\n ckpt_scope: string. Scope name for checkpoint variables.\n var_scope: string. Scope name for model variables.\n skip_mismatch: skip variables if shape mismatch.\n\n Returns:\n var_map: a dictionary from checkpoint name to model variables.\n \"\"\"\n logging.info('Init model from checkpoint {}'.format(ckpt_path))\n if not ckpt_scope.endswith('/') or not var_scope.endswith('/'):\n raise ValueError('Please specific scope name ending with /')\n if ckpt_scope.startswith('/'):\n ckpt_scope = ckpt_scope[1:]\n if var_scope.startswith('/'):\n var_scope = var_scope[1:]\n\n var_map = {}\n # Get the list of vars to restore.\n model_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=var_scope)\n reader = tf.train.load_checkpoint(ckpt_path)\n ckpt_var_name_to_shape = reader.get_variable_to_shape_map()\n ckpt_var_names = set(reader.get_variable_to_shape_map().keys())\n\n if tf.distribute.get_replica_context():\n replica_id = tf.get_static_value(\n tf.distribute.get_replica_context().replica_id_in_sync_group)\n else:\n replica_id = 0\n\n for i, v in enumerate(model_vars):\n var_op_name = v.op.name\n\n if replica_id >= 1:\n var_op_name = ''.join(var_op_name.rsplit(f'/replica_{replica_id}', 1))\n\n if not var_op_name.startswith(var_scope):\n logging.info('skip {} -- does not match scope {}'.format(\n var_op_name, var_scope))\n ckpt_var = ckpt_scope + var_op_name[len(var_scope):]\n if 'global_step' in ckpt_var:\n continue\n\n if (ckpt_var not in ckpt_var_names and\n var_op_name.endswith('/ExponentialMovingAverage')):\n ckpt_var = ckpt_scope + var_op_name[:-len('/ExponentialMovingAverage')]\n\n if ckpt_var not in ckpt_var_names:\n if 'Momentum' in ckpt_var or 'RMSProp' in ckpt_var:\n # Skip optimizer variables.\n continue\n if skip_mismatch:\n logging.info('skip {} ({}) -- not in ckpt'.format(\n var_op_name, ckpt_var))\n continue\n raise ValueError('{} is not in ckpt {}'.format(v.op, ckpt_path))\n\n if v.shape != ckpt_var_name_to_shape[ckpt_var]:\n if skip_mismatch:\n logging.info('skip {} ({} vs {}) -- shape mismatch'.format(\n var_op_name, v.shape, ckpt_var_name_to_shape[ckpt_var]))\n continue\n raise ValueError('shape mismatch {} ({} vs {})'.format(\n var_op_name, v.shape, ckpt_var_name_to_shape[ckpt_var]))\n\n if i < 5:\n # Log the first few elements for sanity check.\n logging.info('Init {} from ckpt var {}'.format(var_op_name, ckpt_var))\n var_map[ckpt_var] = v\n\n return var_map\n\n\nclass TpuBatchNormalization(tf.keras.layers.BatchNormalization):\n \"\"\"Cross replica batch normalization.\"\"\"\n\n def __init__(self, fused=False, **kwargs):\n if not kwargs.get('name', None):\n kwargs['name'] = 'tpu_batch_normalization'\n if fused in (True, None):\n raise ValueError('TpuBatchNormalization does not support fused=True.')\n super().__init__(fused=fused, **kwargs)\n\n def _moments(self, inputs, reduction_axes, keep_dims):\n \"\"\"Compute the mean and variance: it overrides the original _moments.\"\"\"\n shard_mean, shard_variance = super()._moments(\n inputs, reduction_axes, keep_dims=keep_dims)\n\n num_shards = tpu_function.get_tpu_context().number_of_shards or 1\n num_shards_per_group = min(32, num_shards) # aggregate up to 32 cores.\n logging.info('TpuBatchNormalization with num_shards_per_group {}'.format(\n num_shards_per_group))\n if num_shards_per_group > 1:\n # Compute variance using: Var[X]= E[X^2] - E[X]^2.\n shard_square_of_mean = tf.math.square(shard_mean)\n shard_mean_of_square = shard_variance + shard_square_of_mean\n 
group_mean = cross_replica_mean(shard_mean, num_shards_per_group)\n group_mean_of_square = cross_replica_mean(shard_mean_of_square,\n num_shards_per_group)\n group_variance = group_mean_of_square - tf.math.square(group_mean)\n return (group_mean, group_variance)\n else:\n return (shard_mean, shard_variance)\n\n def call(self, inputs, training=None):\n outputs = super().call(inputs, training)\n # A temporary hack for tf1 compatibility with keras batch norm.\n for u in self.updates:\n tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, u)\n return outputs\n\n\nclass SyncBatchNormalization(tf.keras.layers.BatchNormalization):\n \"\"\"Cross replica batch normalization.\"\"\"\n\n def __init__(self, fused=False, **kwargs):\n if not kwargs.get('name', None):\n kwargs['name'] = 'tpu_batch_normalization'\n if fused in (True, None):\n raise ValueError('SyncBatchNormalization does not support fused=True.')\n super().__init__(fused=fused, **kwargs)\n\n def _moments(self, inputs, reduction_axes, keep_dims):\n \"\"\"Compute the mean and variance: it overrides the original _moments.\"\"\"\n shard_mean, shard_variance = super()._moments(\n inputs, reduction_axes, keep_dims=keep_dims)\n\n replica_context = tf.distribute.get_replica_context()\n num_shards = replica_context.num_replicas_in_sync or 1\n\n if num_shards > 1:\n # Compute variance using: Var[X]= E[X^2] - E[X]^2.\n shard_square_of_mean = tf.math.square(shard_mean)\n shard_mean_of_square = shard_variance + shard_square_of_mean\n group_mean = replica_context.all_reduce(\n tf.distribute.ReduceOp.MEAN, shard_mean)\n group_mean_of_square = replica_context.all_reduce(\n tf.distribute.ReduceOp.MEAN, shard_mean_of_square)\n group_variance = group_mean_of_square - tf.math.square(group_mean)\n return (group_mean, group_variance)\n else:\n return (shard_mean, shard_variance)\n\n def call(self, inputs, training=None):\n outputs = super().call(inputs, training)\n # A temporary hack for tf1 compatibility with keras batch norm.\n for u in self.updates:\n tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, u)\n return outputs\n\n\nclass BatchNormalization(tf.keras.layers.BatchNormalization):\n \"\"\"Fixed default name of BatchNormalization to match TpuBatchNormalization.\"\"\"\n\n def __init__(self, **kwargs):\n if not kwargs.get('name', None):\n kwargs['name'] = 'tpu_batch_normalization'\n super().__init__(**kwargs)\n\n def call(self, inputs, training=None):\n outputs = super().call(inputs, training)\n # A temporary hack for tf1 compatibility with keras batch norm.\n for u in self.updates:\n tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, u)\n return outputs\n\n\ndef batch_norm_class(is_training, strategy=None):\n if is_training and strategy == 'tpu':\n return TpuBatchNormalization\n elif is_training and strategy == 'gpus':\n return SyncBatchNormalization\n else:\n return BatchNormalization\n\n\ndef batch_normalization(inputs, training=False, strategy=None, **kwargs):\n \"\"\"A wrapper for TpuBatchNormalization.\"\"\"\n bn_layer = batch_norm_class(training, strategy)(**kwargs)\n return bn_layer(inputs, training=training)\n\n\ndef batch_norm_act(inputs,\n is_training_bn: bool,\n act_type: Union[Text, None],\n init_zero: bool = False,\n data_format: Text = 'channels_last',\n momentum: float = 0.99,\n epsilon: float = 1e-3,\n strategy: Text = None,\n name: Text = None):\n \"\"\"Performs a batch normalization followed by a non-linear activation.\n\n Args:\n inputs: `Tensor` of shape `[batch, channels, ...]`.\n is_training_bn: `bool` for whether the model is training.\n act_type: 
non-linear relu function type. If None, omits the relu operation.\n init_zero: `bool` if True, initializes scale parameter of batch\n normalization with 0 instead of 1 (default).\n data_format: `str` either \"channels_first\" for `[batch, channels, height,\n width]` or \"channels_last for `[batch, height, width, channels]`.\n momentum: `float`, momentume of batch norm.\n epsilon: `float`, small value for numerical stability.\n strategy: string to specify training strategy for TPU/GPU/CPU.\n name: the name of the batch normalization layer\n\n Returns:\n A normalized `Tensor` with the same `data_format`.\n \"\"\"\n if init_zero:\n gamma_initializer = tf.zeros_initializer()\n else:\n gamma_initializer = tf.ones_initializer()\n\n if data_format == 'channels_first':\n axis = 1\n else:\n axis = 3\n\n inputs = batch_normalization(\n inputs=inputs,\n axis=axis,\n momentum=momentum,\n epsilon=epsilon,\n center=True,\n scale=True,\n training=is_training_bn,\n strategy=strategy,\n gamma_initializer=gamma_initializer,\n name=name)\n\n if act_type:\n inputs = activation_fn(inputs, act_type)\n return inputs\n\n\ndef drop_connect(inputs, is_training, survival_prob):\n \"\"\"Drop the entire conv with given survival probability.\"\"\"\n # \"Deep Networks with Stochastic Depth\", https://arxiv.org/pdf/1603.09382.pdf\n if not is_training:\n return inputs\n\n # Compute tensor.\n batch_size = tf.shape(inputs)[0]\n random_tensor = survival_prob\n random_tensor += tf.random.uniform([batch_size, 1, 1, 1], dtype=inputs.dtype)\n binary_tensor = tf.floor(random_tensor)\n # Unlike conventional way that multiply survival_prob at test time, here we\n # divide survival_prob at training time, such that no addition compute is\n # needed at test time.\n output = inputs / survival_prob * binary_tensor\n return output\n\n\ndef num_params_flops(readable_format=True):\n \"\"\"Return number of parameters and flops.\"\"\"\n nparams = np.sum(\n [np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])\n options = tf.profiler.ProfileOptionBuilder.float_operation()\n options['output'] = 'none'\n flops = tf.profiler.profile(\n tf.get_default_graph(), options=options).total_float_ops\n # We use flops to denote multiply-adds, which is counted as 2 ops in tfprof.\n flops = flops // 2\n if readable_format:\n nparams = float(nparams) * 1e-6\n flops = float(flops) * 1e-9\n return nparams, flops\n\n\nconv_kernel_initializer = tf.initializers.variance_scaling()\ndense_kernel_initializer = tf.initializers.variance_scaling()\n\n\nclass Pair(tuple):\n\n def __new__(cls, name, value):\n return super().__new__(cls, (name, value))\n\n def __init__(self, name, _): # pylint: disable=super-init-not-called\n self.name = name\n\n\ndef scalar(name, tensor, is_tpu=True):\n \"\"\"Stores a (name, Tensor) tuple in a custom collection.\"\"\"\n logging.info('Adding scalar summary {}'.format(Pair(name, tensor)))\n if is_tpu:\n tf.add_to_collection('scalar_summaries', Pair(name, tf.reduce_mean(tensor)))\n else:\n tf.summary.scalar(name, tf.reduce_mean(tensor))\n\n\ndef image(name, tensor, is_tpu=True):\n logging.info('Adding image summary {}'.format(Pair(name, tensor)))\n if is_tpu:\n tf.add_to_collection('image_summaries', Pair(name, tensor))\n else:\n tf.summary.image(name, tensor)\n\n\ndef get_tpu_host_call(global_step, params):\n \"\"\"Get TPU host call for summaries.\"\"\"\n scalar_summaries = tf.get_collection('scalar_summaries')\n if params['img_summary_steps']:\n image_summaries = tf.get_collection('image_summaries')\n else:\n 
image_summaries = []\n if not scalar_summaries and not image_summaries:\n return None # No summaries to write.\n\n model_dir = params['model_dir']\n iterations_per_loop = params.get('iterations_per_loop', 100)\n img_steps = params['img_summary_steps']\n\n def host_call_fn(global_step, *args):\n \"\"\"Training host call. Creates summaries for training metrics.\"\"\"\n gs = global_step[0]\n with tf2.summary.create_file_writer(\n model_dir, max_queue=iterations_per_loop).as_default():\n with tf2.summary.record_if(True):\n for i, _ in enumerate(scalar_summaries):\n name = scalar_summaries[i][0]\n tensor = args[i][0]\n tf2.summary.scalar(name, tensor, step=gs)\n\n if img_steps:\n with tf2.summary.record_if(lambda: tf.math.equal(gs % img_steps, 0)):\n # Log images every 1k steps.\n for i, _ in enumerate(image_summaries):\n name = image_summaries[i][0]\n tensor = args[i + len(scalar_summaries)]\n tf2.summary.image(name, tensor, step=gs)\n\n return tf.summary.all_v2_summary_ops()\n\n reshaped_tensors = [tf.reshape(t, [1]) for _, t in scalar_summaries]\n reshaped_tensors += [t for _, t in image_summaries]\n global_step_t = tf.reshape(global_step, [1])\n return host_call_fn, [global_step_t] + reshaped_tensors\n\n\ndef archive_ckpt(ckpt_eval, ckpt_objective, ckpt_path):\n \"\"\"Archive a checkpoint if the metric is better.\"\"\"\n ckpt_dir, ckpt_name = os.path.split(ckpt_path)\n\n saved_objective_path = os.path.join(ckpt_dir, 'best_objective.txt')\n saved_objective = float('-inf')\n if tf.io.gfile.exists(saved_objective_path):\n with tf.io.gfile.GFile(saved_objective_path, 'r') as f:\n saved_objective = float(f.read())\n if saved_objective > ckpt_objective:\n logging.info('Ckpt {} is worse than {}'.format(ckpt_objective,\n saved_objective))\n return False\n\n filenames = tf.io.gfile.glob(ckpt_path + '.*')\n if filenames is None:\n logging.info('No files to copy for checkpoint {}'.format(ckpt_path))\n return False\n\n # clear up the backup folder.\n backup_dir = os.path.join(ckpt_dir, 'backup')\n if tf.io.gfile.exists(backup_dir):\n tf.io.gfile.rmtree(backup_dir)\n\n # rename the old checkpoints to backup folder.\n dst_dir = os.path.join(ckpt_dir, 'archive')\n if tf.io.gfile.exists(dst_dir):\n logging.info('mv {} to {}'.format(dst_dir, backup_dir))\n tf.io.gfile.rename(dst_dir, backup_dir)\n\n # Write checkpoints.\n tf.io.gfile.makedirs(dst_dir)\n for f in filenames:\n dest = os.path.join(dst_dir, os.path.basename(f))\n tf.io.gfile.copy(f, dest, overwrite=True)\n ckpt_state = tf.train.generate_checkpoint_state_proto(\n dst_dir, model_checkpoint_path=os.path.join(dst_dir, ckpt_name))\n with tf.io.gfile.GFile(os.path.join(dst_dir, 'checkpoint'), 'w') as f:\n f.write(str(ckpt_state))\n with tf.io.gfile.GFile(os.path.join(dst_dir, 'best_eval.txt'), 'w') as f:\n f.write('%s' % ckpt_eval)\n\n # Update the best objective.\n with tf.io.gfile.GFile(saved_objective_path, 'w') as f:\n f.write('%f' % ckpt_objective)\n\n logging.info('Copying checkpoint {} to {}'.format(ckpt_path, dst_dir))\n return True\n\n\ndef parse_image_size(image_size: Union[Text, int, Tuple[int, int]]):\n \"\"\"Parse the image size and return (height, width).\n\n Args:\n image_size: A integer, a tuple (H, W), or a string with HxW format.\n\n Returns:\n A tuple of integer (height, width).\n \"\"\"\n if isinstance(image_size, int):\n # image_size is integer, with the same width and height.\n return (image_size, image_size)\n\n if isinstance(image_size, str):\n # image_size is a string with format WxH\n width, height = 
image_size.lower().split('x')\n return (int(height), int(width))\n\n if isinstance(image_size, tuple):\n return image_size\n\n raise ValueError('image_size must be an int, WxH string, or (height, width)'\n 'tuple. Was %r' % image_size)\n\n\ndef get_feat_sizes(image_size: Union[Text, int, Tuple[int, int]],\n max_level: int):\n \"\"\"Get feat widths and heights for all levels.\n\n Args:\n image_size: A integer, a tuple (H, W), or a string with HxW format.\n max_level: maximum feature level.\n\n Returns:\n feat_sizes: a list of tuples (height, width) for each level.\n \"\"\"\n image_size = parse_image_size(image_size)\n feat_sizes = [{'height': image_size[0], 'width': image_size[1]}]\n feat_size = image_size\n for _ in range(1, max_level + 1):\n feat_size = ((feat_size[0] - 1) // 2 + 1, (feat_size[1] - 1) // 2 + 1)\n feat_sizes.append({'height': feat_size[0], 'width': feat_size[1]})\n return feat_sizes\n\n\ndef verify_feats_size(feats,\n feat_sizes,\n min_level,\n max_level,\n data_format='channels_last'):\n \"\"\"Verify the feature map sizes.\"\"\"\n expected_output_size = feat_sizes[min_level:max_level + 1]\n for cnt, size in enumerate(expected_output_size):\n h_id, w_id = (2, 3) if data_format == 'channels_first' else (1, 2)\n if feats[cnt].shape[h_id] != size['height']:\n raise ValueError(\n 'feats[{}] has shape {} but its height should be {}.'\n '(input_height: {}, min_level: {}, max_level: {}.)'.format(\n cnt, feats[cnt].shape, size['height'], feat_sizes[0]['height'],\n min_level, max_level))\n if feats[cnt].shape[w_id] != size['width']:\n raise ValueError(\n 'feats[{}] has shape {} but its width should be {}.'\n '(input_width: {}, min_level: {}, max_level: {}.)'.format(\n cnt, feats[cnt].shape, size['width'], feat_sizes[0]['width'],\n min_level, max_level))\n\n\ndef get_precision(strategy: str, mixed_precision: bool = False):\n \"\"\"Get the precision policy for a given strategy.\"\"\"\n if mixed_precision:\n if strategy == 'tpu':\n return 'mixed_bfloat16'\n\n if tf.config.list_physical_devices('GPU'):\n return 'mixed_float16'\n\n # TODO(fsx950223): Fix CPU float16 inference\n # https://github.com/google/automl/issues/504\n logging.warning('float16 is not supported for CPU, use float32 instead')\n return 'float32'\n\n return 'float32'\n\n\n@contextlib.contextmanager\ndef float16_scope():\n \"\"\"Scope class for float16.\"\"\"\n\n def _custom_getter(getter, *args, **kwargs):\n \"\"\"Returns a custom getter that methods must be called under.\"\"\"\n cast_to_float16 = False\n requested_dtype = kwargs['dtype']\n if requested_dtype == tf.float16:\n kwargs['dtype'] = tf.float32\n cast_to_float16 = True\n var = getter(*args, **kwargs)\n if cast_to_float16:\n var = tf.cast(var, tf.float16)\n return var\n\n with tf.variable_scope('', custom_getter=_custom_getter) as varscope:\n yield varscope\n\n\ndef set_precision_policy(policy_name: Text = None):\n \"\"\"Set precision policy according to the name.\n\n Args:\n policy_name: precision policy name, one of 'float32', 'mixed_float16',\n 'mixed_bfloat16', or None.\n \"\"\"\n if not policy_name:\n return\n\n assert policy_name in ('mixed_float16', 'mixed_bfloat16', 'float32')\n logging.info('use mixed precision policy name %s', policy_name)\n tf.compat.v1.keras.layers.enable_v2_dtype_behavior()\n # mixed_float16 training is not supported for now, so disable loss_scale.\n # float32 and mixed_bfloat16 do not need loss scale for training.\n policy = tf2.keras.mixed_precision.Policy(policy_name)\n 
tf2.keras.mixed_precision.set_global_policy(policy)\n\n\ndef build_model_with_precision(pp, mm, ii, *args, **kwargs):\n \"\"\"Build model with its inputs/params for a specified precision context.\n\n This is highly specific to this codebase, and not intended to be general API.\n Advanced users only. DO NOT use it if you don't know what it does.\n NOTE: short argument names are intended to avoid conficts with kwargs.\n\n Args:\n pp: A string, precision policy name, such as \"mixed_float16\".\n mm: A function, for rmodel builder.\n ii: A tensor, for model inputs.\n *args: A list of model arguments.\n **kwargs: A dict, extra model parameters.\n\n Returns:\n the output of mm model.\n \"\"\"\n if pp == 'mixed_bfloat16':\n set_precision_policy(pp)\n inputs = tf.cast(ii, tf.bfloat16)\n with tf.tpu.bfloat16_scope():\n outputs = mm(inputs, *args, **kwargs)\n elif pp == 'mixed_float16':\n set_precision_policy(pp)\n inputs = tf.cast(ii, tf.float16)\n with float16_scope():\n outputs = mm(inputs, *args, **kwargs)\n elif not pp or pp == 'float32':\n set_precision_policy(pp)\n outputs = mm(ii, *args, **kwargs)\n else:\n raise ValueError('Unknow precision name {}'.format(pp))\n\n # Users are responsible to convert the dtype of all outputs.\n return outputs\n\n\ndef _recompute_grad(f):\n \"\"\"An eager-compatible version of recompute_grad.\n\n For f(*args, **kwargs), this supports gradients with respect to args or\n kwargs, but kwargs are currently only supported in eager-mode.\n Note that for keras layer and model objects, this is handled automatically.\n\n Warning: If `f` was originally a tf.keras Model or Layer object, `g` will not\n be able to access the member variables of that object, because `g` returns\n through the wrapper function `inner`. When recomputing gradients through\n objects that inherit from tf2, we suggest keeping a reference to the\n underlying object around for the purpose of accessing these variables.\n\n Args:\n f: function `f(*x)` that returns a `Tensor` or sequence of `Tensor` outputs.\n\n Returns:\n A function `g` that wraps `f`, but which recomputes `f` on the backwards\n pass of a gradient call.\n \"\"\"\n\n @tf.custom_gradient\n def inner(*args, **kwargs):\n \"\"\"Inner function closure for calculating gradients.\"\"\"\n current_var_scope = tf.get_variable_scope()\n with tape_lib.stop_recording():\n result = f(*args, **kwargs)\n\n def grad_wrapper(*wrapper_args, **grad_kwargs):\n \"\"\"Wrapper function to accomodate lack of kwargs in graph mode decorator.\"\"\"\n\n @tf.custom_gradient\n def inner_recompute_grad(*dresult):\n \"\"\"Nested custom gradient function for computing grads in reverse and forward mode autodiff.\"\"\"\n # Gradient calculation for reverse mode autodiff.\n variables = grad_kwargs.get('variables')\n with tf.GradientTape() as t:\n id_args = tf.nest.map_structure(tf.identity, args)\n t.watch(id_args)\n if variables is not None:\n t.watch(variables)\n with tf.control_dependencies(dresult):\n with tf.variable_scope(current_var_scope):\n result = f(*id_args, **kwargs)\n kw_vars = []\n if variables is not None:\n kw_vars = list(variables)\n grads = t.gradient(\n result,\n list(id_args) + kw_vars,\n output_gradients=dresult,\n unconnected_gradients=tf.UnconnectedGradients.ZERO)\n\n def transpose(*t_args, **t_kwargs):\n \"\"\"Gradient function calculation for forward mode autodiff.\"\"\"\n # Just throw an error since gradients / activations are not stored on\n # tape for recompute.\n raise NotImplementedError(\n 'recompute_grad tried to transpose grad of {}. 
'\n 'Consider not using recompute_grad in forward mode'\n 'autodiff'.format(f.__name__))\n\n return (grads[:len(id_args)], grads[len(id_args):]), transpose\n\n return inner_recompute_grad(*wrapper_args)\n\n return result, grad_wrapper\n\n return inner\n\n\ndef recompute_grad(recompute=False):\n \"\"\"Decorator determine whether use gradient checkpoint.\"\"\"\n\n def _wrapper(f):\n if recompute:\n return _recompute_grad(f)\n return f\n\n return _wrapper\n" ]
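The drop_connect helper in the utils.py record above implements stochastic depth: each example's residual branch is kept with probability survival_prob, and the kept activations are divided by survival_prob during training so no rescaling is needed at inference. Below is a minimal NumPy sketch of that idea; drop_connect_np and the use of NumPy in place of TensorFlow are illustrative assumptions, not part of the recorded file, and it only mirrors the floor(survival_prob + uniform) trick used above.

import numpy as np

def drop_connect_np(inputs, is_training, survival_prob, rng=None):
    """Stochastic-depth sketch: keep each sample's branch with probability
    survival_prob and rescale so the expected output equals the input."""
    if not is_training:
        return inputs
    rng = rng or np.random.default_rng(0)
    batch_size = inputs.shape[0]
    # floor(survival_prob + U[0, 1)) equals 1 with probability survival_prob.
    random_tensor = survival_prob + rng.random((batch_size, 1, 1, 1))
    binary_tensor = np.floor(random_tensor)
    # Divide by survival_prob at training time instead of multiplying at test.
    return inputs / survival_prob * binary_tensor

x = np.ones((512, 4, 4, 8), dtype=np.float32)
print(drop_connect_np(x, is_training=True, survival_prob=0.8).mean())  # ~1.0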
[ [ "tensorflow.compat.v1.io.gfile.exists", "tensorflow.compat.v1.io.gfile.rename", "tensorflow.compat.v1.io.gfile.makedirs", "tensorflow.compat.v1.random.uniform", "tensorflow.compat.v1.GradientTape", "tensorflow.compat.v1.profiler.ProfileOptionBuilder.float_operation", "tensorflow.compat.v1.zeros_initializer", "tensorflow.python.eager.tape.stop_recording", "tensorflow.compat.v1.io.gfile.glob", "tensorflow.compat.v1.shape", "tensorflow.compat.v1.math.square", "tensorflow.python.tpu.tpu_function.get_tpu_context", "tensorflow.compat.v1.nn.relu6", "tensorflow.compat.v1.math.equal", "tensorflow.compat.v1.global_variables", "tensorflow.compat.v2.summary.record_if", "tensorflow.compat.v1.summary.image", "tensorflow.compat.v1.reshape", "tensorflow.compat.v1.trainable_variables", "tensorflow.compat.v1.io.gfile.rmtree", "tensorflow.compat.v1.sigmoid", "tensorflow.compat.v1.nest.map_structure", "tensorflow.compat.v2.summary.image", "tensorflow.compat.v1.distribute.get_replica_context", "tensorflow.compat.v1.zeros_like", "tensorflow.compat.v1.ones_initializer", "tensorflow.compat.v1.get_variable_scope", "tensorflow.compat.v1.variable_scope", "tensorflow.compat.v1.io.gfile.copy", "tensorflow.compat.v1.name_scope", "tensorflow.compat.v1.initializers.variance_scaling", "tensorflow.compat.v1.ones_like", "tensorflow.compat.v1.config.list_physical_devices", "tensorflow.compat.v1.reduce_mean", "tensorflow.compat.v2.summary.create_file_writer", "tensorflow.compat.v1.nn.swish", "tensorflow.compat.v1.floor", "tensorflow.compat.v1.io.gfile.GFile", "tensorflow.compat.v1.get_collection", "tensorflow.compat.v1.nn.relu", "tensorflow.compat.v2.summary.scalar", "tensorflow.compat.v1.math.softplus", "tensorflow.compat.v1.cast", "tensorflow.compat.v2.keras.mixed_precision.Policy", "tensorflow.compat.v1.get_default_graph", "tensorflow.compat.v1.tpu.bfloat16_scope", "tensorflow.compat.v1.Variable", "tensorflow.compat.v1.summary.all_v2_summary_ops", "tensorflow.compat.v1.compat.v1.keras.layers.enable_v2_dtype_behavior", "tensorflow.compat.v1.control_dependencies", "tensorflow.compat.v1.add_to_collection", "tensorflow.compat.v2.keras.mixed_precision.set_global_policy", "tensorflow.compat.v1.train.load_checkpoint", "tensorflow.compat.v1.tpu.cross_replica_sum" ] ]
kangyongxin/deepmind-research
[ "5b22f165327a263a31b2a467b9446fd1bf74d7db" ]
[ "curl/training.py" ]
[ "################################################################################\n# Copyright 2019 DeepMind Technologies Limited\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n################################################################################\n\"\"\"Script to train CURL.\"\"\"\n\nimport collections\nimport functools\nfrom absl import logging\n\nimport numpy as np\nfrom sklearn import neighbors\nimport sonnet as snt\nimport tensorflow as tf\nimport tensorflow_datasets as tfds\nimport tensorflow_probability as tfp\n\nfrom curl import model\nfrom curl import utils\n\ntfc = tf.compat.v1\n\n# pylint: disable=g-long-lambda\n\nMainOps = collections.namedtuple('MainOps', [\n 'elbo', 'll', 'log_p_x', 'kl_y', 'kl_z', 'elbo_supervised', 'll_supervised',\n 'log_p_x_supervised', 'kl_y_supervised', 'kl_z_supervised',\n 'cat_probs', 'confusion', 'purity', 'latents'\n])\n\nDatasetTuple = collections.namedtuple('DatasetTuple', [\n 'train_data', 'train_iter_for_clf', 'train_data_for_clf',\n 'valid_iter', 'valid_data', 'test_iter', 'test_data', 'ds_info'\n])\n\n\ndef compute_purity(confusion):\n return np.sum(np.max(confusion, axis=0)).astype(float) / np.sum(confusion)\n\n\ndef process_dataset(iterator,\n ops_to_run,\n sess,\n feed_dict=None,\n aggregation_ops=np.stack,\n processing_ops=None):\n \"\"\"Process a dataset by computing ops and accumulating batch by batch.\n\n Args:\n iterator: iterator through the dataset.\n ops_to_run: dict, tf ops to run as part of dataset processing.\n sess: tf.Session to use.\n feed_dict: dict, required placeholders.\n aggregation_ops: fn or dict of fns, aggregation op to apply for each op.\n processing_ops: fn or dict of fns, extra processing op to apply for each op.\n\n Returns:\n Results accumulated over dataset.\n \"\"\"\n\n if not isinstance(ops_to_run, dict):\n raise TypeError('ops_to_run must be specified as a dict')\n\n if not isinstance(aggregation_ops, dict):\n aggregation_ops = {k: aggregation_ops for k in ops_to_run}\n if not isinstance(processing_ops, dict):\n processing_ops = {k: processing_ops for k in ops_to_run}\n\n out_results = collections.OrderedDict()\n sess.run(iterator.initializer)\n while True:\n # Iterate over the whole dataset and append the results to a per-key list.\n try:\n outs = sess.run(ops_to_run, feed_dict=feed_dict)\n for key, value in outs.items():\n out_results.setdefault(key, []).append(value)\n\n except tf.errors.OutOfRangeError: # end of dataset iterator\n break\n\n # Aggregate and process results.\n for key, value in out_results.items():\n if aggregation_ops[key]:\n out_results[key] = aggregation_ops[key](value)\n if processing_ops[key]:\n out_results[key] = processing_ops[key](out_results[key], axis=0)\n\n return out_results\n\n\ndef get_data_sources(dataset, dataset_kwargs, batch_size, test_batch_size,\n training_data_type, n_concurrent_classes, image_key,\n label_key):\n \"\"\"Create and return data sources for training, validation, and testing.\n\n Args:\n dataset: str, name of dataset ('mnist', 'omniglot', 
etc).\n dataset_kwargs: dict, kwargs used in tf dataset constructors.\n batch_size: int, batch size used for training.\n test_batch_size: int, batch size used for evaluation.\n training_data_type: str, how training data is seen ('iid', or 'sequential').\n n_concurrent_classes: int, # classes seen at a time (ignored for 'iid').\n image_key: str, name if image key in dataset.\n label_key: str, name of label key in dataset.\n\n Returns:\n A namedtuple containing all of the dataset iterators and batches.\n\n \"\"\"\n\n # Load training data sources\n ds_train, ds_info = tfds.load(\n name=dataset,\n split=tfds.Split.TRAIN,\n with_info=True,\n as_dataset_kwargs={'shuffle_files': False},\n **dataset_kwargs)\n\n # Validate assumption that data is in [0, 255]\n assert ds_info.features[image_key].dtype == tf.uint8\n\n n_classes = ds_info.features[label_key].num_classes\n num_train_examples = ds_info.splits['train'].num_examples\n\n def preprocess_data(x):\n \"\"\"Convert images from uint8 in [0, 255] to float in [0, 1].\"\"\"\n x[image_key] = tf.image.convert_image_dtype(x[image_key], tf.float32)\n return x\n\n if training_data_type == 'sequential':\n c = None # The index of the class number, None for now and updated later\n if n_concurrent_classes == 1:\n filter_fn = lambda v: tf.equal(v[label_key], c)\n else:\n # Define the lowest and highest class number at each data period.\n assert n_classes % n_concurrent_classes == 0, (\n 'Number of total classes must be divisible by '\n 'number of concurrent classes')\n cmin = []\n cmax = []\n for i in range(int(n_classes / n_concurrent_classes)):\n for _ in range(n_concurrent_classes):\n cmin.append(i * n_concurrent_classes)\n cmax.append((i + 1) * n_concurrent_classes)\n\n filter_fn = lambda v: tf.logical_and(\n tf.greater_equal(v[label_key], cmin[c]), tf.less(\n v[label_key], cmax[c]))\n\n # Set up data sources/queues (one for each class).\n train_datasets = []\n train_iterators = []\n train_data = []\n\n full_ds = ds_train.repeat().shuffle(num_train_examples, seed=0)\n full_ds = full_ds.map(preprocess_data)\n for c in range(n_classes):\n filtered_ds = full_ds.filter(filter_fn).batch(\n batch_size, drop_remainder=True)\n train_datasets.append(filtered_ds)\n train_iterators.append(train_datasets[-1].make_one_shot_iterator())\n train_data.append(train_iterators[-1].get_next())\n\n else: # not sequential\n full_ds = ds_train.repeat().shuffle(num_train_examples, seed=0)\n full_ds = full_ds.map(preprocess_data)\n train_datasets = full_ds.batch(batch_size, drop_remainder=True)\n train_data = train_datasets.make_one_shot_iterator().get_next()\n\n # Set up data source to get full training set for classifier training\n full_ds = ds_train.repeat(1).shuffle(num_train_examples, seed=0)\n full_ds = full_ds.map(preprocess_data)\n train_datasets_for_classifier = full_ds.batch(\n test_batch_size, drop_remainder=True)\n train_iter_for_classifier = (\n train_datasets_for_classifier.make_initializable_iterator())\n train_data_for_classifier = train_iter_for_classifier.get_next()\n\n # Load validation dataset.\n try:\n valid_dataset = tfds.load(\n name=dataset, split=tfds.Split.VALIDATION, **dataset_kwargs)\n num_valid_examples = ds_info.splits[tfds.Split.VALIDATION].num_examples\n assert (num_valid_examples %\n test_batch_size == 0), ('test_batch_size must be a multiple of %d' %\n num_valid_examples)\n valid_dataset = valid_dataset.repeat(1).batch(\n test_batch_size, drop_remainder=True)\n valid_dataset = valid_dataset.map(preprocess_data)\n valid_iter = 
valid_dataset.make_initializable_iterator()\n valid_data = valid_iter.get_next()\n except KeyError:\n logging.warning('No validation set!!')\n valid_iter = None\n valid_data = None\n\n # Load test dataset.\n test_dataset = tfds.load(\n name=dataset, split=tfds.Split.TEST, **dataset_kwargs)\n num_test_examples = ds_info.splits['test'].num_examples\n assert (num_test_examples %\n test_batch_size == 0), ('test_batch_size must be a multiple of %d' %\n num_test_examples)\n test_dataset = test_dataset.repeat(1).batch(\n test_batch_size, drop_remainder=True)\n test_dataset = test_dataset.map(preprocess_data)\n test_iter = test_dataset.make_initializable_iterator()\n test_data = test_iter.get_next()\n logging.info('Loaded %s data', dataset)\n\n return DatasetTuple(train_data, train_iter_for_classifier,\n train_data_for_classifier, valid_iter, valid_data,\n test_iter, test_data, ds_info)\n\n\ndef setup_training_and_eval_graphs(x, label, y, n_y, curl_model,\n classify_with_samples, is_training, name):\n \"\"\"Set up the graph and return ops for training or evaluation.\n\n Args:\n x: tf placeholder for image.\n label: tf placeholder for ground truth label.\n y: tf placeholder for some self-supervised label/prediction.\n n_y: int, dimensionality of discrete latent variable y.\n curl_model: snt.AbstractModule representing the CURL model.\n classify_with_samples: bool, whether to *sample* latents for classification.\n is_training: bool, whether this graph is the training graph.\n name: str, graph name.\n\n Returns:\n A namedtuple with the required graph ops to perform training or evaluation.\n\n \"\"\"\n # kl_y_supervised is -log q(y=y_true | x)\n (log_p_x, kl_y, kl_z, log_p_x_supervised, kl_y_supervised,\n kl_z_supervised) = curl_model.log_prob_elbo_components(x, y)\n\n ll = log_p_x - kl_y - kl_z\n elbo = -tf.reduce_mean(ll)\n\n # Supervised loss, either for SMGR, or adaptation to supervised benchmark.\n ll_supervised = log_p_x_supervised - kl_y_supervised - kl_z_supervised\n elbo_supervised = -tf.reduce_mean(ll_supervised)\n\n # Summaries\n kl_y = tf.reduce_mean(kl_y)\n kl_z = tf.reduce_mean(kl_z)\n log_p_x_supervised = tf.reduce_mean(log_p_x_supervised)\n kl_y_supervised = tf.reduce_mean(kl_y_supervised)\n kl_z_supervised = tf.reduce_mean(kl_z_supervised)\n\n # Evaluation.\n hiddens = curl_model.get_shared_rep(x, is_training=is_training)\n cat = curl_model.infer_cluster(hiddens)\n cat_probs = cat.probs\n\n confusion = tf.confusion_matrix(label, tf.argmax(cat_probs, axis=1),\n num_classes=n_y, name=name + '_confusion')\n purity = (tf.reduce_sum(tf.reduce_max(confusion, axis=0))\n / tf.reduce_sum(confusion))\n\n if classify_with_samples:\n latents = curl_model.infer_latent(\n hiddens=hiddens, y=tf.to_float(cat.sample())).sample()\n else:\n latents = curl_model.infer_latent(\n hiddens=hiddens, y=tf.to_float(cat.mode())).mean()\n\n return MainOps(elbo, ll, log_p_x, kl_y, kl_z, elbo_supervised, ll_supervised,\n log_p_x_supervised, kl_y_supervised, kl_z_supervised,\n cat_probs, confusion, purity, latents)\n\n\ndef get_generated_data(sess, gen_op, y_input, gen_buffer_size,\n component_counts):\n \"\"\"Get generated model data (in place of saving a model snapshot).\n\n Args:\n sess: tf.Session.\n gen_op: tf op representing a batch of generated data.\n y_input: tf placeholder for which mixture components to generate from.\n gen_buffer_size: int, number of data points to generate.\n component_counts: np.array, prior probabilities over components.\n\n Returns:\n A tuple of two numpy arrays\n The generated 
data\n The corresponding labels\n \"\"\"\n\n batch_size, n_y = y_input.shape.as_list()\n\n # Sample based on the history of all components used.\n cluster_sample_probs = component_counts.astype(float)\n cluster_sample_probs = np.maximum(1e-12, cluster_sample_probs)\n cluster_sample_probs = cluster_sample_probs / np.sum(cluster_sample_probs)\n\n # Now generate the data based on the specified cluster prior.\n gen_buffer_images = []\n gen_buffer_labels = []\n for _ in range(gen_buffer_size):\n gen_label = np.random.choice(\n np.arange(n_y),\n size=(batch_size,),\n replace=True,\n p=cluster_sample_probs)\n y_gen_posterior_vals = np.zeros((batch_size, n_y))\n y_gen_posterior_vals[np.arange(batch_size), gen_label] = 1\n gen_image = sess.run(gen_op, feed_dict={y_input: y_gen_posterior_vals})\n gen_buffer_images.append(gen_image)\n gen_buffer_labels.append(gen_label)\n\n gen_buffer_images = np.vstack(gen_buffer_images)\n gen_buffer_labels = np.concatenate(gen_buffer_labels)\n\n return gen_buffer_images, gen_buffer_labels\n\n\ndef setup_dynamic_ops(n_y):\n \"\"\"Set up ops to move / copy mixture component weights for dynamic expansion.\n\n Args:\n n_y: int, dimensionality of discrete latent variable y.\n\n Returns:\n A dict containing all of the ops required for dynamic updating.\n\n \"\"\"\n # Set up graph ops to dynamically modify component params.\n graph = tf.get_default_graph()\n\n # 1) Ops to get and set latent encoder params (entire tensors)\n latent_enc_tensors = {}\n for k in range(n_y):\n latent_enc_tensors['latent_w_' + str(k)] = graph.get_tensor_by_name(\n 'latent_encoder/mlp_latent_encoder_{}/w:0'.format(k))\n latent_enc_tensors['latent_b_' + str(k)] = graph.get_tensor_by_name(\n 'latent_encoder/mlp_latent_encoder_{}/b:0'.format(k))\n\n latent_enc_assign_ops = {}\n latent_enc_phs = {}\n for key, tensor in latent_enc_tensors.items():\n latent_enc_phs[key] = tfc.placeholder(tensor.dtype, tensor.shape)\n latent_enc_assign_ops[key] = tf.assign(tensor, latent_enc_phs[key])\n\n # 2) Ops to get and set cluster encoder params (columns of a tensor)\n # We will be copying column ind_from to column ind_to.\n cluster_w = graph.get_tensor_by_name(\n 'cluster_encoder/mlp_cluster_encoder_final/w:0')\n cluster_b = graph.get_tensor_by_name(\n 'cluster_encoder/mlp_cluster_encoder_final/b:0')\n\n ind_from = tfc.placeholder(dtype=tf.int32)\n ind_to = tfc.placeholder(dtype=tf.int32)\n\n # Determine indices of cluster encoder weights and biases to be updated\n w_indices = tf.transpose(\n tf.stack([\n tf.range(cluster_w.shape[0], dtype=tf.int32),\n ind_to * tf.ones(shape=(cluster_w.shape[0],), dtype=tf.int32)\n ]))\n b_indices = ind_to\n # Determine updates themselves\n cluster_w_updates = tf.squeeze(\n tf.slice(cluster_w, begin=(0, ind_from), size=(cluster_w.shape[0], 1)))\n cluster_b_updates = cluster_b[ind_from]\n # Create update ops\n cluster_w_update_op = tf.scatter_nd_update(cluster_w, w_indices,\n cluster_w_updates)\n cluster_b_update_op = tf.scatter_update(cluster_b, b_indices,\n cluster_b_updates)\n\n # 3) Ops to get and set latent prior params (columns of a tensor)\n # We will be copying column ind_from to column ind_to.\n latent_prior_mu_w = graph.get_tensor_by_name(\n 'latent_decoder/latent_prior_mu/w:0')\n latent_prior_sigma_w = graph.get_tensor_by_name(\n 'latent_decoder/latent_prior_sigma/w:0')\n\n mu_indices = tf.transpose(\n tf.stack([\n ind_to * tf.ones(shape=(latent_prior_mu_w.shape[1],), dtype=tf.int32),\n tf.range(latent_prior_mu_w.shape[1], dtype=tf.int32)\n ]))\n mu_updates = 
tf.squeeze(\n tf.slice(\n latent_prior_mu_w,\n begin=(ind_from, 0),\n size=(1, latent_prior_mu_w.shape[1])))\n mu_update_op = tf.scatter_nd_update(latent_prior_mu_w, mu_indices, mu_updates)\n sigma_indices = tf.transpose(\n tf.stack([\n ind_to *\n tf.ones(shape=(latent_prior_sigma_w.shape[1],), dtype=tf.int32),\n tf.range(latent_prior_sigma_w.shape[1], dtype=tf.int32)\n ]))\n sigma_updates = tf.squeeze(\n tf.slice(\n latent_prior_sigma_w,\n begin=(ind_from, 0),\n size=(1, latent_prior_sigma_w.shape[1])))\n sigma_update_op = tf.scatter_nd_update(latent_prior_sigma_w, sigma_indices,\n sigma_updates)\n\n dynamic_ops = {\n 'ind_from_ph': ind_from,\n 'ind_to_ph': ind_to,\n 'latent_enc_tensors': latent_enc_tensors,\n 'latent_enc_assign_ops': latent_enc_assign_ops,\n 'latent_enc_phs': latent_enc_phs,\n 'cluster_w_update_op': cluster_w_update_op,\n 'cluster_b_update_op': cluster_b_update_op,\n 'mu_update_op': mu_update_op,\n 'sigma_update_op': sigma_update_op\n }\n\n return dynamic_ops\n\n\ndef copy_component_params(ind_from, ind_to, sess, ind_from_ph, ind_to_ph,\n latent_enc_tensors, latent_enc_assign_ops,\n latent_enc_phs,\n cluster_w_update_op, cluster_b_update_op,\n mu_update_op, sigma_update_op):\n \"\"\"Copy parameters from component i to component j.\n\n Args:\n ind_from: int, component index to copy from.\n ind_to: int, component index to copy to.\n sess: tf.Session.\n ind_from_ph: tf placeholder for component to copy from.\n ind_to_ph: tf placeholder for component to copy to.\n latent_enc_tensors: dict, tensors in the latent posterior encoder.\n latent_enc_assign_ops: dict, assignment ops for latent posterior encoder.\n latent_enc_phs: dict, placeholders for assignment ops.\n cluster_w_update_op: op for updating weights of cluster encoder.\n cluster_b_update_op: op for updating biased of cluster encoder.\n mu_update_op: op for updating mu weights of latent prior.\n sigma_update_op: op for updating sigma weights of latent prior.\n\n \"\"\"\n update_ops = []\n feed_dict = {}\n # Copy for latent encoder.\n new_w_val, new_b_val = sess.run([\n latent_enc_tensors['latent_w_' + str(ind_from)],\n latent_enc_tensors['latent_b_' + str(ind_from)]\n ])\n update_ops.extend([\n latent_enc_assign_ops['latent_w_' + str(ind_to)],\n latent_enc_assign_ops['latent_b_' + str(ind_to)]\n ])\n feed_dict.update({\n latent_enc_phs['latent_w_' + str(ind_to)]: new_w_val,\n latent_enc_phs['latent_b_' + str(ind_to)]: new_b_val\n })\n\n # Copy for cluster encoder softmax.\n update_ops.extend([cluster_w_update_op, cluster_b_update_op])\n feed_dict.update({ind_from_ph: ind_from, ind_to_ph: ind_to})\n\n # Copy for latent prior.\n update_ops.extend([mu_update_op, sigma_update_op])\n feed_dict.update({ind_from_ph: ind_from, ind_to_ph: ind_to})\n sess.run(update_ops, feed_dict)\n\n\ndef run_training(\n dataset,\n training_data_type,\n n_concurrent_classes,\n blend_classes,\n train_supervised,\n n_steps,\n random_seed,\n lr_init,\n lr_factor,\n lr_schedule,\n output_type,\n n_y,\n n_y_active,\n n_z,\n encoder_kwargs,\n decoder_kwargs,\n dynamic_expansion,\n ll_thresh,\n classify_with_samples,\n report_interval,\n knn_values,\n gen_replay_type,\n use_supervised_replay):\n \"\"\"Run training script.\n\n Args:\n dataset: str, name of the dataset.\n training_data_type: str, type of training run ('iid' or 'sequential').\n n_concurrent_classes: int, # of classes seen at a time (ignored for 'iid').\n blend_classes: bool, whether to blend in samples from the next class.\n train_supervised: bool, whether to use supervision during 
training.\n n_steps: int, number of total training steps.\n random_seed: int, seed for tf and numpy RNG.\n lr_init: float, initial learning rate.\n lr_factor: float, learning rate decay factor.\n lr_schedule: float, epochs at which the decay should be applied.\n output_type: str, output distribution (currently only 'bernoulli').\n n_y: int, maximum possible dimensionality of discrete latent variable y.\n n_y_active: int, starting dimensionality of discrete latent variable y.\n n_z: int, dimensionality of continuous latent variable z.\n encoder_kwargs: dict, parameters to specify encoder.\n decoder_kwargs: dict, parameters to specify decoder.\n dynamic_expansion: bool, whether to perform dynamic expansion.\n ll_thresh: float, log-likelihood threshold below which to keep poor samples.\n classify_with_samples: bool, whether to sample latents when classifying.\n report_interval: int, number of steps after which to evaluate and report.\n knn_values: list of ints, k values for different k-NN classifiers to run\n (values of 3, 5, and 10 were used in different parts of the paper).\n gen_replay_type: str, 'fixed', 'dynamic', or None.\n use_supervised_replay: str, whether to use supervised replay (aka 'SMGR').\n \"\"\"\n\n # Set tf random seed.\n tfc.set_random_seed(random_seed)\n np.set_printoptions(precision=2, suppress=True)\n\n # First set up the data source(s) and get dataset info.\n if dataset == 'mnist':\n batch_size = 100\n test_batch_size = 1000\n dataset_kwargs = {}\n image_key = 'image'\n label_key = 'label'\n elif dataset == 'omniglot':\n batch_size = 15\n test_batch_size = 8115\n dataset_kwargs = {'split': 'instance', 'label': 'alphabet'}\n image_key = 'image'\n label_key = 'alphabet'\n else:\n raise NotImplementedError\n\n dataset_ops = get_data_sources(dataset, dataset_kwargs, batch_size,\n test_batch_size, training_data_type,\n n_concurrent_classes, image_key, label_key)\n train_data = dataset_ops.train_data\n train_data_for_clf = dataset_ops.train_data_for_clf\n valid_data = dataset_ops.valid_data\n test_data = dataset_ops.test_data\n\n output_shape = dataset_ops.ds_info.features[image_key].shape\n n_x = np.prod(output_shape)\n n_classes = dataset_ops.ds_info.features[label_key].num_classes\n num_train_examples = dataset_ops.ds_info.splits['train'].num_examples\n\n # Check that the number of classes is compatible with the training scenario\n assert n_classes % n_concurrent_classes == 0\n assert n_steps % (n_classes / n_concurrent_classes) == 0\n\n # Set specific params depending on the type of gen replay\n if gen_replay_type == 'fixed':\n data_period = data_period = int(n_steps /\n (n_classes / n_concurrent_classes))\n gen_every_n = 2 # Blend in a gen replay batch every 2 steps\n gen_refresh_period = data_period # How often to refresh the batches of\n # generated data (equivalent to snapshotting a generative model)\n gen_refresh_on_expansion = False # Don't refresh on dyn expansion\n elif gen_replay_type == 'dynamic':\n gen_every_n = 2 # Blend in a gen replay batch every 2 steps\n gen_refresh_period = 1e8 # Never refresh generated data periodically\n gen_refresh_on_expansion = True # Refresh on dyn expansion instead\n elif gen_replay_type is None:\n gen_every_n = 0 # Don't use any gen replay batches\n gen_refresh_period = 1e8 # Never refresh generated data periodically\n gen_refresh_on_expansion = False # Don't refresh on dyn expansion\n else:\n raise NotImplementedError\n\n max_gen_batches = 5000 # Max num of gen batches (proxy for storing a model)\n\n # Set dynamic expansion 
parameters\n exp_wait_steps = 100 # Steps to wait after expansion before eligible again\n exp_burn_in = 100 # Steps to wait at start of learning before eligible\n exp_buffer_size = 100 # Size of the buffer of poorly explained data\n num_buffer_train_steps = 20 # Num steps to train component on buffer\n\n # Define a global tf variable for the number of active components.\n n_y_active_np = n_y_active\n n_y_active = tfc.get_variable(\n initializer=tf.constant(n_y_active_np, dtype=tf.int32),\n trainable=False,\n name='n_y_active',\n dtype=tf.int32)\n\n logging.info('Starting CURL script on %s data.', dataset)\n\n # Set up placeholders for training.\n\n x_train_raw = tfc.placeholder(\n dtype=tf.float32, shape=(batch_size,) + output_shape)\n label_train = tfc.placeholder(dtype=tf.int32, shape=(batch_size,))\n\n def binarize_fn(x):\n \"\"\"Binarize a Bernoulli by rounding the probabilities.\n\n Args:\n x: tf tensor, input image.\n\n Returns:\n A tf tensor with the binarized image\n \"\"\"\n return tf.cast(tf.greater(x, 0.5 * tf.ones_like(x)), tf.float32)\n\n if dataset == 'mnist':\n x_train = binarize_fn(x_train_raw)\n x_valid = binarize_fn(valid_data[image_key]) if valid_data else None\n x_test = binarize_fn(test_data[image_key])\n x_train_for_clf = binarize_fn(train_data_for_clf[image_key])\n elif 'cifar' in dataset or dataset == 'omniglot':\n x_train = x_train_raw\n x_valid = valid_data[image_key] if valid_data else None\n x_test = test_data[image_key]\n x_train_for_clf = train_data_for_clf[image_key]\n else:\n raise ValueError('Unknown dataset {}'.format(dataset))\n\n label_valid = valid_data[label_key] if valid_data else None\n label_test = test_data[label_key]\n\n # Set up CURL modules.\n shared_encoder = model.SharedEncoder(name='shared_encoder', **encoder_kwargs)\n latent_encoder = functools.partial(model.latent_encoder_fn, n_y=n_y, n_z=n_z)\n latent_encoder = snt.Module(latent_encoder, name='latent_encoder')\n latent_decoder = functools.partial(model.latent_decoder_fn, n_z=n_z)\n latent_decoder = snt.Module(latent_decoder, name='latent_decoder')\n cluster_encoder = functools.partial(\n model.cluster_encoder_fn, n_y_active=n_y_active, n_y=n_y)\n cluster_encoder = snt.Module(cluster_encoder, name='cluster_encoder')\n data_decoder = functools.partial(\n model.data_decoder_fn,\n output_type=output_type,\n output_shape=output_shape,\n n_x=n_x,\n n_y=n_y,\n **decoder_kwargs)\n data_decoder = snt.Module(data_decoder, name='data_decoder')\n\n # Uniform prior over y.\n prior_train_probs = utils.construct_prior_probs(batch_size, n_y, n_y_active)\n prior_train = snt.Module(\n lambda: tfp.distributions.OneHotCategorical(probs=prior_train_probs),\n name='prior_unconditional_train')\n prior_test_probs = utils.construct_prior_probs(test_batch_size, n_y,\n n_y_active)\n prior_test = snt.Module(\n lambda: tfp.distributions.OneHotCategorical(probs=prior_test_probs),\n name='prior_unconditional_test')\n\n model_train = model.Curl(\n prior_train,\n latent_decoder,\n data_decoder,\n shared_encoder,\n cluster_encoder,\n latent_encoder,\n n_y_active,\n is_training=True,\n name='curl_train')\n model_eval = model.Curl(\n prior_test,\n latent_decoder,\n data_decoder,\n shared_encoder,\n cluster_encoder,\n latent_encoder,\n n_y_active,\n is_training=False,\n name='curl_test')\n\n # Set up training graph\n y_train = label_train if train_supervised else None\n y_valid = label_valid if train_supervised else None\n y_test = label_test if train_supervised else None\n\n train_ops = setup_training_and_eval_graphs(\n 
x_train,\n label_train,\n y_train,\n n_y,\n model_train,\n classify_with_samples,\n is_training=True,\n name='train')\n\n hiddens_for_clf = model_eval.get_shared_rep(x_train_for_clf,\n is_training=False)\n cat_for_clf = model_eval.infer_cluster(hiddens_for_clf)\n\n if classify_with_samples:\n latents_for_clf = model_eval.infer_latent(\n hiddens=hiddens_for_clf, y=tf.to_float(cat_for_clf.sample())).sample()\n else:\n latents_for_clf = model_eval.infer_latent(\n hiddens=hiddens_for_clf, y=tf.to_float(cat_for_clf.mode())).mean()\n\n # Set up validation graph\n if valid_data is not None:\n valid_ops = setup_training_and_eval_graphs(\n x_valid,\n label_valid,\n y_valid,\n n_y,\n model_eval,\n classify_with_samples,\n is_training=False,\n name='valid')\n\n # Set up test graph\n test_ops = setup_training_and_eval_graphs(\n x_test,\n label_test,\n y_test,\n n_y,\n model_eval,\n classify_with_samples,\n is_training=False,\n name='test')\n\n # Set up optimizer (with scheduler).\n global_step = tf.train.get_or_create_global_step()\n lr_schedule = [\n tf.cast(el * num_train_examples / batch_size, tf.int64)\n for el in lr_schedule\n ]\n num_schedule_steps = tf.reduce_sum(\n tf.cast(global_step >= lr_schedule, tf.float32))\n lr = float(lr_init) * float(lr_factor)**num_schedule_steps\n optimizer = tf.train.AdamOptimizer(learning_rate=lr)\n with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):\n train_step = optimizer.minimize(train_ops.elbo)\n train_step_supervised = optimizer.minimize(train_ops.elbo_supervised)\n\n # Set up ops for generative replay\n if gen_every_n > 0:\n # How many generative batches will we use each period?\n gen_buffer_size = min(\n int(gen_refresh_period / gen_every_n), max_gen_batches)\n\n # Class each sample should be drawn from (default to uniform prior)\n y_gen = tfp.distributions.OneHotCategorical(\n probs=np.ones((batch_size, n_y)) / n_y,\n dtype=tf.float32,\n name='extra_train_classes').sample()\n\n gen_samples = model_train.sample(y=y_gen, mean=True)\n if dataset == 'mnist' or dataset == 'omniglot':\n gen_samples = binarize_fn(gen_samples)\n\n # Set up ops to dynamically modify parameters (for dynamic expansion)\n dynamic_ops = setup_dynamic_ops(n_y)\n\n logging.info('Created computation graph.')\n\n n_steps_per_class = n_steps / n_classes # pylint: disable=invalid-name\n\n cumulative_component_counts = np.array([0] * n_y).astype(float)\n recent_component_counts = np.array([0] * n_y).astype(float)\n\n gen_buffer_ind = 0\n\n # Buffer of poorly explained data (if we're doing dynamic expansion).\n poor_data_buffer = []\n poor_data_labels = []\n all_full_poor_data_buffers = []\n all_full_poor_data_labels = []\n has_expanded = False\n steps_since_expansion = 0\n gen_buffer_ind = 0\n eligible_for_expansion = False # Flag to ensure we wait a bit after expansion\n\n # Set up basic ops to run and quantities to log.\n ops_to_run = {\n 'train_ELBO': train_ops.elbo,\n 'train_log_p_x': train_ops.log_p_x,\n 'train_kl_y': train_ops.kl_y,\n 'train_kl_z': train_ops.kl_z,\n 'train_ll': train_ops.ll,\n 'train_batch_purity': train_ops.purity,\n 'train_probs': train_ops.cat_probs,\n 'n_y_active': n_y_active\n }\n if valid_data is not None:\n valid_ops_to_run = {\n 'valid_ELBO': valid_ops.elbo,\n 'valid_kl_y': valid_ops.kl_y,\n 'valid_kl_z': valid_ops.kl_z,\n 'valid_confusion': valid_ops.confusion\n }\n else:\n valid_ops_to_run = {}\n test_ops_to_run = {\n 'test_ELBO': test_ops.elbo,\n 'test_kl_y': test_ops.kl_y,\n 'test_kl_z': test_ops.kl_z,\n 'test_confusion': 
test_ops.confusion\n }\n to_log = ['train_batch_purity']\n to_log_eval = ['test_purity', 'test_ELBO', 'test_kl_y', 'test_kl_z']\n if valid_data is not None:\n to_log_eval += ['valid_ELBO', 'valid_purity']\n\n if train_supervised:\n # Track supervised losses, train on supervised loss.\n ops_to_run.update({\n 'train_ELBO_supervised': train_ops.elbo_supervised,\n 'train_log_p_x_supervised': train_ops.log_p_x_supervised,\n 'train_kl_y_supervised': train_ops.kl_y_supervised,\n 'train_kl_z_supervised': train_ops.kl_z_supervised,\n 'train_ll_supervised': train_ops.ll_supervised\n })\n default_train_step = train_step_supervised\n to_log += [\n 'train_ELBO_supervised', 'train_log_p_x_supervised',\n 'train_kl_y_supervised', 'train_kl_z_supervised'\n ]\n else:\n # Track unsupervised losses, train on unsupervised loss.\n ops_to_run.update({\n 'train_ELBO': train_ops.elbo,\n 'train_kl_y': train_ops.kl_y,\n 'train_kl_z': train_ops.kl_z,\n 'train_ll': train_ops.ll\n })\n default_train_step = train_step\n to_log += ['train_ELBO', 'train_kl_y', 'train_kl_z']\n\n with tf.train.SingularMonitoredSession() as sess:\n\n for step in range(n_steps):\n feed_dict = {}\n\n # Use the default training loss, but vary it each step depending on the\n # training scenario (eg. for supervised gen replay, we alternate losses)\n ops_to_run['train_step'] = default_train_step\n\n ### 1) PERIODICALLY TAKE SNAPSHOTS FOR GENERATIVE REPLAY ###\n if (gen_refresh_period and step % gen_refresh_period == 0 and\n gen_every_n > 0):\n\n # First, increment cumulative count and reset recent probs count.\n cumulative_component_counts += recent_component_counts\n recent_component_counts = np.zeros(n_y)\n\n # Generate enough samples for the rest of the next period\n # (Functionally equivalent to storing and sampling from the model).\n gen_buffer_images, gen_buffer_labels = get_generated_data(\n sess=sess,\n gen_op=gen_samples,\n y_input=y_gen,\n gen_buffer_size=gen_buffer_size,\n component_counts=cumulative_component_counts)\n\n ### 2) DECIDE WHICH DATA SOURCE TO USE (GENERATIVE OR REAL DATA) ###\n periodic_refresh_started = (\n gen_refresh_period and step >= gen_refresh_period)\n refresh_on_expansion_started = (gen_refresh_on_expansion and has_expanded)\n if ((periodic_refresh_started or refresh_on_expansion_started) and\n gen_every_n > 0 and step % gen_every_n == 1):\n # Use generated data for the training batch\n used_real_data = False\n\n s = gen_buffer_ind * batch_size\n e = (gen_buffer_ind + 1) * batch_size\n\n gen_data_array = {\n 'image': gen_buffer_images[s:e],\n 'label': gen_buffer_labels[s:e]\n }\n gen_buffer_ind = (gen_buffer_ind + 1) % gen_buffer_size\n\n # Feed it as x_train because it's already reshaped and binarized.\n feed_dict.update({\n x_train: gen_data_array['image'],\n label_train: gen_data_array['label']\n })\n\n if use_supervised_replay:\n # Convert label to one-hot before feeding in.\n gen_label_onehot = np.eye(n_y)[gen_data_array['label']]\n feed_dict.update({model_train.y_label: gen_label_onehot})\n ops_to_run['train_step'] = train_step_supervised\n\n else:\n # Else use the standard training data sources.\n used_real_data = True\n\n # Select appropriate data source for iid or sequential setup.\n if training_data_type == 'sequential':\n current_data_period = int(\n min(step / n_steps_per_class, len(train_data) - 1))\n\n # If training supervised, set n_y_active directly based on how many\n # classes have been seen\n if train_supervised:\n assert not dynamic_expansion\n n_y_active_np = n_concurrent_classes * (\n 
current_data_period // n_concurrent_classes +1)\n n_y_active.load(n_y_active_np, sess)\n\n train_data_array = sess.run(train_data[current_data_period])\n\n # If we are blending classes, figure out where we are in the data\n # period and add some fraction of other samples.\n if blend_classes:\n # If in the first quarter, blend in examples from the previous class\n if (step % n_steps_per_class < n_steps_per_class / 4 and\n current_data_period > 0):\n other_train_data_array = sess.run(\n train_data[current_data_period - 1])\n\n num_other = int(\n (n_steps_per_class / 2 - 2 *\n (step % n_steps_per_class)) * batch_size / n_steps_per_class)\n other_inds = np.random.permutation(batch_size)[:num_other]\n\n train_data_array[image_key][:num_other] = other_train_data_array[\n image_key][other_inds]\n train_data_array[label_key][:num_other] = other_train_data_array[\n label_key][other_inds]\n\n # If in the last quarter, blend in examples from the next class\n elif (step % n_steps_per_class > 3 * n_steps_per_class / 4 and\n current_data_period < n_classes - 1):\n other_train_data_array = sess.run(train_data[current_data_period +\n 1])\n\n num_other = int(\n (2 * (step % n_steps_per_class) - 3 * n_steps_per_class / 2) *\n batch_size / n_steps_per_class)\n other_inds = np.random.permutation(batch_size)[:num_other]\n\n train_data_array[image_key][:num_other] = other_train_data_array[\n image_key][other_inds]\n train_data_array['label'][:num_other] = other_train_data_array[\n label_key][other_inds]\n\n # Otherwise, just use the current class\n\n else:\n train_data_array = sess.run(train_data)\n\n feed_dict.update({\n x_train_raw: train_data_array[image_key],\n label_train: train_data_array[label_key]\n })\n\n ### 3) PERFORM A GRADIENT STEP ###\n results = sess.run(ops_to_run, feed_dict=feed_dict)\n del results['train_step']\n\n ### 4) COMPUTE ADDITIONAL DIAGNOSTIC OPS ON VALIDATION/TEST SETS. 
###\n if (step+1) % report_interval == 0:\n if valid_data is not None:\n logging.info('Evaluating on validation and test set!')\n proc_ops = {\n k: (np.sum if 'confusion' in k\n else np.mean) for k in valid_ops_to_run\n }\n results.update(\n process_dataset(\n dataset_ops.valid_iter,\n valid_ops_to_run,\n sess,\n feed_dict=feed_dict,\n processing_ops=proc_ops))\n results['valid_purity'] = compute_purity(results['valid_confusion'])\n else:\n logging.info('Evaluating on test set!')\n proc_ops = {\n k: (np.sum if 'confusion' in k\n else np.mean) for k in test_ops_to_run\n }\n results.update(process_dataset(dataset_ops.test_iter,\n test_ops_to_run,\n sess,\n feed_dict=feed_dict,\n processing_ops=proc_ops))\n results['test_purity'] = compute_purity(results['test_confusion'])\n curr_to_log = to_log + to_log_eval\n else:\n curr_to_log = list(to_log) # copy to prevent in-place modifications\n\n ### 5) DYNAMIC EXPANSION ###\n if dynamic_expansion and used_real_data:\n # If we're doing dynamic expansion and below max capacity then add\n # poorly defined data points to a buffer.\n\n # First check whether the model is eligible for expansion (the model\n # becomes ineligible for a fixed time after each expansion, and when\n # it has hit max capacity).\n if (steps_since_expansion >= exp_wait_steps and step >= exp_burn_in and\n n_y_active_np < n_y):\n eligible_for_expansion = True\n\n steps_since_expansion += 1\n\n if eligible_for_expansion:\n # Add poorly explained data samples to a buffer.\n poor_inds = results['train_ll'] < ll_thresh\n poor_data_buffer.extend(feed_dict[x_train_raw][poor_inds])\n poor_data_labels.extend(feed_dict[label_train][poor_inds])\n\n n_poor_data = len(poor_data_buffer)\n\n # If buffer is big enough, then add a new component and train just the\n # new component with several steps of gradient descent.\n # (We just feed in a onehot cluster vector to indicate which\n # component).\n if n_poor_data >= exp_buffer_size:\n # Dump the buffers so we can log them.\n all_full_poor_data_buffers.append(poor_data_buffer)\n all_full_poor_data_labels.append(poor_data_labels)\n\n # Take a new generative snapshot if specified.\n if gen_refresh_on_expansion and gen_every_n > 0:\n # Increment cumulative count and reset recent probs count.\n cumulative_component_counts += recent_component_counts\n recent_component_counts = np.zeros(n_y)\n\n gen_buffer_images, gen_buffer_labels = get_generated_data(\n sess=sess,\n gen_op=gen_samples,\n y_input=y_gen,\n gen_buffer_size=gen_buffer_size,\n component_counts=cumulative_component_counts)\n\n # Cull to a multiple of batch_size (keep the later data samples).\n n_poor_batches = int(n_poor_data / batch_size)\n poor_data_buffer = poor_data_buffer[-(n_poor_batches * batch_size):]\n poor_data_labels = poor_data_labels[-(n_poor_batches * batch_size):]\n\n # Find most probable component (on poor batch).\n poor_cprobs = []\n for bs in range(n_poor_batches):\n poor_cprobs.append(\n sess.run(\n train_ops.cat_probs,\n feed_dict={\n x_train_raw:\n poor_data_buffer[bs * batch_size:(bs + 1) *\n batch_size]\n }))\n best_cluster = np.argmax(np.sum(np.vstack(poor_cprobs), axis=0))\n\n # Initialize parameters of the new component from most prob\n # existing.\n new_cluster = n_y_active_np\n\n copy_component_params(best_cluster, new_cluster, sess,\n **dynamic_ops)\n\n # Increment mixture component count n_y_active.\n n_y_active_np += 1\n n_y_active.load(n_y_active_np, sess)\n\n # Perform a number of steps of gradient descent on the data buffer,\n # training only the new 
component (supervised loss).\n for _ in range(num_buffer_train_steps):\n for bs in range(n_poor_batches):\n x_batch = poor_data_buffer[bs * batch_size:(bs + 1) *\n batch_size]\n label_batch = poor_data_labels[bs * batch_size:(bs + 1) *\n batch_size]\n label_onehot_batch = np.eye(n_y)[label_batch]\n _ = sess.run(\n train_step_supervised,\n feed_dict={\n x_train_raw: x_batch,\n model_train.y_label: label_onehot_batch\n })\n\n # Empty the buffer.\n poor_data_buffer = []\n poor_data_labels = []\n\n # Reset the threshold flag so we have a burn in before the next\n # component.\n eligible_for_expansion = False\n has_expanded = True\n steps_since_expansion = 0\n\n # Accumulate counts.\n if used_real_data:\n train_cat_probs_vals = results['train_probs']\n recent_component_counts += np.sum(\n train_cat_probs_vals, axis=0).astype(float)\n\n ### 6) LOGGING AND EVALUATION ###\n cleanup_for_print = lambda x: ', {}: %.{}f'.format(\n x.capitalize().replace('_', ' '), 3)\n log_str = 'Iteration %d'\n log_str += ''.join([cleanup_for_print(el) for el in curr_to_log])\n log_str += ' n_active: %d'\n logging.info(\n log_str,\n *([step] + [results[el] for el in curr_to_log] + [n_y_active_np]))\n\n # Periodically perform evaluation\n if (step + 1) % report_interval == 0:\n\n # Report test purity and related measures\n logging.info(\n 'Iteration %d, Test purity: %.3f, Test ELBO: %.3f, Test '\n 'KLy: %.3f, Test KLz: %.3f', step, results['test_purity'],\n results['test_ELBO'], results['test_kl_y'], results['test_kl_z'])\n # Flush data only once in a while to allow buffering of data for more\n # efficient writes.\n results['all_full_poor_data_buffers'] = all_full_poor_data_buffers\n results['all_full_poor_data_labels'] = all_full_poor_data_labels\n logging.info('Also training a classifier in latent space')\n\n # Perform knn classification from latents, to evaluate discriminability.\n\n # Get and encode training and test datasets.\n clf_train_vals = process_dataset(\n dataset_ops.train_iter_for_clf, {\n 'latents': latents_for_clf,\n 'labels': train_data_for_clf[label_key]\n },\n sess,\n feed_dict,\n aggregation_ops=np.concatenate)\n clf_test_vals = process_dataset(\n dataset_ops.test_iter, {\n 'latents': test_ops.latents,\n 'labels': test_data[label_key]\n },\n sess,\n aggregation_ops=np.concatenate)\n\n # Perform knn classification.\n knn_models = []\n for nval in knn_values:\n # Fit training dataset.\n clf = neighbors.KNeighborsClassifier(n_neighbors=nval)\n clf.fit(clf_train_vals['latents'], clf_train_vals['labels'])\n knn_models.append(clf)\n\n results['train_' + str(nval) + 'nn_acc'] = clf.score(\n clf_train_vals['latents'], clf_train_vals['labels'])\n\n # Get test performance.\n results['test_' + str(nval) + 'nn_acc'] = clf.score(\n clf_test_vals['latents'], clf_test_vals['labels'])\n\n logging.info(\n 'Iteration %d %d-NN classifier accuracies, Training: '\n '%.3f, Test: %.3f', step, nval,\n results['train_' + str(nval) + 'nn_acc'],\n results['test_' + str(nval) + 'nn_acc'])\n" ]
[ [ "tensorflow.scatter_update", "tensorflow.reduce_sum", "tensorflow.cast", "tensorflow.equal", "numpy.concatenate", "numpy.max", "tensorflow.train.AdamOptimizer", "tensorflow.get_default_graph", "tensorflow.scatter_nd_update", "tensorflow.get_collection", "numpy.arange", "numpy.eye", "tensorflow.train.get_or_create_global_step", "sklearn.neighbors.KNeighborsClassifier", "tensorflow.argmax", "numpy.zeros", "tensorflow.less", "numpy.array", "numpy.sum", "tensorflow.reduce_max", "numpy.maximum", "tensorflow.train.SingularMonitoredSession", "tensorflow.constant", "tensorflow.reduce_mean", "tensorflow.slice", "tensorflow.range", "numpy.set_printoptions", "tensorflow.assign", "tensorflow.ones_like", "tensorflow.ones", "numpy.ones", "numpy.random.permutation", "numpy.prod", "tensorflow.image.convert_image_dtype", "tensorflow.greater_equal", "numpy.vstack" ] ]
noamsgl/RL201_HW4
[ "76bd3b173eaf07d3d4c47a3448ec6bdebbe7a7aa" ]
[ "main.py" ]
[ "\"\"\"\nNoam Siegel and Dolev Orgad,\nReinforcement Learning,\nBen Gurion University of the Negev, 2020\n\"\"\"\n\nfrom datetime import datetime\n\nimport gym\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom numpy.linalg import inv\nfrom tqdm import tqdm\n\n\"\"\"\nAssumes installation of tqdm:\npip install tqdm\n\nIf this is an issue, remove the tqdm wrapper in line #40\n\"\"\"\n\n\ndef features_vector(S):\n p, v = S\n x = abs(C_arr - np.asarray([p, v]))\n vec = np.zeros((len(x), 1))\n for i in range(len(x)):\n vec[i] = np.exp(np.dot(np.dot(-x[i].T, inv(covariance_matrix)), x[i]) / 2)\n return vec\n\n\ndef get_action(s, thetas, epsilon=0):\n if np.random.random() < epsilon:\n return np.random.randint(nA)\n else:\n action_probabilites = softmax(s, thetas)\n return np.random.choice(range(nA), p=action_probabilites.flatten())\n\n\ndef get_state_value(S, W):\n \"\"\"\n Return V(S)\n :param S: current state (p, v)\n :param W: Weights of state Value function (\n :return: state value\n \"\"\"\n F_arr = features_vector(S)\n return W.T @ F_arr\n\n\ndef softmax(s, theta):\n F_arr = features_vector(s)\n policy = np.exp(theta.T @ F_arr - max(theta.T @ F_arr))\n policy = policy / np.sum(policy)\n return policy\n\n\ndef get_expected_feature(S, policy):\n \"\"\"\n Return expected features based on policy derived from theta with softmax\n :param theta: vector of weights\n :return:\n \"\"\"\n F_arr = features_vector(S)\n return F_arr @ policy.T\n\n\ndef actor_critic(alpha_theta, alpha_w):\n \"\"\"\n Solves for the environment\n :param alpha_w: learning rate for w (state-value func)\n :param alpha_theta: learning rate for theta (policy)\n :return: learned parameters theta, W\n \"\"\"\n X, Y = [], []\n epsilon = 0.1\n\n W = np.ones((nC, 1))\n thetas = np.ones((nC, nA))\n\n\n S = env.reset()\n t_episode = 0\n I = 1\n for t in tqdm(range(int(20000)), desc=\"Actor-Critic Steps\"):\n env.render()\n epsilon = 0.99999 * epsilon\n\n phi = features_vector(S)\n policy = softmax(S, thetas)\n A = get_action(S, thetas, epsilon)\n S_tag, R, done, info = env.step(A)\n\n delta = R + gamma * get_state_value(S_tag, W) - get_state_value(S, W)\n W = W + alpha_w * delta * phi\n\n F_mat = np.zeros((nC, nA))\n F_mat[:, A] = features_vector(S).flatten()\n thetas = thetas + alpha_theta * I * delta * (F_mat - get_expected_feature(S, policy))\n\n I = gamma * I\n S = S_tag\n if done or t_episode >= max_episode_steps:\n S = env.reset()\n t_episode = 0\n\n if t != 0 and t % x_step_size == 0:\n value = policy_value(thetas)\n print(\"\\n***** Appending Data *****)\")\n print(\"X: {}\".format(t))\n print(\"Y: {}\".format(value))\n print()\n X.append(t)\n Y.append(value)\n\n return W, thetas, X, Y\n\n\ndef simulate_agent(thetas):\n S = env.reset()\n for t in range(max_episode_steps):\n env.render()\n A = get_action(S, thetas)\n S, reward, done, info = env.step(A)\n if done:\n env.render()\n break\n env.close()\n\n\ndef policy_value(thetas):\n num_episodes = 100\n returns = []\n for i in tqdm(range(num_episodes), desc=\"Evaluating Policy Episode\"):\n S = env.reset()\n t = 0\n discounted_rewards = []\n while True: # begin episode\n A = get_action(S, thetas)\n S, R, done, info = env.step(A)\n discounted_rewards.append((gamma ** t) * R)\n t += 1\n if done or t >= max_episode_steps:\n break\n episode_return = np.sum(discounted_rewards)\n returns.append(episode_return)\n return np.mean(returns)\n\n\ndef print_title(s):\n print()\n print(\"*\" * len(s))\n print(s)\n print(\"*\" * len(s))\n\n\nif __name__ == '__main__':\n # 
Initialization - GYM\n max_episode_steps = 500\n env = gym.make('MountainCar-v0')\n env._max_episode_steps = max_episode_steps\n nA = 3\n env.reset()\n\n # Initialization - Actor-Critic\n gamma = 1\n alpha_theta = 0.02\n alpha_W = 0.08\n x_step_size = 500\n sigma_p = 0.04\n sigma_v = 0.0004\n covariance_matrix = np.diag((sigma_p, sigma_v))\n\n # Initialization\n N_p = 8\n N_v = 8\n nC = N_p * N_v\n position_min, position_max = env.observation_space.low[0] * 0.9, env.observation_space.high[0] * 0.9\n velocity_min, velocity_max = env.observation_space.low[1] * 0.9, env.observation_space.high[1] * 0.9\n C_p = np.linspace(position_min, position_max, num=N_p, endpoint=True)\n C_v = np.linspace(velocity_min, velocity_max, num=N_v, endpoint=True)\n C_arr = np.transpose([np.tile(C_p, len(C_v)), np.repeat(C_v, len(C_p))])\n\n # 0 Render full episode with learned policy\n print_title(\"Simulating Learned Policy\")\n try:\n thetas = np.loadtxt(\"thetas.csv\")\n print(\"A weights file was found. Simulating:\")\n simulate_agent(thetas)\n except:\n print(\"A weights file was not found.\")\n\n # 1 Run actor-critic\n print_title(\"Starting actor-critic with env{}\".format(env.unwrapped.spec.id))\n W, thetas, X, Y = actor_critic(alpha_theta, alpha_W)\n\n # 2 Output/append graphics\n plt.figure(figsize=(12, 7))\n plt.xlabel(\"Steps\")\n plt.ylabel(\"Estimated Policy Value\")\n plt.title(\"Estimated Policy Value\")\n plt.plot(X, Y)\n\n # 3 Show Graphics\n print_title(\"Showing Graphics\")\n plt.show()\n\n # 4. Save weights to file\n fname = \"weights_{}.csv\".format(datetime.now().strftime(\"%d-%m-%Y-%H-%M-%S\"))\n print_title(\"Saving weights to file {}\".format(fname))\n np.savetxt(fname, W)\n fname = \"thetas_{}.csv\".format(datetime.now().strftime(\"%d-%m-%Y-%H-%M-%S\"))\n print_title(\"Saving thetas to file {}\".format(fname))\n np.savetxt(fname, thetas)\n\n" ]
[ [ "numpy.diag", "numpy.random.random", "numpy.linspace", "matplotlib.pyplot.title", "numpy.asarray", "numpy.linalg.inv", "numpy.ones", "matplotlib.pyplot.plot", "matplotlib.pyplot.ylabel", "numpy.mean", "numpy.random.randint", "numpy.savetxt", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "numpy.zeros", "numpy.sum", "numpy.loadtxt", "matplotlib.pyplot.figure" ] ]
jjhenkel/nteract
[ "088222484b59af14b1da22de4d0990d8925adf95" ]
[ "applications/jupyter-extension/nteract_on_jupyter/notebooks/utils/cb/python/lowlevel.py" ]
[ "import json\nimport regex\nimport pickle\nimport os.path\nimport xxhash\nimport pandas as pd\nimport pyarrow as pa\nimport pyarrow.dataset as ds \nimport pyarrow.parquet as pq \nimport pyarrow.gandiva as gd\n\nTYPE_TO_BYTES = [\n 'aliased_import',\n 'argument_list',\n 'assert_statement',\n 'assignment',\n 'attribute',\n 'augmented_assignment',\n 'await',\n 'binary_operator',\n 'block',\n 'boolean_operator',\n 'break_statement',\n 'call',\n 'chevron',\n 'class_definition',\n 'comment',\n 'comparison_operator',\n 'compound_statement',\n 'concatenated_string',\n 'conditional_expression',\n 'continue_statement',\n 'decorated_definition',\n 'decorator',\n 'default_parameter',\n 'delete_statement',\n 'dictionary',\n 'dictionary_comprehension',\n 'dictionary_splat',\n 'dictionary_splat_pattern',\n 'dotted_name',\n 'elif_clause',\n 'ellipsis',\n 'else_clause',\n 'escape_sequence',\n 'except_clause',\n 'exec_statement',\n 'expression',\n 'expression_list',\n 'expression_statement',\n 'f_alias',\n 'f_alternative',\n 'f_argument',\n 'f_arguments',\n 'f_attribute',\n 'f_body',\n 'f_cause',\n 'f_child',\n 'f_code',\n 'f_condition',\n 'f_consequence',\n 'f_definition',\n 'f_function',\n 'f_key',\n 'f_left',\n 'f_module_name',\n 'f_name',\n 'f_object',\n 'f_operator',\n 'f_parameters',\n 'f_return_type',\n 'f_right',\n 'f_subscript',\n 'f_superclasses',\n 'f_type',\n 'f_value',\n 'false',\n 'finally_clause',\n 'float',\n 'for_in_clause',\n 'for_statement',\n 'format_expression',\n 'format_specifier',\n 'function_definition',\n 'future_import_statement',\n 'generator_expression',\n 'global_statement',\n 'identifier',\n 'if_clause',\n 'if_statement',\n 'import_from_statement',\n 'import_prefix',\n 'import_statement',\n 'integer',\n 'interpolation',\n 'keyword_argument',\n 'lambda',\n 'lambda_parameters',\n 'list',\n 'list_comprehension',\n 'list_pattern',\n 'list_splat',\n 'list_splat_pattern',\n 'module',\n 'named_expression',\n 'none',\n 'nonlocal_statement',\n 'not_operator',\n 'pair',\n 'parameter',\n 'parameters',\n 'parenthesized_expression',\n 'parenthesized_list_splat',\n 'pass_statement',\n 'pattern',\n 'pattern_list',\n 'primary_expression',\n 'print_statement',\n 'raise_statement',\n 'relative_import',\n 'return_statement',\n 'set',\n 'set_comprehension',\n 'simple_statement',\n 'slice',\n 'string',\n 'subscript',\n 'true',\n 'try_statement',\n 'tuple',\n 'tuple_pattern',\n 'type',\n 'type_conversion',\n 'typed_default_parameter',\n 'typed_parameter',\n 'unary_operator',\n 'while_statement',\n 'wildcard_import',\n 'with_clause',\n 'with_item',\n 'with_statement',\n 'yield',\n]\n\n__F_CACHE = {}\n__DATA = None\n__T_CACHE = {}\n__M_CACHE = {}\n\ndef type_to_bytes(the_type):\n return TYPE_TO_BYTES.index(the_type).to_bytes(2, byteorder='little')\n\ndef type_to_idx(the_type):\n return TYPE_TO_BYTES.index(the_type)\n\nQUERY_LANG = r\"^((\\{(>|<)?=?\\d*\\})(\\^)?(\\\".*?\\\"=)?\\(?([a-z_0-9]+)?\\)?(\\.f_[a-z_0-9]+)?(\\[\\d+\\])?)+$\"\nSUB_QUERY_LANG = r\"(\\{(>|<)?=?\\d*\\})(\\^)?(\\\".*?\\\"=)?\\(?([a-z_0-9]+)?\\)?(\\.f_[a-z_0-9]+)?(\\[\\d+\\])?\"\nQUERY_REGEX = regex.compile(QUERY_LANG)\nSUB_QUERY_REGEX = regex.compile(SUB_QUERY_LANG)\n\ndef get_text_fragement(fid, start, end, debug=False):\n if debug:\n return 'get_file({}@{}:{})'.format(fid, start, end), 0, 0, 0, 0\n try:\n if fid not in __F_CACHE:\n with open('/data/raw-files/{}.txt'.format(fid), 'rb') as fh:\n __F_CACHE[fid] = fh.read().decode('utf-8')\n text = __F_CACHE[fid][start:end]\n last_newline = __F_CACHE[fid].rindex('\\n', 0, start)\n 
start_col = start - last_newline\n start_line = __F_CACHE[fid].count('\\n', 0, start)\n last_newline = __F_CACHE[fid].rindex('\\n', 0, end)\n end_col = end - last_newline\n end_line = __F_CACHE[fid].count('\\n', 0, end)\n return text, start_line + 1, start_col, end_line + 1, end_col\n except Exception as ex:\n return str(ex) + '\\ncant find {}@{}:{}'.format(fid, start, end), 0, 0, 0, 0\n\n\ndef decode_op_dist(dist):\n if dist is None or len(dist.replace('{', '').replace('}', '')) <= 0:\n return 0, int(0)\n \n dist = dist[1:-1]\n if dist[0] == '=':\n return 5, int(dist[1:])\n elif dist[:2] == '<=':\n return 2, int(dist[2:])\n elif dist[:2] == '>=':\n return 4, int(dist[2:])\n elif dist[0] == '<':\n return 1, int(dist[1:])\n elif dist[0] == '>':\n return 3, int(dist[1:])\n else:\n return 0, int(dist)\n\n\ndef parse_query(query_string, type_to_idx, debug=False):\n builder = gd.TreeExprBuilder()\n\n if query_string[0] != '{':\n query_string = '{}' + query_string\n\n match = regex.match(QUERY_REGEX, query_string, version=regex.V1)\n\n target_func = 'match_tree_path_{}_'.format(len(match.captures(1)))\n generic_func = target_func + 'generic'\n\n params = []\n generic_params_pre = []\n generic_params_post = []\n\n first_label = '?'\n\n for i, sub_string in enumerate(match.captures(1)):\n sub_match = regex.match(SUB_QUERY_REGEX, sub_string, version=regex.V1)\n steps, _, negate, name, label, field, index = sub_match.groups()\n\n if first_label == '?':\n first_label = label\n\n negate = negate == '^'\n\n match_name = name is not None\n name = name[1:-2] if match_name else None\n \n match_label = label is not None\n label = type_to_idx(label) if match_label else 0\n\n match_field = field is not None\n field = type_to_idx(field[1:]) if match_field else 0\n\n match_index = index is not None\n index = int(index[1:-1]) if match_index else 0\n \n steps_op, steps_dist = decode_op_dist(steps)\n\n target_func += ('1' if negate else '0')\n generic_params_pre.append(builder.make_literal(negate, pa.bool_()))\n target_func += ('1' if match_label else '0')\n generic_params_pre.append(builder.make_literal(match_label, pa.bool_()))\n target_func += ('1' if match_name else '0')\n generic_params_pre.append(builder.make_literal(match_name, pa.bool_()))\n target_func += ('1' if match_field else '0')\n generic_params_pre.append(builder.make_literal(match_field, pa.bool_()))\n target_func += ('1' if match_index else '0')\n generic_params_pre.append(builder.make_literal(match_index, pa.bool_()))\n \n if match_label:\n params.append(builder.make_literal(label, pa.uint16()))\n generic_params_post.append(builder.make_literal(label, pa.uint16()))\n else:\n generic_params_post.append(builder.make_literal(0, pa.uint16()))\n\n\n if match_name:\n as_hash = int.from_bytes(\n xxhash.xxh64(name, seed=3235823838).intdigest().to_bytes(8, byteorder='little'),\n signed=True, byteorder=\"little\"\n )\n params.append(builder.make_literal(as_hash, pa.int64()))\n generic_params_post.append(builder.make_literal(as_hash, pa.int64()))\n else:\n generic_params_post.append(builder.make_literal(0, pa.int64()))\n \n if match_field:\n params.append(builder.make_literal(field, pa.uint16()))\n generic_params_post.append(builder.make_literal(field, pa.uint16()))\n else:\n generic_params_post.append(builder.make_literal(0, pa.uint16()))\n\n if match_index:\n params.append(builder.make_literal(index, pa.uint16()))\n generic_params_post.append(builder.make_literal(index, pa.uint16()))\n else:\n generic_params_post.append(builder.make_literal(0, 
pa.uint16()))\n \n if steps_op == 5:\n target_func += '3'\n params.append(builder.make_literal(steps_dist, pa.uint16()))\n generic_params_post.append(builder.make_literal(steps_dist, pa.uint16()))\n generic_params_pre.append(builder.make_literal(3, pa.uint16()))\n elif steps_op == 4:\n target_func += '2'\n params.append(builder.make_literal(steps_dist - 1, pa.uint16()))\n generic_params_post.append(builder.make_literal(steps_dist - 1, pa.uint16()))\n generic_params_pre.append(builder.make_literal(2, pa.uint16()))\n elif steps_op == 3:\n target_func += '2'\n params.append(builder.make_literal(steps_dist, pa.uint16()))\n generic_params_post.append(builder.make_literal(steps_dist, pa.uint16()))\n generic_params_pre.append(builder.make_literal(2, pa.uint16()))\n elif steps_op == 2:\n target_func += '1'\n params.append(builder.make_literal(steps_dist + 1, pa.uint16()))\n generic_params_post.append(builder.make_literal(steps_dist + 1, pa.uint16()))\n generic_params_pre.append(builder.make_literal(1, pa.uint16()))\n elif steps_op == 1:\n target_func += '1'\n params.append(builder.make_literal(steps_dist, pa.uint16()))\n generic_params_post.append(builder.make_literal(steps_dist, pa.uint16()))\n generic_params_pre.append(builder.make_literal(1, pa.uint16()))\n else:\n target_func += '0'\n generic_params_post.append(builder.make_literal(0, pa.uint16()))\n generic_params_pre.append(builder.make_literal(0, pa.uint16()))\n\n target_func += '_'\n \n target_func = target_func[:-1]\n if debug:\n print(first_label, target_func, params)\n\n\n generic_params = (generic_params_pre, generic_params_post)\n return first_label, generic_func, target_func, generic_params, params, builder\n\n\ndef get_text_from_capture(res, cidx):\n offset = 32 + (cidx - 1) * 40\n return get_text_fragement(\n int.from_bytes(res[0:8], signed=True, byteorder=\"little\"),\n int.from_bytes(res[offset+0:offset+4], signed=False, byteorder=\"little\"),\n int.from_bytes(res[offset+4:offset+8], signed=False, byteorder=\"little\")\n )\n\ndef get_texts_from_capture(res, cidx, node_type, limit=None):\n offset = 32 + (cidx - 1) * 40\n gid_offset = 8 + (cidx - 1) * 40\n out = []\n for r in res[:limit] if limit is not None else res:\n gid = str(int.from_bytes(r[gid_offset:gid_offset+8], signed=True, byteorder=\"little\"))\n fid = str(int.from_bytes(r[0:8], signed=True, byteorder=\"little\"))\n sidx = int.from_bytes(r[offset+0:offset+4], signed=False, byteorder=\"little\")\n eidx = int.from_bytes(r[offset+4:offset+8], signed=False, byteorder=\"little\")\n text, sl, sc, el, ec = get_text_fragement(fid, sidx, eidx)\n out.append({\n 'gid': gid,\n 'fid': fid,\n 's_line': sl,\n 's_col': sc,\n 'e_line': el,\n 'e_col': ec,\n 'text': text,\n 'type': node_type,\n 'project': __M_CACHE[fid][0] if fid in __M_CACHE else '???',\n 'version': __M_CACHE[fid][1] if fid in __M_CACHE else '???',\n 'file_path': __M_CACHE[fid][2] if fid in __M_CACHE else '???',\n })\n return out\n\n\ndef query_python(query_string, extra=\"file_id\", name_is=None, name_regex=None):\n global __DATA, __T_CACHE\n \n root_type, generic_func, target_func, generic_params, params, builder = parse_query(\n query_string, type_to_idx\n )\n\n as_table = None\n proj = None\n\n if __DATA is None:\n __DATA = ds.dataset('/data/parquet', format='parquet', partitioning='hive')\n\n if root_type not in __T_CACHE:\n the_filter = ds.field('type') == root_type\n\n extra_cols = [extra, 'gid', 'project', 'version', 'file_path']\n if name_is is not None:\n extra_cols.append('name')\n the_filter = the_filter & 
(ds.field('name') == name_is)\n elif name_regex is not None:\n print('Regex name filter not yet supported')\n\n __T_CACHE[root_type] = __DATA.to_table(\n columns=['path'] + extra_cols,\n filter=the_filter\n )\n \n \n as_table = __T_CACHE[root_type]\n \n params = [\n builder.make_field(as_table.schema.field(extra)),\n builder.make_field(as_table.schema.field('path'))\n ] + params\n generic_params = generic_params[0] + [\n builder.make_field(as_table.schema.field(extra)),\n builder.make_field(as_table.schema.field('path'))\n ] + generic_params[1]\n\n proj = None\n\n try:\n proj = gd.make_projector(as_table.schema, [\n builder.make_expression(\n builder.make_function(target_func, params, pa.binary()),\n pa.field(\"result\", pa.binary())\n )\n ], pa.default_memory_pool())\n except:\n proj = gd.make_projector(as_table.schema, [\n builder.make_expression(\n builder.make_function(generic_func, generic_params, pa.binary()),\n pa.field(\"result\", pa.binary())\n )\n ], pa.default_memory_pool())\n\n total = []\n for record_batch in as_table.to_batches():\n res, = proj.evaluate(record_batch)\n for i, row in res.to_pandas().items():\n if row == b'':\n continue\n total.append(row)\n if str(record_batch['file_id'][i]) not in __M_CACHE:\n __M_CACHE[str(record_batch['file_id'][i])] = (\n str(record_batch['project'][i]),\n str(record_batch['version'][i]),\n str(record_batch['file_path'][i])\n )\n \n if len(total) > 0:\n return pd.Series(total)\n else:\n return pd.Series([ b'' ])\n\n\ndef merge_paths(series_l, series_r, on):\n on_l, on_r = on\n\n frame_l = series_l.copy()\n if not isinstance(series_l, pd.DataFrame):\n frame_l = series_l.copy().to_frame(name=\"dat\")\n\n frame_r = series_r.copy()\n if not isinstance(series_r, pd.DataFrame):\n frame_r = series_r.copy().to_frame(name=\"dat\")\n\n target_l = None\n if on_l.startswith('left.') or on_l.startswith('right.'):\n parts = on_l.split('.')[:-2][::-1]\n the_ref = 'dat_' + '_'.join([ part[0] for part in parts ])\n target_l = frame_l[the_ref]\n on_l = '.'.join(on_l.split('.')[-2:])\n else:\n target_l = frame_l.dat\n\n target_r = None\n if on_r.startswith('left.') or on_r.startswith('right.'):\n parts = on_r.split('.')[:-2][::-1]\n the_ref = 'dat_' + '_'.join([ part[0] for part in parts ])\n target_r = frame_r[the_ref]\n on_r = '.'.join(on_r.split('.')[-2:])\n else:\n target_r = frame_r.dat\n\n if on_l.startswith('defs.'):\n cindex = int(on_l.replace('defs.', '')) - 1\n frame_l['key'] = target_l.str[16+40*cindex:24+40*cindex]\n elif on_l.startswith('gids.'):\n cindex = int(on_l.replace('gids.', '')) - 1\n frame_l['key'] = target_l.str[8+40*cindex:16+40*cindex]\n\n if on_r.startswith('defs.'):\n cindex = int(on_r.replace('defs.', '')) - 1\n frame_r['key'] = target_r.str[16+40*cindex:24+40*cindex]\n elif on_r.startswith('gids.'):\n cindex = int(on_r.replace('gids.', '')) - 1\n frame_r['key'] = target_r.str[8+40*cindex:16+40*cindex]\n \n frame_l.columns = frame_l.columns.map(lambda x: str(x) + '_l')\n frame_r.columns = frame_r.columns.map(lambda x: str(x) + '_r')\n\n return frame_l.merge(\n frame_r,\n how=\"inner\",\n left_on=\"key_l\",\n right_on=\"key_r\"\n )\n\n\ndef get_results(result_set, labels):\n if len(result_set) == 1 and 0 in result_set and result_set[0] == b'':\n return { }\n\n def _get_all_labels(cur):\n if isinstance(cur, list):\n res = []\n for i, l in enumerate(cur):\n if l[0] is not None:\n res.append(('dat', i + 1, l)) \n return res\n \n return list(map(\n lambda x: (x[0] + '_l', x[1], x[2]),\n _get_all_labels(cur['left'])\n )) + list(map(\n 
lambda x: (x[0] + '_r', x[1], x[2]),\n _get_all_labels(cur['right'])\n ))\n \n results_map = { }\n for path, idx, (label, node_type) in _get_all_labels(labels):\n if path == 'dat':\n results_map[label] = get_texts_from_capture(result_set, idx, node_type)\n else:\n results_map[label] = get_texts_from_capture(result_set[path], idx, node_type)\n \n return results_map\n\n\ndef display_results(results, limit=10, just_text=False):\n if not just_text:\n from IPython import display\n display.display({\n 'application/code-book-matches+json': { 'results': results, 'lang': 'python' }\n }, raw=True)\n return\n\n for rs, results_map in enumerate(results):\n if '$match' not in results_map:\n continue\n other_keys = sorted([ k for k in results_map.keys() if k != '$match' ])\n for i, val in enumerate(results_map['$match']):\n if i > limit:\n print('Over {} matches. Stopping early.'.format(limit))\n break\n print('Match (RS#{}):\\n```\\n{}\\n```'.format(\n rs + 1, val['text']\n ))\n for j, k in enumerate(other_keys):\n print(' ' * j + '└─ {}: ```\\n{}{}\\n{}```'.format(\n k,\n ' ' * (j+3),\n results_map[k][i]['text'].replace('\\n', '\\n' + ' ' * (j+3)),\n ' ' * (j+3)\n ))\n \n" ]
[ [ "pandas.Series" ] ]
flying-sheep/pyRserve
[ "7bc53fe3ebda026df3e5ed27484fb9fbeabddcff" ]
[ "pyRserve/taggedContainers.py" ]
[ "\"\"\"\nSome specialized list and array classes to store results obtained from R. These\nclasses provide means not to only access object items by index but also - sort\nof like a dictionary - by key. However keys must not be unique or can even be\nNone. In those cases only the first item with that key is found.\n\nAvailable classes:\n- TaggedList\n- TaggedArray\n\"\"\"\nimport numpy\n\n\nclass TaggedList(object):\n # This code is mainly based on UserList.UserList and modified for tags\n \"\"\"\n A tagged list is useful for additionally addressing individual items by\n name instead of only by index. In contrast to dictionaries multiple items\n can have the same name or key. However only the first one will be found.\n\n In many cases a TaggedList behaves like a normal list, however for lazyness\n reasons of the programmer not all methods are implemented yet.\n\n Example:\n l = TaggedList( [('v1', 1), ('v2', 2), 3, ('v2', 4)] )\n l[0] # returns 1\n l['v1'] # returns 1\n l['v2'] # returns 2 (not 4 !)\n l[3] # returns 4\n\n Data can be appended or inserted in the following way:\n l.insert(0, x=3)\n l['x'] # returns 3\n l[0] # also returns 3\n\n l.append(y=3)\n l[-1] # returns 3\n \"\"\"\n def __init__(self, initlist=[]):\n \"\"\"\n Items in initlist can either be\n - tuples of (key,values)\n - or plain values\n Keys can be None or empty strings in item tuples.\n \"\"\"\n self.values = []\n self.keys = []\n for idx, item in enumerate(initlist):\n try:\n key, value = item\n key = None if key == '' else key\n except (TypeError, ValueError):\n value = item\n key = None\n finally:\n self.values.append(value)\n self.keys.append(key)\n\n def astuples(self):\n \"\"\"\n Convert a TaggedList into a representation suitable to be provided\n to __init__()\n \"\"\"\n return list(zip(self.keys, self.values))\n\n def __repr__(self):\n data = [\"%s=%s\" % (key, repr(value)) if key else \"'%s'\" % value\n for key, value in self.astuples()]\n return '<TaggedList(%s)>' % ', '.join(data)\n\n # def __lt__(self, other): return self.values < self.__cast(other)\n # def __le__(self, other): return self.values <= self.__cast(other)\n # def __eq__(self, other): return self.values == self.__cast(other)\n # def __ne__(self, other): return self.values != self.__cast(other)\n # def __gt__(self, other): return self.values > self.__cast(other)\n # def __ge__(self, other): return self.values >= self.__cast(other)\n # def __cast(self, other):\n # if isinstance(other, UserList): return other.data\n # else: return other\n # def __cmp__(self, other):\n # return cmp(self.values, self.__cast(other))\n __hash__ = None # Mutable sequence, so not hashable\n\n def __eq__(self, other):\n if not isinstance(other, self.__class__):\n return False\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __contains__(self, item):\n return item in self.values\n\n def __len__(self):\n return len(self.values)\n\n def __getitem__(self, i):\n if type(i) == str:\n i = self.keys.index(i)\n return self.values[i]\n\n def __setitem__(self, i, item):\n if type(i) == str:\n i = self.keys.index[i]\n self.values[i] = item\n\n def __delitem__(self, i):\n if type(i) == str:\n i = self.keys.index[i]\n del self.keys[i]\n del self.values[i]\n\n def __getslice__(self, i, j):\n i = max(i, 0)\n j = max(j, 0)\n return self.__class__(self.astuples()[i:j])\n\n # def __setslice__(self, i, j, other):\n # i = max(i, 0); j = max(j, 0)\n # if isinstance(other, UserList):\n # self.values[i:j] = other.data\n # elif 
isinstance(other, type(self.values)):\n # self.values[i:j] = other\n # else:\n # self.values[i:j] = list(other)\n\n def __delslice__(self, i, j):\n raise NotImplementedError()\n # i = max(i, 0); j = max(j, 0)\n # del self.values[i:j]\n # del self.keys[i:j]\n\n def __add__(self, other):\n raise NotImplementedError()\n # if isinstance(other, UserList):\n # return self.__class__(self.values + other.data)\n # elif isinstance(other, type(self.values)):\n # return self.__class__(self.values + other)\n # else:\n # return self.__class__(self.values + list(other))\n\n def __radd__(self, other):\n raise NotImplementedError()\n # if isinstance(other, UserList):\n # return self.__class__(other.data + self.values)\n # elif isinstance(other, type(self.values)):\n # return self.__class__(other + self.values)\n # return self.__class__(list(other) + self.values)\n\n def __iadd__(self, other):\n raise NotImplementedError()\n # if isinstance(other, UserList):\n # self.values += other.data\n # elif isinstance(other, type(self.values)):\n # self.values += other\n # else:\n # self.values += list(other)\n # return self\n\n def __mul__(self, n):\n raise NotImplementedError()\n # return self.__class__(self.values*n)\n __rmul__ = __mul__\n\n def __imul__(self, n):\n raise NotImplementedError()\n # self.values *= n\n # return self\n\n def append(self, *value, **key_and_value):\n \"\"\"\n Append an item to the list, either given as plain value or as a\n keyword-arg pair.\n Example:\n taggedlist.append(4)\n or\n taggedlist.append(k=4)\n \"\"\"\n if len(value) == 1 and not key_and_value:\n key = None\n value = value[0]\n elif len(key_and_value) == 1 and not value:\n [(key, value)] = key_and_value.items()\n else:\n raise ValueError(\"Only either one single value or one single pair \"\n \"of key/value is allowed\")\n self.values.append(value)\n self.keys.append(key)\n\n def insert(self, i, *value, **key_and_value):\n \"\"\"\n Insert an item in the list at position i, either given as plain value\n or as a keyword-arg pair.\n Example:\n taggedlist.insert(4, 'abc)\n or\n taggedlist.append(4, k='abc')\n \"\"\"\n if len(value) == 1 and not key_and_value:\n key = None\n value = value[0]\n elif len(key_and_value) == 1 and not value:\n [(key, value)] = key_and_value.items()\n else:\n raise ValueError(\"Only either one single value or one single pair \"\n \"of key/value is allowed\")\n self.values.insert(i, value)\n self.keys.insert(i, key)\n\n def pop(self, i=-1):\n \"\"\"\n Remove an item from the list. 
By default the last item will be removed.\n If an item at a specific position should be removed, pass an additional\n index arguemnt.\n \"\"\"\n return self.values.pop(i)\n\n def remove(self, item):\n raise NotImplementedError()\n # self.values.remove(item)\n\n def count(self, item):\n return self.values.count(item)\n\n def index(self, item, *args):\n return self.values.index(item, *args)\n\n def reverse(self):\n self.values.reverse()\n self.keys.reverse()\n\n def sort(self, *args, **kwds):\n raise NotImplementedError()\n # self.values.sort(*args, **kwds)\n\n def extend(self, other):\n raise NotImplementedError()\n # if isinstance(other, UserList):\n # self.values.extend(other.data)\n # else:\n # self.values.extend(other)\n\n\nclass AttrArray(numpy.ndarray):\n \"\"\"\n numpy.ndarray with additional \"attr\"-container.\n Used as base class for TaggedArray.\n \"\"\"\n attr = None\n\n def __repr__(self):\n r = super(AttrArray, self).__repr__()\n if hasattr(self, 'attr'):\n return r[:-1] + ', attr=' + repr(self.attr) + ')'\n return r\n\n @classmethod\n def new(cls, data, attr):\n \"\"\"\n Factory method to create AttrArray objects from ndarrays or Python\n lists.\n Usage:\n AttrArray.new(array([1, 2, 3, 4]), {'attr1': val1, 'attr2': val2})\n \"\"\"\n if not isinstance(data, numpy.ndarray):\n # assume it is a Python list or any other valid data type\n # for arrays\n arr = numpy.array(data)\n else:\n arr = data\n\n attrArr = arr.view(cls)\n attrArr.attr = attr\n return attrArr\n\n\ndef asAttrArray(data, attr):\n return AttrArray.new(data, attr)\n\n\nclass TaggedArray(AttrArray):\n \"\"\"\n A tagged array is useful for additionally addressing individual items by\n name instead of only by index. In contrast to dictionaries multiple items\n can have the same name or key. However only the first one will be found.\n\n In many cases a TaggedArray behaves like a normal array and is the\n equivalent for TaggedList.\n This class is basically only useful to translate results created by R into\n something useful in Python.\n\n Instances of TaggedArray should only be created using the factory function\n 'asTaggedArray([values)], [tags])', where 'values' and 'tags' can be plain\n python lists or numpy-arrays.\n\n Example:\n l = asTaggedArray(array([1, 2, 3, 4]), ['v1', 'v2', 'v3', 'v4'])\n l[0] # returns 1\n l['v1'] # returns 1\n l['v2'] # returns 2 (not 4 !)\n l[3] # returns 4\n\n It is recommended not to do lots of manipulations that modify the\n structure of the arrary. This could lead to mismatched btw. tags and\n values (those are only very loosely coupled internally). 
However any type\n of mathematics like multiplying the array should be possible without\n problems.\n \"\"\"\n attr = []\n\n def __repr__(self):\n r = super(AttrArray, self).__repr__()\n if hasattr(self, 'attr'):\n return r[:-1] + ', key=' + repr(self.attr) + ')'\n return r\n\n def __getitem__(self, idx_or_name):\n try:\n return numpy.ndarray.__getitem__(self, idx_or_name)\n except:\n pass\n try:\n return numpy.ndarray.__getitem__(self,\n self.attr.index(idx_or_name))\n except ValueError:\n raise KeyError('No key \"%s\" available for array' % idx_or_name)\n\n def keys(self):\n return self.attr[:]\n\n @classmethod\n def new(cls, data, tags):\n \"\"\"\n Factory method to create TaggedArray objects from ndarrays or Python\n lists.\n Check the docs in TaggedArray for more information.\n Usage:\n l = TaggedArray.new(array([1, 2, 3, 4]), ['v1', 'v2', 'v3', 'v4'])\n l[0] # returns 1\n l['v1'] # returns 1\n l['v2'] # returns 2 (not 4 !)\n l[3] # returns 4\n \"\"\"\n if len(tags) != len(data):\n raise ValueError('Number of keys must match size of array')\n if not isinstance(data, numpy.ndarray):\n # assume it is a Python list or any other valid data type\n # for arrays\n arr = numpy.array(data)\n else:\n arr = data\n\n taggedArr = arr.view(cls)\n taggedArr.attr = tags\n return taggedArr\n\n\ndef asTaggedArray(data, tags):\n return TaggedArray.new(data, tags)\n" ]
[ [ "numpy.array", "numpy.ndarray.__getitem__" ] ]
satyenataagyaa/drl-portfolio-management
[ "7fccc39bd3c19b6ea5d6511444e7c4bb60a49a8b" ]
[ "src/model/ddpg/critic.py" ]
[ "\"\"\"\nCritic Network definition, the input is (o, a_{t-1}, a_t) since (o, a_{t-1}) is the state.\nBasically, it evaluates the value of (current action, previous action and observation) pair\n\"\"\"\n\nimport tensorflow as tf\nimport tflearn\n\n\nclass CriticNetwork(object):\n \"\"\"\n Input to the network is the state and action, output is Q(s,a).\n The action must be obtained from the output of the Actor network.\n \"\"\"\n\n def __init__(self, sess, state_dim, action_dim, learning_rate, tau, num_actor_vars):\n self.sess = sess\n assert isinstance(state_dim, list), 'state_dim must be a list.'\n self.s_dim = state_dim\n assert isinstance(action_dim, list), 'action_dim must be a list.'\n self.a_dim = action_dim\n self.learning_rate = learning_rate\n self.tau = tau\n\n # Create the critic network\n self.inputs, self.action, self.out = self.create_critic_network()\n\n self.network_params = tf.trainable_variables()[num_actor_vars:]\n\n # Target Network\n self.target_inputs, self.target_action, self.target_out = self.create_critic_network()\n\n self.target_network_params = tf.trainable_variables()[(len(self.network_params) + num_actor_vars):]\n\n # Op for periodically updating target network with online network\n # weights with regularization\n self.update_target_network_params = \\\n [self.target_network_params[i].assign(tf.multiply(self.network_params[i], self.tau) \\\n + tf.multiply(self.target_network_params[i], 1. - self.tau))\n for i in range(len(self.target_network_params))]\n\n # Network target (y_i)\n self.predicted_q_value = tf.placeholder(tf.float32, [None, 1])\n\n # Define loss and optimization Op\n self.loss = tflearn.mean_square(self.predicted_q_value, self.out)\n self.optimize = tf.train.AdamOptimizer(\n self.learning_rate).minimize(self.loss)\n\n # Get the gradient of the net w.r.t. the action.\n # For each action in the minibatch (i.e., for each x in xs),\n # this will sum up the gradients of each critic output in the minibatch\n # w.r.t. that action. Each output is independent of all\n # actions except for one.\n self.action_grads = tf.gradients(self.out, self.action)\n\n def create_critic_network(self):\n raise NotImplementedError('Create critic should return (inputs, action, out)')\n\n def train(self, inputs, action, predicted_q_value):\n return self.sess.run([self.out, self.optimize], feed_dict={\n self.inputs: inputs,\n self.action: action,\n self.predicted_q_value: predicted_q_value\n })\n\n def predict(self, inputs, action):\n return self.sess.run(self.out, feed_dict={\n self.inputs: inputs,\n self.action: action\n })\n\n def predict_target(self, inputs, action):\n return self.sess.run(self.target_out, feed_dict={\n self.target_inputs: inputs,\n self.target_action: action\n })\n\n def action_gradients(self, inputs, actions):\n return self.sess.run(self.action_grads, feed_dict={\n self.inputs: inputs,\n self.action: actions\n })\n\n def update_target_network(self):\n self.sess.run(self.update_target_network_params)\n" ]
[ [ "tensorflow.multiply", "tensorflow.gradients", "tensorflow.placeholder", "tensorflow.train.AdamOptimizer", "tensorflow.trainable_variables" ] ]
Yash-567/ClassyVision
[ "356ddca8c12c871cc950f085ab514e95aa590e11" ]
[ "classy_vision/tasks/classification_task.py" ]
[ "#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport copy\nimport enum\nimport json\nimport logging\nimport math\nimport multiprocessing as mp\nimport time\nfrom itertools import chain\nfrom typing import Any, Dict, List, NamedTuple, Optional, Union\n\nimport torch\nimport torch.nn as nn\nfrom classy_vision.dataset import ClassyDataset, build_dataset\nfrom classy_vision.dataset.transforms.mixup import MixupTransform\nfrom classy_vision.generic.distributed_util import (\n all_reduce_mean,\n barrier,\n init_distributed_data_parallel_model,\n is_distributed_training_run,\n)\nfrom classy_vision.generic.util import (\n Timer,\n copy_model_to_gpu,\n load_and_broadcast_checkpoint,\n recursive_copy_to_gpu,\n split_batchnorm_params,\n update_classy_state,\n)\nfrom classy_vision.hooks import CheckpointHook, ClassyHook, build_hooks\nfrom classy_vision.losses import ClassyLoss, build_loss\nfrom classy_vision.meters import ClassyMeter, build_meters\nfrom classy_vision.models import ClassyModel, build_model\nfrom classy_vision.optim import (\n ClassyOptimizer,\n build_optimizer,\n build_optimizer_schedulers,\n)\nfrom torch.distributed import broadcast\n\nfrom . import register_task\nfrom .classy_task import ClassyTask\n\n\ntry:\n import apex\n\n apex_available = True\nexcept ImportError:\n apex_available = False\n\n\nclass BroadcastBuffersMode(enum.Enum):\n DISABLED = enum.auto()\n # Enable DistributedDataParallel's broadcast_buffers option, synchronizing\n # model buffers every forward pass.\n FORWARD_PASS = enum.auto()\n # Similar to FORWARD_PASS, but only synchronizes model buffers once\n # per epoch, between train and test phases. If your motivation for\n # synchronizing buffers is for buffers to be consistent during eval, use\n # this instead of FORWARD_PASS to reduce training overhead.\n BEFORE_EVAL = enum.auto()\n\n\nclass BatchNormSyncMode(enum.Enum):\n DISABLED = enum.auto() # No Synchronized Batch Normalization\n PYTORCH = enum.auto() # Use torch.nn.SyncBatchNorm\n APEX = enum.auto() # Use apex.parallel.SyncBatchNorm, needs apex to be installed\n\n\nclass LastBatchInfo(NamedTuple):\n loss: torch.Tensor\n output: torch.Tensor\n target: torch.Tensor\n sample: Dict[str, Any]\n step_data: Dict[str, Any]\n\n\n@register_task(\"classification_task\")\nclass ClassificationTask(ClassyTask):\n \"\"\"Basic classification training task.\n\n This task encapsultates all of the components and steps needed to\n train a classifier using a :class:`classy_vision.trainer.ClassyTrainer`.\n\n Assumes a train / test phase per each epoch and that the datasets\n have the same API as the map-style Dataset class in\n `torch.utils.data.dataset <https://pytorch.org/docs/stable/data.html\n #torch.utils.data.Dataset>`_ (in particular, this task makes use of\n the len). 
If you are using an `IterableDataset <https://pytorch.org/docs/\n stable/data.html#torch.utils.data.IterableDataset>`_ then a custom task\n may be appropriate.\n\n\n :var loss: Loss (see :class:`classy_vision.losses.ClassyLoss`) function used\n for computing the loss in each forward pass\n :var datasets: Mapping from a ``phase_type`` in [\"train\", \"test']\n to dataset used for training (or testing)\n :var meters: List of meters (see :class:`classy_vision.meters.ClassyMeter`)\n to calculate during training\n :var num_epochs: Number of epochs (passes over dataset) to train\n :var test_only: Used to only run the test phase\n :var base_model: Model to be trained, unwrapped in DDP or DP wrappers\n :var optimizer: Optimizer used in train step\n :var optimizer_schedulers: Dictionary. Key is the name of the optimizer\n option (e.g. lr), value is a ClassyParamScheduler\n :var checkpoint: Serializable dict which represents state in training\n :var phases: List of phase specific information, e.g. if phase is\n train / test.\n :var hooks: List of hooks to apply during training\n :var train: Phase type, if true it means we are training,\n false means testing\n :var distributed_model: Base model, but wrapped in DDP (DistributedDataParallel)\n :var phase_idx: Current phase id, first phase is 0, if task has not started\n training then returns -1\n :var train_phase_idx: Only counts train phases\n :var num_updates: Number of total parameter updates applied to model\n by the optimizer\n :var data_iterator: Iterator which can be used to obtain batches\n :var losses: Loss curve\n :var perf_log: list of training speed measurements, to be logged\n\n \"\"\"\n\n def __init__(self):\n \"\"\"Constructs a ClassificationTask\n \"\"\"\n super().__init__()\n\n self.base_loss = None\n self.datasets = {}\n self.meters = []\n self.num_epochs = 1\n self.test_phase_period = 1\n self.train_phases_per_epoch = 0\n self.test_only = False\n self.base_model = None\n self.optimizer = None\n self.optimizer_schedulers = {}\n self.checkpoint_dict = None\n self.checkpoint_path = None\n self.phases = []\n self.hooks = []\n self.train = True\n self.distributed_model = None\n self.distributed_loss = None\n self.phase_idx = -1\n self.train_phase_idx = -1\n self.num_updates = 0\n self.data_iterator = None\n self.losses = []\n self.broadcast_buffers_mode: BroadcastBuffersMode = (\n BroadcastBuffersMode.BEFORE_EVAL\n )\n self.amp_args = None\n self.mixup_transform = None\n self.perf_log = []\n self.last_batch = None\n self.batch_norm_sync_mode = BatchNormSyncMode.DISABLED\n self.find_unused_parameters = True\n self.use_gpu = torch.cuda.is_available()\n self.dataloader_mp_context = \"spawn\"\n self.bn_weight_decay = False\n self._train_only = True\n\n def set_use_gpu(self, use_gpu: bool):\n self.use_gpu = use_gpu\n\n assert (\n not self.use_gpu or torch.cuda.is_available()\n ), \"CUDA required to train on GPUs\"\n\n return self\n\n def set_checkpoint(self, checkpoint_path: str):\n \"\"\"Sets checkpoint on task.\n\n Args:\n checkpoint_path: The path to load the checkpoint from. Can be a file or a\n directory. See :func:`load_checkpoint` for more information.\n \"\"\"\n self.checkpoint_path = checkpoint_path\n return self\n\n def _set_checkpoint_dict(self, checkpoint_dict: Dict[str, Any]):\n \"\"\"Sets the checkpoint dict in the task. 
Only used for testing.\n\n Args:\n checkpoint_dict: A serializable dict representing current task state\n \"\"\"\n self.checkpoint_dict = checkpoint_dict\n return self\n\n def set_num_epochs(self, num_epochs: Union[int, float]):\n \"\"\"Set number of epochs to be run.\n\n Args:\n num_epochs: Number of epochs to run task\n \"\"\"\n self.num_epochs = num_epochs\n return self\n\n def set_test_phase_period(self, test_phase_period: int):\n \"\"\"Set the period of test phase.\n\n Args:\n test_phase_period: The period of test phase\n \"\"\"\n self.test_phase_period = test_phase_period\n return self\n\n def set_dataset(self, dataset: ClassyDataset, phase_type: str):\n \"\"\"Set dataset for phase type on task\n\n Args:\n dataset: ClassyDataset for returning samples.\n phase_type: str must be one of \"train\" or \"test\"\n \"\"\"\n assert phase_type in [\n \"train\",\n \"test\",\n ], \"phase_type must be in ['train', 'test']\"\n self.datasets[phase_type] = dataset\n if phase_type == \"train\":\n self.train_phases_per_epoch = getattr(dataset, \"phases_per_epoch\", 1)\n else:\n self._train_only = False\n return self\n\n def set_dataloader_mp_context(self, dataloader_mp_context: str):\n \"\"\"Set the multiprocessing context used by the dataloader.\n\n The context can be either 'spawn', 'fork' or 'forkserver'. See\n https://docs.python.org/3/library/multiprocessing.html#multiprocessing.get_context\n for more details.\"\"\"\n\n self.dataloader_mp_context = dataloader_mp_context\n return self\n\n def set_optimizer(self, optimizer: ClassyOptimizer):\n \"\"\"Set optimizer for task\n\n Args:\n optimizer: optimizer for task\n \"\"\"\n self.optimizer = optimizer\n return self\n\n def set_loss(self, loss: ClassyLoss):\n \"\"\"Set loss function for task\n\n Args:\n loss: loss for task\n \"\"\"\n self.base_loss = loss\n return self\n\n def set_meters(self, meters: List[\"ClassyMeter\"]):\n \"\"\"Set meters for task\n\n Args:\n meters: list of meters to compute during training\n \"\"\"\n self.meters = meters\n return self\n\n def set_distributed_options(\n self,\n broadcast_buffers_mode: BroadcastBuffersMode = BroadcastBuffersMode.BEFORE_EVAL,\n batch_norm_sync_mode: BatchNormSyncMode = BatchNormSyncMode.DISABLED,\n batch_norm_sync_group_size: int = 0,\n find_unused_parameters: bool = True,\n ):\n \"\"\"Set distributed options.\n\n Args:\n broadcast_buffers_mode: Broadcast buffers mode. See\n :class:`BroadcastBuffersMode` for options.\n batch_norm_sync_mode: Batch normalization synchronization mode. See\n :class:`BatchNormSyncMode` for options.\n batch_norm_sync_group_size: Group size to use for synchronized batch norm.\n 0 means that the stats are synchronized across all replicas. 
For\n efficient synchronization, set it to the number of GPUs in a node (\n usually 8).\n find_unused_parameters: See\n :class:`torch.nn.parallel.DistributedDataParallel` for information.\n\n Raises:\n RuntimeError: If batch_norm_sync_mode is `BatchNormSyncMode.APEX` and apex\n is not installed.\n \"\"\"\n self.broadcast_buffers_mode = broadcast_buffers_mode\n\n if batch_norm_sync_group_size > 0:\n if not batch_norm_sync_mode == BatchNormSyncMode.APEX:\n # this should ideally work with PyTorch Sync BN as well, but it\n # fails while initializing DDP for some reason.\n raise ValueError(\n \"batch_norm_sync_group_size can be > 0 only when \"\n \"Apex Synchronized Batch Normalization is being used.\"\n )\n self.batch_norm_sync_group_size = batch_norm_sync_group_size\n\n if batch_norm_sync_mode == BatchNormSyncMode.DISABLED:\n logging.info(\"Synchronized Batch Normalization is disabled\")\n else:\n if batch_norm_sync_mode == BatchNormSyncMode.APEX and not apex_available:\n raise RuntimeError(\"apex is not installed\")\n msg = f\"Using Synchronized Batch Normalization using {batch_norm_sync_mode}\"\n if self.batch_norm_sync_group_size > 0:\n msg += f\" and group size {batch_norm_sync_group_size}\"\n logging.info(msg)\n self.batch_norm_sync_mode = batch_norm_sync_mode\n\n self.find_unused_parameters = find_unused_parameters\n\n return self\n\n def set_hooks(self, hooks: List[\"ClassyHook\"]):\n \"\"\"Set hooks for task\n\n Args:\n hooks: List of hooks to apply during training\n \"\"\"\n from classy_vision.hooks import ClassyHook\n\n assert isinstance(hooks, list)\n assert all(isinstance(hook, ClassyHook) for hook in hooks)\n assert len({hook.name() for hook in hooks}) == len(\n hooks\n ), \"Cannot have repeated hooks of the same class\"\n # TODO (zyan3): we move checkpoint hook to the end of the list because some hooks\n # may change the state of the model, and we want to save changed state in the checkpoint.\n # This is temporary fix.\n non_checkpoint_hooks = [\n hook for hook in hooks if not isinstance(hook, CheckpointHook)\n ]\n checkpoint_hooks = [hook for hook in hooks if isinstance(hook, CheckpointHook)]\n hooks = non_checkpoint_hooks + checkpoint_hooks\n self.hooks = hooks\n return self\n\n def set_model(self, model: ClassyModel):\n \"\"\"Set model for task\n\n Args:\n model: Model to be trained\n \"\"\"\n self.base_model = model\n return self\n\n def set_test_only(self, test_only: bool):\n \"\"\"Set test only flag\n\n Args:\n test_only: If true, only test phases will be run\n \"\"\"\n self.test_only = test_only\n return self\n\n def set_bn_weight_decay(self, bn_weight_decay: bool):\n assert type(bn_weight_decay) == bool\n\n self.bn_weight_decay = bn_weight_decay\n return self\n\n def set_amp_args(self, amp_args: Optional[Dict[str, Any]]):\n \"\"\"Disable / enable apex.amp and set the automatic mixed precision parameters.\n\n apex.amp can be utilized for mixed / half precision training.\n\n Args:\n amp_args: Dictionary containing arguments to be passed to\n amp.initialize. Set to None to disable amp. 
To enable mixed\n precision training, pass amp_args={\"opt_level\": \"O1\"} here.\n See https://nvidia.github.io/apex/amp.html for more info.\n\n Raises:\n RuntimeError: If opt_level is not None and apex is not installed.\n\n Warning: apex needs to be installed to utilize this feature.\n \"\"\"\n self.amp_args = amp_args\n\n if amp_args is None:\n logging.info(f\"AMP disabled\")\n else:\n if not apex_available:\n raise RuntimeError(\"apex is not installed, cannot enable amp\")\n\n logging.info(f\"AMP enabled with args {amp_args}\")\n return self\n\n def set_mixup_transform(self, mixup_transform: Optional[\"MixupTransform\"]):\n \"\"\"Disable / enable mixup transform for data augmentation\n\n Args::\n mixup_transform: a callable object which performs mixup data augmentation\n \"\"\"\n self.mixup_transform = mixup_transform\n if mixup_transform is None:\n logging.info(f\"mixup disabled\")\n else:\n logging.info(f\"mixup enabled\")\n return self\n\n def set_optimizer_schedulers(self, schedulers):\n self.optimizer_schedulers = schedulers\n return self\n\n @classmethod\n def from_config(cls, config: Dict[str, Any]) -> \"ClassificationTask\":\n \"\"\"Instantiates a ClassificationTask from a configuration.\n\n Args:\n config: A configuration for a ClassificationTask.\n See :func:`__init__` for parameters expected in the config.\n\n Returns:\n A ClassificationTask instance.\n \"\"\"\n test_only = config.get(\"test_only\", False)\n if not test_only:\n # TODO Make distinction between epochs and phases in optimizer clear\n train_phases_per_epoch = config[\"dataset\"][\"train\"].get(\n \"phases_per_epoch\", 1\n )\n\n optimizer_config = config[\"optimizer\"]\n optimizer_config[\"num_epochs\"] = (\n config[\"num_epochs\"] * train_phases_per_epoch\n )\n optimizer = build_optimizer(optimizer_config)\n param_schedulers = build_optimizer_schedulers(optimizer_config)\n\n datasets = {}\n phase_types = [\"train\", \"test\"]\n for phase_type in phase_types:\n if phase_type in config[\"dataset\"]:\n datasets[phase_type] = build_dataset(config[\"dataset\"][phase_type])\n loss = build_loss(config[\"loss\"])\n amp_args = config.get(\"amp_args\")\n meters = build_meters(config.get(\"meters\", {}))\n model = build_model(config[\"model\"])\n\n mixup_transform = None\n if config.get(\"mixup\") is not None:\n assert \"alpha\" in config[\"mixup\"], \"key alpha is missing in mixup dict\"\n mixup_transform = MixupTransform(\n config[\"mixup\"][\"alpha\"], config[\"mixup\"].get(\"num_classes\")\n )\n\n # hooks config is optional\n hooks_config = config.get(\"hooks\")\n hooks = []\n if hooks_config is not None:\n hooks = build_hooks(hooks_config)\n\n distributed_config = config.get(\"distributed\", {})\n distributed_options = {\n \"broadcast_buffers_mode\": BroadcastBuffersMode[\n distributed_config.get(\"broadcast_buffers\", \"before_eval\").upper()\n ],\n \"batch_norm_sync_mode\": BatchNormSyncMode[\n distributed_config.get(\"batch_norm_sync_mode\", \"disabled\").upper()\n ],\n \"batch_norm_sync_group_size\": distributed_config.get(\n \"batch_norm_sync_group_size\", 0\n ),\n \"find_unused_parameters\": distributed_config.get(\n \"find_unused_parameters\", True\n ),\n }\n\n task = (\n cls()\n .set_num_epochs(config[\"num_epochs\"])\n .set_test_phase_period(config.get(\"test_phase_period\", 1))\n .set_loss(loss)\n .set_test_only(test_only)\n .set_model(model)\n .set_meters(meters)\n .set_amp_args(amp_args)\n .set_mixup_transform(mixup_transform)\n .set_distributed_options(**distributed_options)\n .set_hooks(hooks)\n 
.set_bn_weight_decay(config.get(\"bn_weight_decay\", False))\n )\n\n if not test_only:\n task.set_optimizer(optimizer)\n task.set_optimizer_schedulers(param_schedulers)\n\n use_gpu = config.get(\"use_gpu\")\n if use_gpu is not None:\n task.set_use_gpu(use_gpu)\n\n for phase_type in datasets:\n task.set_dataset(datasets[phase_type], phase_type)\n\n # NOTE: this is a private member and only meant to be used for\n # logging/debugging purposes. See __repr__ implementation\n task._config = config\n\n return task\n\n @property\n def num_batches_per_phase(self):\n \"\"\"Returns number of batches in current phase iterator\n \"\"\"\n return len(self.data_iterator)\n\n @property\n def model(self):\n \"\"\"Returns model used in training (can be wrapped with DDP)\n \"\"\"\n return (\n self.distributed_model if is_distributed_training_run() else self.base_model\n )\n\n @property\n def loss(self):\n \"\"\"Returns loss used in training (can be wrapped with DDP)\n \"\"\"\n return self.distributed_loss if self.distributed_loss else self.base_loss\n\n @property\n def phase_type(self):\n \"\"\"Returns current phase type. String with value \"train\" or \"test\"\n \"\"\"\n return \"train\" if self.train else \"test\"\n\n @property\n def eval_phase_idx(self):\n \"\"\"Returns current evaluation phase\n \"\"\"\n return self.phase_idx - self.train_phase_idx - 1\n\n def get_data_iterator(self):\n \"\"\"Returns data iterator for current phase\n \"\"\"\n return self.data_iterator\n\n def get_total_training_phases(self):\n \"\"\"\n Returns the total number of \"train\" phases in the task\n \"\"\"\n num_training_phases = 0\n for phase in self.phases:\n if phase[\"train\"] is True:\n num_training_phases += 1\n return num_training_phases\n\n def get_total_test_phases(self):\n \"\"\"\n Returns the total number of \"test\" phases in the task\n \"\"\"\n num_test_phases = 0\n for phase in self.phases:\n if phase[\"train\"] is False:\n num_test_phases += 1\n return num_test_phases\n\n def _build_phases(self):\n \"\"\"Returns list of phases from config.\n\n These phases will look like:\n {\n train: is this a train or test phase?\n optimizer: optimizer settings\n }\n\n - If this is a test only run, then only test phases will be\n generated\n - If this is a training run with both train and test datasets, then x phases =\n x train phases + x test phases, interleaved. If test_phase_period > 1, test\n phases are only added after test_phase_period train phases. The last phase is\n always a test phase.\n - If this is a training run with only a train dataset, then x phases = x train\n phases.\n \"\"\"\n if not self.test_only:\n phases = [\n {\"train\": True}\n for _ in range(math.ceil(self.train_phases_per_epoch * self.num_epochs))\n ]\n\n if self._train_only:\n return phases\n\n final_phases = []\n for i, phase in enumerate(phases):\n final_phases.append(phase)\n if (i + 1) % self.test_phase_period == 0:\n final_phases.append({\"train\": False})\n if final_phases[-1][\"train\"]:\n final_phases.append({\"train\": False})\n return final_phases\n\n return [{\"train\": False} for _ in range(self.num_epochs)]\n\n def build_dataloader(self, phase_type, pin_memory, **kwargs):\n \"\"\"Builds a dataloader iterable for a particular phase type.\n\n Args:\n phase_type: \"train\" or \"test\" iterable\n pin_memory: if true pin memory on GPU. 
See PyTorch dataloader\n documentation for details on ``pin_memory``.\n Returns:\n Returns a iterable over the dataset\n \"\"\"\n return self.datasets[phase_type].iterator(\n pin_memory=pin_memory, phase_type=phase_type, **kwargs\n )\n\n def build_dataloaders(self, pin_memory, **kwargs):\n \"\"\"Build a dataloader for each phase type\n\n Args:\n pin_memory: if true pin memory on GPU. See PyTorch dataloader\n documentation for details on pin_memory.\n Returns:\n Returns an iterable over the dataset associated with each phase_type\n \"\"\"\n return {\n phase_type: self.build_dataloader(\n phase_type, pin_memory=pin_memory, **kwargs\n )\n for phase_type in self.datasets.keys()\n }\n\n def prepare_optimizer(self, optimizer, model, loss=None):\n bn_params, other_params = split_batchnorm_params(model)\n if loss is not None:\n bn_params_loss, params_loss = split_batchnorm_params(loss)\n bn_params = bn_params + bn_params_loss\n other_params = other_params + params_loss\n\n bn_schedulers = self.optimizer_schedulers.copy()\n if not self.bn_weight_decay:\n bn_schedulers[\"weight_decay\"] = 0\n\n param_groups = [{\"params\": other_params, **self.optimizer_schedulers}]\n if len(bn_params) > 0:\n param_groups.append({\"params\": bn_params, **bn_schedulers})\n self.optimizer.set_param_groups(param_groups)\n\n def prepare(self):\n \"\"\"Prepares task for training, populates all derived attributes \"\"\"\n\n pin_memory = self.use_gpu and torch.cuda.device_count() > 1\n\n self.phases = self._build_phases()\n self.train = False if self.test_only else self.train\n self.dataloaders = self.build_dataloaders(\n current_phase_id=0,\n pin_memory=pin_memory,\n multiprocessing_context=mp.get_context(self.dataloader_mp_context),\n )\n\n if self.batch_norm_sync_mode == BatchNormSyncMode.PYTORCH:\n self.base_model = nn.SyncBatchNorm.convert_sync_batchnorm(self.base_model)\n elif self.batch_norm_sync_mode == BatchNormSyncMode.APEX:\n sync_bn_process_group = apex.parallel.create_syncbn_process_group(\n self.batch_norm_sync_group_size\n )\n self.base_model = apex.parallel.convert_syncbn_model(\n self.base_model, process_group=sync_bn_process_group\n )\n\n # move the model and loss to the right device\n if self.use_gpu:\n self.base_model, self.base_loss = copy_model_to_gpu(\n self.base_model, self.base_loss\n )\n else:\n self.base_loss.cpu()\n self.base_model.cpu()\n\n if self.optimizer is not None:\n self.prepare_optimizer(\n optimizer=self.optimizer, model=self.base_model, loss=self.base_loss\n )\n\n if self.amp_args is not None:\n # Initialize apex.amp. 
This updates the model and the PyTorch optimizer (\n # if training, which is wrapped by the ClassyOptimizer in self.optimizer).\n # Please note this must happen before loading the checkpoint, cause\n # there's amp state to be restored.\n\n if self.optimizer is None:\n self.base_model = apex.amp.initialize(\n self.base_model, optimizers=None, **self.amp_args\n )\n else:\n self.base_model, self.optimizer.optimizer = apex.amp.initialize(\n self.base_model, self.optimizer.optimizer, **self.amp_args\n )\n\n if self.checkpoint_path:\n self.checkpoint_dict = load_and_broadcast_checkpoint(self.checkpoint_path)\n\n classy_state_dict = (\n None\n if self.checkpoint_dict is None\n else self.checkpoint_dict[\"classy_state_dict\"]\n )\n\n if classy_state_dict is not None:\n state_load_success = update_classy_state(self, classy_state_dict)\n assert (\n state_load_success\n ), \"Update classy state from checkpoint was unsuccessful.\"\n\n self.init_distributed_data_parallel_model()\n\n def init_distributed_data_parallel_model(self):\n \"\"\"\n Initialize\n `torch.nn.parallel.distributed.DistributedDataParallel <https://pytorch.org/\n docs/stable/nn.html#distributeddataparallel>`_.\n\n Needed for distributed training. This is where a model should be wrapped by DDP.\n \"\"\"\n if not is_distributed_training_run():\n return\n assert (\n self.distributed_model is None\n ), \"init_ddp_non_elastic must only be called once\"\n\n broadcast_buffers = (\n self.broadcast_buffers_mode == BroadcastBuffersMode.FORWARD_PASS\n )\n self.distributed_model = init_distributed_data_parallel_model(\n self.base_model,\n broadcast_buffers=broadcast_buffers,\n find_unused_parameters=self.find_unused_parameters,\n )\n if (\n isinstance(self.base_loss, ClassyLoss)\n and self.base_loss.has_learned_parameters()\n ):\n logging.info(\"Initializing distributed loss\")\n self.distributed_loss = init_distributed_data_parallel_model(\n self.base_loss,\n broadcast_buffers=broadcast_buffers,\n find_unused_parameters=self.find_unused_parameters,\n )\n\n @property\n def where(self):\n \"\"\"Returns the proportion of training that has completed. If in test\n only mode, returns proportion of testing completed\n\n Returned value is a float in the range [0, 1)\n \"\"\"\n current_step = self.num_updates / self.get_global_batchsize()\n num_phases = (\n self.get_total_test_phases()\n if self.test_only\n else self.get_total_training_phases()\n )\n\n if self.num_batches_per_phase <= 0:\n raise RuntimeError(\"No batches to read. 
Is the dataset empty?\")\n\n num_steps = num_phases * self.num_batches_per_phase\n where = current_step / num_steps\n\n return where\n\n def get_classy_state(self, deep_copy: bool = False):\n \"\"\"Returns serialiable state of task\n\n Args:\n deep_copy: If true, does a deep copy of state before returning.\n \"\"\"\n optimizer_state = {}\n if self.optimizer is not None:\n optimizer_state = self.optimizer.get_classy_state()\n\n classy_state_dict = {\n \"train\": self.train,\n \"base_model\": self.base_model.get_classy_state(),\n \"meters\": [meter.get_classy_state() for meter in self.meters],\n \"optimizer\": optimizer_state,\n \"phase_idx\": self.phase_idx,\n \"train_phase_idx\": self.train_phase_idx,\n \"num_updates\": self.num_updates,\n \"losses\": self.losses,\n \"hooks\": {hook.name(): hook.get_classy_state() for hook in self.hooks},\n \"loss\": {},\n }\n if \"train\" in self.datasets and self._is_checkpointable_dataset(\n self.datasets[\"train\"]\n ):\n classy_state_dict[\"train_dataset_iterator\"] = self.datasets[\n \"train\"\n ].get_classy_state()\n\n if isinstance(self.base_loss, ClassyLoss):\n classy_state_dict[\"loss\"] = self.base_loss.get_classy_state()\n if self.amp_args is not None:\n classy_state_dict[\"amp\"] = apex.amp.state_dict()\n if deep_copy:\n classy_state_dict = copy.deepcopy(classy_state_dict)\n return classy_state_dict\n\n def set_classy_state(self, state):\n \"\"\"Set task state\n\n Args:\n state: Dict containing state of a task\n \"\"\"\n # some settings are different in test only\n self.train = False if self.test_only else state[\"train\"]\n if not self.test_only:\n self.phase_idx = state[\"phase_idx\"]\n self.num_updates = state[\"num_updates\"]\n self.train_phase_idx = state[\"train_phase_idx\"]\n self.losses = state[\"losses\"]\n for meter, meter_state in zip(self.meters, state[\"meters\"]):\n meter.set_classy_state(meter_state)\n\n self.base_model.set_classy_state(state[\"base_model\"])\n if self.optimizer is not None:\n self.optimizer.set_classy_state(state[\"optimizer\"])\n if state.get(\"loss\") and isinstance(self.base_loss, ClassyLoss):\n self.base_loss.set_classy_state(state[\"loss\"])\n\n if \"amp\" in state:\n apex.amp.load_state_dict(state[\"amp\"])\n\n for hook in self.hooks:\n # we still want to be able to run when new hooks are added or old\n # hooks are removed\n if hook.name() in state[\"hooks\"]:\n hook.set_classy_state(state[\"hooks\"][hook.name()])\n else:\n logging.warn(f\"No state found for hook: {hook.name()}\")\n\n if \"train\" in self.datasets and self._is_checkpointable_dataset(\n self.datasets[\"train\"]\n ):\n self.datasets[\"train\"].set_classy_state(state.get(\"train_dataset_iterator\"))\n\n # TODO (mannatsingh): Figure out how to set the state of the dataloaders\n # Re-build dataloader & re-create iterator.\n self._recreate_data_loader_from_dataset()\n self.create_data_iterator()\n # Set up pytorch module in train vs eval mode, update optimizer.\n self._set_model_train_mode()\n\n @staticmethod\n def _is_checkpointable_dataset(dataset):\n return hasattr(dataset, \"get_classy_state\") and hasattr(\n dataset, \"set_classy_state\"\n )\n\n def eval_step(self):\n self.last_batch = None\n\n # Process next sample\n with Timer() as timer:\n sample = next(self.get_data_iterator())\n\n assert isinstance(sample, dict) and \"input\" in sample and \"target\" in sample, (\n f\"Returned sample [{sample}] is not a map with 'input' and\"\n + \"'target' keys\"\n )\n\n target = sample[\"target\"]\n if self.use_gpu:\n sample = 
recursive_copy_to_gpu(sample, non_blocking=True)\n\n with torch.no_grad():\n output = self.model(sample[\"input\"])\n\n local_loss = self.compute_loss(output, sample)\n\n loss = local_loss.detach().clone()\n\n self.check_inf_nan(loss)\n\n self.losses.append(loss.data.cpu().item() * target.size(0))\n\n self.update_meters(output, sample)\n\n # Move some data to the task so hooks get a chance to access it\n self.last_batch = LastBatchInfo(\n loss=loss,\n output=output,\n target=target,\n sample=sample,\n step_data={\"sample_fetch_time\": timer.elapsed_time},\n )\n\n def check_inf_nan(self, loss):\n if loss == float(\"inf\") or loss == float(\"-inf\") or loss != loss:\n raise FloatingPointError(f\"Loss is infinity or NaN: {loss}\")\n\n def train_step(self):\n \"\"\"Train step to be executed in train loop.\"\"\"\n\n self.last_batch = None\n\n # Process next sample\n with Timer() as timer:\n sample = next(self.get_data_iterator())\n\n assert isinstance(sample, dict) and \"input\" in sample and \"target\" in sample, (\n f\"Returned sample [{sample}] is not a map with 'input' and\"\n + \"'target' keys\"\n )\n\n # Copy sample to GPU\n target = sample[\"target\"]\n if self.use_gpu:\n sample = recursive_copy_to_gpu(sample, non_blocking=True)\n\n if self.mixup_transform is not None:\n sample = self.mixup_transform(sample)\n\n with torch.enable_grad():\n # Forward pass\n output = self.model(sample[\"input\"])\n\n local_loss = self.compute_loss(output, sample)\n\n loss = local_loss.detach().clone()\n\n self.losses.append(loss.data.cpu().item() * target.size(0))\n\n self.update_meters(output, sample)\n\n # Run backwards pass / update optimizer\n if self.amp_args is not None:\n self.optimizer.zero_grad()\n with apex.amp.scale_loss(\n local_loss, self.optimizer.optimizer\n ) as scaled_loss:\n scaled_loss.backward()\n else:\n self.optimizer.backward(local_loss)\n\n self.check_inf_nan(loss)\n\n self.optimizer.step(where=self.where)\n\n self.num_updates += self.get_global_batchsize()\n\n # Move some data to the task so hooks get a chance to access it\n self.last_batch = LastBatchInfo(\n loss=loss,\n output=output,\n target=target,\n sample=sample,\n step_data={\"sample_fetch_time\": timer.elapsed_time},\n )\n\n def compute_loss(self, model_output, sample):\n return self.loss(model_output, sample[\"target\"])\n\n def update_meters(self, model_output, sample):\n target = sample[\"target\"].detach().cpu()\n model_output = model_output.detach().cpu()\n\n # Update meters\n for meter in self.meters:\n meter.update(model_output, target, is_train=self.train)\n\n def synchronize_losses(self):\n \"\"\"Average the losses across the different replicas\"\"\"\n\n # Average losses across nodes\n losses_tensor = torch.tensor(self.losses)\n synchronized_losses_tensor = all_reduce_mean(losses_tensor)\n self.losses = synchronized_losses_tensor.tolist()\n\n def advance_phase(self):\n \"\"\"Performs bookkeeping / task updates between phases\n\n Increments phase idx, resets meters, resets loss history,\n resets counters, shuffles dataset, rebuilds iterators, and\n sets the train / test state for phase.\n \"\"\"\n logging.debug(\"Advancing phase\")\n # Reset meters for next phase / epoch\n for meter in self.meters:\n meter.reset()\n\n # Reset loss history for next epoch\n self.losses = []\n\n # Setup new phase\n self.phase_idx += 1\n phase = self.phases[self.phase_idx]\n self.train = True if phase[\"train\"] else False\n if self.train:\n self.train_phase_idx += 1\n\n # Re-build dataloader & re-create iterator anytime membership 
changes.\n self._recreate_data_loader_from_dataset()\n self.create_data_iterator()\n # Set up pytorch module in train vs eval mode, update optimizer.\n self._set_model_train_mode()\n\n # Update the optimizer schedule\n if self.train and self.train_phase_idx >= 0:\n self.optimizer.on_epoch(where=self.where)\n\n def done_training(self):\n \"\"\"Stop condition for training\n \"\"\"\n return self.phase_idx + 1 >= len(self.phases)\n\n def _recreate_data_loader_from_dataset(self, phase_type=None):\n \"\"\"\n This utility is invoked to re-create the data loader object\n for the current phase of execution, using the existing dataset.\n This is sufficient when advancing phases.\n \"\"\"\n if phase_type is None:\n phase_type = self.phase_type\n\n logging.debug(\"Recreating data loader for new phase\")\n num_workers = 0\n if hasattr(self.dataloaders[phase_type], \"num_workers\"):\n num_workers = self.dataloaders[phase_type].num_workers\n pin_memory = False\n if hasattr(self.dataloaders[phase_type], \"pin_memory\"):\n pin_memory = self.dataloaders[phase_type].pin_memory\n multiprocessing_context = None\n if hasattr(self.dataloaders[phase_type], \"multiprocessing_context\"):\n multiprocessing_context = self.dataloaders[\n phase_type\n ].multiprocessing_context\n if phase_type == \"test\":\n current_phase_id = 0\n else:\n current_phase_id = max(self.train_phase_idx, 0)\n\n self.dataloaders[phase_type] = self.build_dataloader(\n phase_type=phase_type,\n num_workers=num_workers,\n pin_memory=pin_memory,\n multiprocessing_context=multiprocessing_context,\n current_phase_id=current_phase_id,\n )\n\n def create_data_iterator(self):\n \"\"\"Creates data iterator for phase.\n \"\"\"\n # Delete iterator explicitly so that all dataloader processes\n # are cleaned up.\n del self.data_iterator\n self.data_iterator = iter(self.dataloaders[self.phase_type])\n\n def _set_model_train_mode(self):\n \"\"\"Set train mode for model\n \"\"\"\n phase = self.phases[self.phase_idx]\n self.base_model.train(phase[\"train\"])\n self.base_loss.train(phase[\"train\"])\n\n if (\n self.broadcast_buffers_mode == BroadcastBuffersMode.BEFORE_EVAL\n and not self.train\n ):\n self._broadcast_buffers()\n\n def _broadcast_buffers(self):\n \"\"\"Explicitly synchronize buffers across all devices.\"\"\"\n if self.distributed_model is None:\n return\n buffers = list(self.base_model.buffers())\n if len(buffers) > 0:\n logging.info(\"Synchronizing buffers before evaluation.\")\n for buffer in buffers:\n broadcast(buffer, 0, group=self.distributed_model.process_group)\n\n # TODO: Functions below should be better abstracted into the dataloader\n # abstraction\n def get_batchsize_per_replica(self):\n \"\"\"Return local replica's batchsize for dataset (e.g. 
batchsize per GPU)\n \"\"\"\n # TODO(T47573564) - cleaner abstraction\n return self.dataloaders[self.phase_type].dataset.get_batchsize_per_replica()\n\n def get_global_batchsize(self):\n \"\"\"Return global batchsize across all trainers\n \"\"\"\n return self.dataloaders[self.phase_type].dataset.get_global_batchsize()\n\n def on_start(self):\n for hook in self.hooks:\n hook.on_start(self)\n\n def on_phase_start(self):\n self.phase_start_time_total = time.perf_counter()\n\n self.advance_phase()\n\n for hook in self.hooks:\n hook.on_phase_start(self)\n\n self.phase_start_time_train = time.perf_counter()\n\n def on_phase_end(self):\n self.log_phase_end(\"train\")\n\n logging.debug(\"Syncing losses on phase end...\")\n self.synchronize_losses()\n logging.debug(\"...losses synced\")\n\n logging.debug(\"Syncing meters on phase end...\")\n for meter in self.meters:\n meter.sync_state()\n logging.debug(\"...meters synced\")\n barrier()\n\n for hook in self.hooks:\n hook.on_phase_end(self)\n self.perf_log = []\n\n self.log_phase_end(\"total\")\n\n def on_end(self):\n for hook in self.hooks:\n hook.on_end(self)\n\n def log_phase_end(self, tag):\n if not self.train:\n return\n\n start_time = (\n self.phase_start_time_train\n if tag == \"train\"\n else self.phase_start_time_total\n )\n phase_duration = time.perf_counter() - start_time\n im_per_sec = (\n self.get_global_batchsize() * self.num_batches_per_phase\n ) / phase_duration\n self.perf_log.append(\n {\n \"tag\": tag,\n \"phase_idx\": self.train_phase_idx,\n \"epoch_duration\": phase_duration,\n \"im_per_sec\": im_per_sec,\n }\n )\n\n def __repr__(self):\n if hasattr(self, \"_config\"):\n config = json.dumps(self._config, indent=4)\n return f\"{super().__repr__()} initialized with config:\\n{config}\"\n\n return super().__repr__()\n" ]
[ [ "torch.distributed.broadcast", "torch.enable_grad", "torch.nn.SyncBatchNorm.convert_sync_batchnorm", "torch.tensor", "torch.no_grad", "torch.cuda.is_available", "torch.cuda.device_count" ] ]
hencockable/deep-head-pose
[ "f6aa02edb7f7b243aa2df4f9cc233310ed73d7c0" ]
[ "code/my_scripts/make_train_val_test_sets.py" ]
[ "import pandas as pd\nimport os\n\n# all vps\n# 'VP3', 'VP8', 'VP7', 'VP14', 'VP2', 'VP13', 'VP16', 'VP12', 'VP10', 'VP19', 'VP18', 'VP5', 'VP11'\n\ndata_path = \"../../source/train_val_test_vps/data/\"\nl4_path = \"../../source/train_val_test_vps/l4/\"\nl4_no_pca_path = \"../../source/train_val_test_vps/l4_no_pca/\"\nout_path = \"../../source/train_val_test_sets/\"\n\nfiles = set(os.listdir(data_path))\nvps = set([file.split(\"_\")[4] for file in files])\n\nfor vp in vps:\n meta_data_df = pd.DataFrame([])\n label_df = pd.DataFrame([])\n hp_df = pd.DataFrame([])\n l4_df = pd.DataFrame([])\n l4_no_pca_df = pd.DataFrame([])\n\n for file in files:\n if vp in file:\n data_df = pd.read_csv(data_path + file)\n hp_df = hp_df.append(data_df[[\"yaw\", \"pitch\", \"roll\"]])\n label_df = label_df.append(data_df[[\"label\"]])\n data_df.drop(columns=[\"yaw\", \"pitch\", \"roll\", \"label\"], inplace=True)\n meta_data_df = meta_data_df.append(data_df)\n l4_df = l4_df.append(pd.read_csv(l4_path + file[:-8] + \"l4.csv\"))\n l4_no_pca_df = l4_no_pca_df.append(pd.read_csv(l4_no_pca_path + file[:-8] + \"l4_no_pca.csv\"))\n\n meta_data_df.to_csv(\"{}meta_data/{}.csv\".format(out_path, vp[2:]), index=False)\n label_df.to_csv(\"{}labels/{}.csv\".format(out_path, vp[2:]), index=False)\n hp_df.to_csv(\"{}hp/{}.csv\".format(out_path, vp[2:]), index=False)\n l4_df.to_csv(\"{}l4/{}.csv\".format(out_path, vp[2:]), index=False)\n l4_no_pca_df.to_csv(\"{}l4_no_pca/{}.csv\".format(out_path, vp[2:]), index=False)\n\n\n\n" ]
[ [ "pandas.read_csv", "pandas.DataFrame" ] ]
MZSHAN/SparseReconstruction
[ "c6f553acbd6cf5ad72038fae5d5454e42e11cdcb" ]
[ "EpipolarHelpers.py" ]
[ "import math\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport utils\n\n\n# Function has been tested\ndef triangulate(projection_matrix1, pts1, projection_matrix2, pts2):\n \"\"\"\n Function takes a the projection matrices of two cameras and the corresponding points in the pixel space of the\n 2 camera. It returns the 3D location of the corresponding points.\n Reprojection of Point P in the pixel space using the projection matrix. This should be equal to the actual pixel\n location in Image. Solve:\n - Linear constraints from p X MP = 0 ; p is pixel coordinate and P is 3d point\n - Minimizing non linear reprojection error\n\n Args:\n projection_matrix1: (3 X 4) projection matrix of camera 1\n pts1: (N x 2) matrix with 2D points in camera 1\n projection_matrix2: (3 X 4) projection matrix of camera 2\n pts2: (N x 2) matrix with corresponding 2D points in camera 2\n Returns:\n (N x 3) world coordinates corresponding to pts1 and pts2\n \"\"\"\n # Get coordinates as column vectors - eases matrix operations\n x1, y1 = np.expand_dims(pts1[:, 0], axis = 1), np.expand_dims(pts1[:, 1], axis = 1)\n x2, y2 = np.expand_dims(pts2[:, 0], axis = 1), np.expand_dims(pts2[:, 1], axis = 1)\n\n\n # Calculate contraints for all points\n constraint1 = x1 * projection_matrix1[2] - projection_matrix1[0]\n constraint2 = y1 * projection_matrix1[2] - projection_matrix1[1]\n constraint3 = x2 * projection_matrix2[2] - projection_matrix2[0]\n constraint4 = y2 * projection_matrix2[2] - projection_matrix2[1]\n\n N = len(pts2)\n world_coords = np.zeros((N, 3))\n\n # triangulate\n for i in range(N):\n A = np.vstack((constraint1[i], constraint2[i], constraint3[i], constraint4[i]))\n U, S, Vh = np.linalg.svd(A)\n assert(Vh[-1].shape == (4, )) # TODO: Remove later\n P = Vh[-1]\n world_coords[i] = P[:3] / P[-1] # Convert from homogenous to world - 4D to 3D coordinates\n\n world_coords_homo = np.concatenate((world_coords, np.ones((N, 1))), axis=1) # Convert to homog for projection\n\n # compute reprojection error\n projected_pts1 = np.dot(projection_matrix1, world_coords_homo.T)\n projected_pts1 = np.transpose(projected_pts1[:2] / projected_pts1[2])\n projected_pts2 = np.dot(projection_matrix2, world_coords_homo.T)\n projected_pts2 = np.transpose(projected_pts2[:2] / projected_pts2[2])\n\n assert(pts1.shape == projected_pts1.shape)\n assert(pts2.shape == projected_pts2.shape)\n\n reprojection_error = np.sum((projected_pts1 - pts1)**2 + (projected_pts2 - pts2)**2)\n avg_reprojection_error = math.sqrt(reprojection_error) / len(pts1)\n print (f\"The total reprojection error is {avg_reprojection_error}\\n\")\n\n return world_coords\n\n# Function hs been tested\ndef camera2_extrisics_from_essential_matrix(essential_matrix):\n \"\"\"\n Function take essential matrix as input and returns the 4 possible rotations and translations between the stereo\n cameras\n\n First the sigular value contraint of Essential Matrix is enforced\n Essential Matrix = [Tx]R. [Tx] is a skew symmetric matrix. 3D skew symmetric matrices have 2 equal singular values\n Since R is just a rotation, essential matrix is has 2 singular values which are equal\n\n E = [Tx]R = U(Sigma)V, then with W as skew symmetric matrix as shown below can be used to get t and R as shown below\n so that they satisfy [Tx]R = E\n But t and -t, R and R.T so obtained satisfy the properties. All 4 combinations are returned from this function, but\n only one is correct. 
Correct can be found by a pair of corresponding points downstream.\n\n Refer: https://en.wikipedia.org/wiki/Essential_matrix#Extracting_rotation_and_translation\n and Hartley Zisserman 9.6.2\n\n Args:\n essential_matrix: 3 X 3 essential matrix for camera pair\n Returs:\n (3 X 4 X 4) matrix with possible Extrinsics - rotation and translation between 2 cameras\n \"\"\"\n # correct for equal singular values\n U, S, Vh = np.linalg.svd(essential_matrix)\n mean_s = S[0:2].mean()\n S = np.eye(len(S)) * mean_s; S[-1, -1] = 0\n essential_matrix = np.dot(U, np.dot(S, Vh))\n U, S, Vh = np.linalg.svd(essential_matrix) # Recalculate SVD\n\n\n W = np.array([[0, -1, 0], [1, 0, 0], [0, 0, 1]])\n # This is one of the rotation matrices(W/W.T) - ensures that we have a projection and space is not flipped\n if np.linalg.det(np.dot(U, np.dot(W, Vh))) < 0:\n W = -W\n\n # All possible Extrinsic matrices are [UWV.T | u3], [UWV.T | -u3], [U(W.T)V.T | u3], [U(W.T)V.T | -u3]\n T = U[:, 2].reshape(-1, 1)/abs(U[:, 2]).max()\n R1 = np.dot(U, np.dot(W, Vh))\n R2 = np.dot(U, np.dot(W.T, Vh))\n\n Extrinsics = np.zeros((4, 3, 4))\n Extrinsics[0] = np.concatenate((R1, T), axis = 1)\n Extrinsics[1] = np.concatenate((R1, -T), axis = 1)\n Extrinsics[2] = np.concatenate((R2, T), axis = 1)\n Extrinsics[3] = np.concatenate((R2, -T), axis = 1)\n\n return Extrinsics\n\n\n# Function hs been tested\ndef get_projection_matrices(essential_matrix, cam_intrinsic1, cam_intrinsic2, pt1, pt2):\n \"\"\"\n Function returns the projection matrices of the two cameras in a stereo system\n Function first calculates 4 possible Rotations and Translations matrices from the essential matrix\n Out of the 4 possiblities, one is selected based on min projection error\n Both Camera intrincs along with rotation and translation give the Projection matrices for both cameras\n\n Args:\n essential_matrix: (3X3) essential matrix\n cam_intrinsic1: (3 x 3) matrix, intrinsic parameters for camera 1\n cam_intrinsic2: (3 x 3) matrix, intrinsic parameters for camera 2\n pt1: (2, ) numpy array, a 2D piont in image1\n pt2: (2, ) numpy array, a 2D piont in image2 corresponding to pt1\n\n returns:\n projection_mat1\n \"\"\"\n poss_extrinsics = camera2_extrisics_from_essential_matrix(essential_matrix)\n\n # Make homogenous 2D i.e. 3D points\n pt1, pt2 = np.append(pt1, [1]), np.append(pt1, [1])\n pt1, pt2 = np.expand_dims(pt1, axis=1), np.expand_dims(pt2, axis=1)\n assert(pt1.shape == (3, 1))\n pt1, pt2 = np.linalg.inv(cam_intrinsic1).dot(pt1), np.linalg.inv(cam_intrinsic2).dot(pt2) # From pixel to image space\n pt1 = np.vstack((pt1, [[1]])) # Make 3D homogenous i.e. 4D points\n assert(pt1.shape == (4, 1))\n\n min_err, extrinsic2 = float('inf'), None\n for ext in poss_extrinsics:\n pred_pt2 = np.dot(ext, pt1)\n assert(pred_pt2.shape == pt2.shape)\n err = np.sum((pred_pt2 - pt2)**2)\n if err < min_err:\n min_err = err\n extrinsic2 = ext\n\n extrinsic1 = np.concatenate((np.eye(3), np.zeros((3, 1))), axis = 1)\n projection_mat1 = np.dot(cam_intrinsic1, extrinsic1)\n projection_mat2 = np.dot(cam_intrinsic2, extrinsic2)\n\n assert(projection_mat1.shape == (3, 4))\n assert(projection_mat2.shape == (3, 4))\n\n return projection_mat1, projection_mat2\n\n\n# test using some corresp noisy.npz\ndef ransac_fundamental_matrix(pts1, pts2, normalization_factor):\n \"\"\"\n Function calculates the best fundamental matrix through Ransac\n\n Ransac requires the function to be calculated using min num of points. 
Hence seven point algorithm is used.\n\n Args:\n pts1: (N x 2) Numpy array of 2D points from image 1\n pts2: (N x 2) Numpy array of corrsponding feature points from image2\n normalization_factor: maximum of width and height of the images\n Returns:\n ( 3 X 3) best estimate of the fundamental matrix\n indices of in inlier points from pts1, pts2\n \"\"\"\n assert(pts1.shape == pts2.shape)\n N = len(pts1)\n threshold = 1\n\n # Cconvert points to homogenous coordinates\n homo_pts1 = np.hstack((pts1, np.ones((N, 1))))\n homo_pts2 = np.hstack((pts2, np.ones((N, 1))))\n fundamental_matrix, inliers, max_inliers = None, None, -1\n\n # Run ransac for 1000 iterations\n for i in range(1000):\n # calculate sample fundamental matrix using 7 points\n ind = np.random.randint(0, N, 7)\n sample_fundamental_matrices = sevenpoint(pts1[ind], pts2[ind], normalization_factor)\n\n for sample_fund_matrix in sample_fundamental_matrices:\n # Use sample fundamental matrix to get epipolar lines for all points\n epipolar_lines = np.dot(sample_fund_matrix, homo_pts1.T)\n assert(epipolar_lines.shape == (3, N))\n epipolar_lines = epipolar_lines / (epipolar_lines[0][:]**2 + epipolar_lines[1][:]**2)\n epipolar_lines = epipolar_lines.T\n\n # Calculate distance of points from epipolar lines\n distance = abs(np.sum(homo_pts2 * epipolar_lines, axis=1))\n curr_inliers = np.arange(N).reshape(N, 1)[distance < threshold]\n\n if len(curr_inliers) > max_inliers:\n inliers = curr_inliers\n max_inliers = len(curr_inliers)\n fundamental_matrix = sample_fund_matrix\n\n print (\"For ransac fundamental matrix: Average #inlier is :\", max_inliers / N)\n\n return fundamental_matrix, inliers\n\n#This function has been taken from the internet.\n#Returns list of estimated fundamental matrices\ndef sevenpoint(pts1, pts2, M):\n # normalize the coordinates\n x1, y1 = pts1[:, 0], pts1[:, 1]\n x2, y2 = pts2[:, 0], pts2[:, 1]\n x1, y1, x2, y2 = x1 / M, y1 / M, x2 / M, y2 / M\n # normalization matrix\n T = np.array([[1. / M, 0, 0], [0, 1. / M, 0], [0, 0, 1]])\n\n A = np.transpose(np.vstack((x2 * x1, x2 * y1, x2, y2 * x1, y2 * y1, y2, x1, y1, np.ones(x1.shape))))\n\n # get F by SVD decomposition\n u, s, vh = np.linalg.svd(A)\n f1 = vh[-1, :]\n f2 = vh[-2, :]\n F1 = np.reshape(f1, (3, 3))\n F2 = np.reshape(f2, (3, 3))\n\n fun = lambda alpha: np.linalg.det(alpha * F1 + (1 - alpha) * F2)\n # get the coefficients of the polynomial\n a0 = fun(0)\n a1 = 2*(fun(1)-fun(-1))/3 - (fun(2)-fun(-2))/12\n a2 = (fun(1)+fun(-1))/2 - a0\n a3 = (fun(1)-fun(-1))/2 - a1\n # solve for alpha\n alpha = np.roots([a3, a2, a1, a0])\n\n Farray = [a*F1+(1-a)*F2 for a in alpha]\n # refine F\n Farray = [utils.refineF(F, pts1/M, pts2/M) for F in Farray]\n # denormalize F\n Farray = [np.dot(np.transpose(T), np.dot(F, T)) for F in Farray]\n\n return Farray\n\n\ndef _get_normalization_matrix(pts1, pts2, M):\n return np.diag([1.0/M, 1.0/M, 1.0])\n\n\ndef get_fundamental_matrix_eight_point(pts1, pts2, M):\n \"\"\"\n Do not understand the geometric local refining part yet\n\n refine the solution by using local minimization.\n make a good solution better by locally minimizing a geometric cost function.\n call from eightpoint before unscaling F.\n \"\"\"\n assert(len(pts1) == len(pts2), \"Correspondence mismatch\")\n\n norm_matrix = _get_normalization_matrix(pts1, pts2, M)\n #Not coverting pts to homogenous co-ordinates to save memory. 
Rather multiply by 2X2 norm matrix for same points\n norm_pt1 = np.dot(norm_matrix[0:2, 0:2], pts1.T).T\n norm_pt2 = np.dot(norm_matrix[0:2, 0:2], pts2.T).T\n\n x1, y1 = norm_pt1[:, 0], norm_pt1[:, 1]\n x2, y2 = norm_pt2[:, 0], norm_pt2[:, 1]\n feature_matrix = np.transpose(np.stack((x1*x2, y2*x1, x1, x2*y1, y1*y2, y1, x2, y2, np.ones(x1.shape))))\n\n u, s, vh = np.linalg.svd(feature_matrix)\n unconstrained_fund_matrix = vh[-1].reshape(3, 3)\n\n unconstrained_fund_matrix = utils.refineF(unconstrained_fund_matrix, norm_pt1, norm_pt2)\n\n U, S, Vh = np.linalg.svd(unconstrained_fund_matrix)\n S[-1] = 0\n sing_values = np.diag(S)\n unnormazized_fund_matrix = np.dot(U, np.dot(sing_values, Vh))\n\n fundamental_matrix = np.dot(norm_matrix.T, np.dot(unnormazized_fund_matrix, norm_matrix))\n return fundamental_matrix\n\n\ndef _get_epipolar_line_pixels(epipolar_line, im2):\n if epipolar_line[0] == 0 and epipolar_line[1] == 0:\n raise Exception(\"Line vector can not be zero\")\n\n im_y, im_x, _ = im2.shape\n\n epipolar_coordinates = []\n if epipolar_line[0] != 0:\n for y in range(im_y):\n x = -(epipolar_line[1] * y + epipolar_line[2])/epipolar_line[0]\n epipolar_coordinates.append([round(x), y])\n else:\n for x in range(im_x):\n y = -(epipolar_line[0] * x + epipolar_line[2])/epipolar_line[1]\n epipolar_coordinates.append([x, round(y)])\n return np.array(epipolar_coordinates)\n\n\ndef test_epipolar_line(im1, im2, fundamental_matrix):\n x1, y1 = 475, 96\n\n print (f\"Testing ..... plotting the epipolar line for {x1} and {y1} \")\n point = np.array([[x1, y1, 1]]).T\n\n epipolar_line = np.dot(fundamental_matrix.T, point)\n epipolar_line = np.squeeze(epipolar_line.T)\n\n epipolar_coordinates = _get_epipolar_line_pixels(epipolar_line, im2)\n\n plt.imshow(im2)\n plt.scatter(epipolar_coordinates[:,0], epipolar_coordinates[:, 1])\n plt.show()\n\n\ndef _valid_epipolar_coordinates(im2, epipolar_coordinates, center):\n ht, wd, _ = im2.shape\n valid_x = np.logical_and(epipolar_coordinates[:, 0] < wd - center, epipolar_coordinates[:, 0] >= center)\n valid_y = np.logical_and(epipolar_coordinates[:, 1] < ht - center, epipolar_coordinates[:, 1] >= center)\n valid_epipolar = epipolar_coordinates[np.logical_and(valid_x, valid_y)]\n return valid_epipolar\n\n\ndef epipolar_correspondence(im1, im2, x1, y1, fundamental_matrix):\n \"\"\"\n This is a noob version that uses L2 distances to find the feature poitns. 
Fails for similar corners in diff locations\n >>Since there is not much change in the 2 images, the points should be searched locally for best results here\n\n Ideally SIFT discriptors should be used\n \"\"\"\n window_size = 11\n center = window_size//2\n sigma = 5\n\n point = np.array([[x1, y1, 1]]).T\n\n epipolar_line = np.dot(fundamental_matrix, point)\n epipolar_line = np.squeeze(epipolar_line.T)\n\n epipolar_coordinates = _get_epipolar_line_pixels(epipolar_line, im2)\n valid_coords = _valid_epipolar_coordinates(im2, epipolar_coordinates, center)\n\n target_patch = im1[y1-center:y1+center+1, x1-center:x1+center+1]\n\n #Generate a gaussian mask to weight error\n mask = np.ones((window_size, window_size))*center\n mask = np.repeat(np.array([range(window_size)]), window_size, axis=0) - mask\n mask = mask**2+np.transpose(mask)**2\n weight = np.exp(-0.5*mask/(sigma**2))\n weight /= np.sum(weight)\n\n correspond, min_error = None, float(\"inf\")\n for x, y in valid_coords:\n source_patch = im2[y-center:y+center+1, x-center:x+center+1]\n\n error = ((target_patch - source_patch)**2).transpose(2, 0, 1)\n weighed_error = np.sum(np.multiply(error, weight))\n\n if weighed_error < min_error:\n correspond = [x, y]\n min_error = weighed_error\n\n return correspond\n\n\ndef epipolarCorrespondence(im1, im2, x1, y1, F):\n # set the size of the window\n x1, y1 = int(round(x1)), int(round(y1))\n window_size = 11\n center = window_size//2\n sigma = 5\n search_range = 40\n\n # create gaussian weight matrix\n mask = np.ones((window_size, window_size))*center\n mask = np.repeat(np.array([range(window_size)]), window_size, axis=0) - mask\n mask = np.sqrt(mask**2+np.transpose(mask)**2)\n weight = np.exp(-0.5*(mask**2)/(sigma**2))\n weight /= np.sum(weight)\n\n if len(im1.shape) > 2:\n weight = np.repeat(np.expand_dims(weight, axis=2), im1.shape[-1], axis=2)\n\n # get the epipolar line\n p = np.array([[x1], [y1], [1]])\n l2 = np.dot(F, p)\n\n # get the patch around the pixel in image1\n patch1 = im1[y1-center:y1+center+1, x1-center:x1+center+1]\n # get the points on the epipolar line\n h, w, _ = im2.shape\n Y = np.array(range(y1-search_range, y1+search_range))\n X = np.round(-(l2[1]*Y+l2[2])/l2[0]).astype(np.int)\n valid = (X >= center) & (X < w - center) & (Y >= center) & (Y < h - center)\n X, Y = X[valid], Y[valid]\n\n min_dist = None\n x2, y2 = None, None\n for i in range(len(X)):\n # get the patch around the pixel in image2\n patch2 = im2[Y[i]-center:Y[i]+center+1, X[i]-center:X[i]+center+1]\n # calculate the distance\n dist = np.sum((patch1-patch2)**2*weight)\n if min_dist is None or dist < min_dist:\n min_dist = dist\n x2, y2 = X[i], Y[i]\n\n return x2, y2\n\n\n# Function has been tested\ndef get_essential_matrix(fundamental_matrix, cam_intrinsic1, cam_intrinsic2):\n essential_matrix = np.dot(cam_intrinsic2.T, np.dot(fundamental_matrix, cam_intrinsic1))\n return essential_matrix" ]
[ [ "numpy.diag", "numpy.dot", "matplotlib.pyplot.imshow", "numpy.expand_dims", "numpy.squeeze", "numpy.concatenate", "numpy.round", "numpy.exp", "numpy.random.randint", "numpy.linalg.svd", "numpy.reshape", "numpy.arange", "numpy.eye", "numpy.roots", "numpy.linalg.det", "numpy.zeros", "numpy.multiply", "numpy.linalg.inv", "numpy.append", "numpy.transpose", "numpy.logical_and", "matplotlib.pyplot.show", "numpy.array", "numpy.sum", "matplotlib.pyplot.scatter", "numpy.ones", "numpy.vstack" ] ]
ayushdg/cudf
[ "2a82eca39149d474dc9ffb1cb95dcea4a58a78cf" ]
[ "python/cudf/cudf/tests/test_indexing.py" ]
[ "# Copyright (c) 2021, NVIDIA CORPORATION.\n\nfrom itertools import combinations\n\nimport cupy\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nimport cudf\nfrom cudf.core._compat import PANDAS_GE_110, PANDAS_GE_120\nfrom cudf.tests import utils\nfrom cudf.tests.utils import INTEGER_TYPES, assert_eq, assert_exceptions_equal\n\nindex_dtypes = INTEGER_TYPES\n\n\n@pytest.fixture\ndef pdf_gdf():\n pdf = pd.DataFrame(\n {\"a\": [1, 2, 3], \"b\": [\"c\", \"d\", \"e\"]}, index=[\"one\", \"two\", \"three\"]\n )\n gdf = cudf.from_pandas(pdf)\n return pdf, gdf\n\n\n@pytest.fixture\ndef pdf_gdf_multi():\n pdf = pd.DataFrame(np.random.rand(7, 5))\n pdfIndex = pd.MultiIndex(\n [\n [\"a\", \"b\", \"c\"],\n [\"house\", \"store\", \"forest\"],\n [\"clouds\", \"clear\", \"storm\"],\n [\"fire\", \"smoke\", \"clear\"],\n ],\n [\n [0, 0, 0, 0, 1, 1, 2],\n [1, 1, 1, 1, 0, 0, 2],\n [0, 0, 2, 2, 2, 0, 1],\n [0, 0, 0, 1, 2, 0, 1],\n ],\n )\n pdfIndex.names = [\"alpha\", \"location\", \"weather\", \"sign\"]\n pdf.index = pdfIndex\n gdf = cudf.from_pandas(pdf)\n return pdf, gdf\n\n\n@pytest.mark.parametrize(\n \"i1, i2, i3\",\n (\n [\n (slice(None, 12), slice(3, None), slice(None, None, 2)),\n (range(12), range(3, 12), range(0, 9, 2)),\n (np.arange(12), np.arange(3, 12), np.arange(0, 9, 2)),\n (list(range(12)), list(range(3, 12)), list(range(0, 9, 2))),\n (\n pd.Series(range(12)),\n pd.Series(range(3, 12)),\n pd.Series(range(0, 9, 2)),\n ),\n (\n cudf.Series(range(12)),\n cudf.Series(range(3, 12)),\n cudf.Series(range(0, 9, 2)),\n ),\n (\n [i in range(12) for i in range(20)],\n [i in range(3, 12) for i in range(12)],\n [i in range(0, 9, 2) for i in range(9)],\n ),\n (\n np.array([i in range(12) for i in range(20)], dtype=bool),\n np.array([i in range(3, 12) for i in range(12)], dtype=bool),\n np.array([i in range(0, 9, 2) for i in range(9)], dtype=bool),\n ),\n ]\n + [\n (\n np.arange(12, dtype=t),\n np.arange(3, 12, dtype=t),\n np.arange(0, 9, 2, dtype=t),\n )\n for t in index_dtypes\n ]\n ),\n ids=(\n [\n \"slice\",\n \"range\",\n \"numpy.array\",\n \"list\",\n \"pandas.Series\",\n \"Series\",\n \"list[bool]\",\n \"numpy.array[bool]\",\n ]\n + [\"numpy.array[%s]\" % np.dtype(t).type.__name__ for t in index_dtypes]\n ),\n)\ndef test_series_indexing(i1, i2, i3):\n a1 = np.arange(20)\n series = cudf.Series(a1)\n # Indexing\n sr1 = series.iloc[i1]\n assert sr1.null_count == 0\n np.testing.assert_equal(sr1.to_array(), a1[:12])\n sr2 = sr1.iloc[i2]\n assert sr2.null_count == 0\n np.testing.assert_equal(sr2.to_array(), a1[3:12])\n # Index with stride\n sr3 = sr2.iloc[i3]\n assert sr3.null_count == 0\n np.testing.assert_equal(sr3.to_array(), a1[3:12:2])\n\n # Integer indexing\n if isinstance(i1, range):\n for i in i1: # Python int-s\n assert series[i] == a1[i]\n if isinstance(i1, np.ndarray) and i1.dtype in index_dtypes:\n for i in i1: # numpy integers\n assert series[i] == a1[i]\n\n\ndef test_series_indexing_large_size():\n n_elem = 100_000\n gsr = cudf.Series(cupy.ones(n_elem))\n gsr[0] = None\n got = gsr[gsr.isna()]\n expect = cudf.Series([None], dtype=\"float64\")\n\n assert_eq(expect, got)\n\n\n@pytest.mark.parametrize(\"psr\", [pd.Series([1, 2, 3], index=[\"a\", \"b\", \"c\"])])\n@pytest.mark.parametrize(\n \"arg\", [\"b\", [\"a\", \"c\"], slice(1, 2, 1), [True, False, True]]\n)\ndef test_series_get_item(psr, arg):\n gsr = cudf.from_pandas(psr)\n\n expect = psr[arg]\n got = gsr[arg]\n\n assert_eq(expect, got)\n\n\ndef test_dataframe_column_name_indexing():\n df = cudf.DataFrame()\n data = 
np.asarray(range(10), dtype=np.int32)\n df[\"a\"] = data\n df[1] = data\n np.testing.assert_equal(\n df[\"a\"].to_array(), np.asarray(range(10), dtype=np.int32)\n )\n np.testing.assert_equal(\n df[1].to_array(), np.asarray(range(10), dtype=np.int32)\n )\n\n pdf = pd.DataFrame()\n nelem = 10\n pdf[\"key1\"] = np.random.randint(0, 5, nelem)\n pdf[\"key2\"] = np.random.randint(0, 3, nelem)\n pdf[1] = np.arange(1, 1 + nelem)\n pdf[2] = np.random.random(nelem)\n df = cudf.from_pandas(pdf)\n\n assert_eq(df[df.columns], df)\n assert_eq(df[df.columns[:1]], df[[\"key1\"]])\n\n for i in range(1, len(pdf.columns) + 1):\n for idx in combinations(pdf.columns, i):\n assert pdf[list(idx)].equals(df[list(idx)].to_pandas())\n\n # test for only numeric columns\n df = pd.DataFrame()\n for i in range(0, 10):\n df[i] = range(nelem)\n gdf = cudf.DataFrame.from_pandas(df)\n assert_eq(gdf, df)\n\n assert_eq(gdf[gdf.columns], gdf)\n assert_eq(gdf[gdf.columns[:3]], gdf[[0, 1, 2]])\n\n\ndef test_dataframe_slicing():\n df = cudf.DataFrame()\n size = 123\n df[\"a\"] = ha = np.random.randint(low=0, high=100, size=size).astype(\n np.int32\n )\n df[\"b\"] = hb = np.random.random(size).astype(np.float32)\n df[\"c\"] = hc = np.random.randint(low=0, high=100, size=size).astype(\n np.int64\n )\n df[\"d\"] = hd = np.random.random(size).astype(np.float64)\n\n # Row slice first 10\n first_10 = df[:10]\n assert len(first_10) == 10\n assert tuple(first_10.columns) == (\"a\", \"b\", \"c\", \"d\")\n np.testing.assert_equal(first_10[\"a\"].to_array(), ha[:10])\n np.testing.assert_equal(first_10[\"b\"].to_array(), hb[:10])\n np.testing.assert_equal(first_10[\"c\"].to_array(), hc[:10])\n np.testing.assert_equal(first_10[\"d\"].to_array(), hd[:10])\n del first_10\n\n # Row slice last 10\n last_10 = df[-10:]\n assert len(last_10) == 10\n assert tuple(last_10.columns) == (\"a\", \"b\", \"c\", \"d\")\n np.testing.assert_equal(last_10[\"a\"].to_array(), ha[-10:])\n np.testing.assert_equal(last_10[\"b\"].to_array(), hb[-10:])\n np.testing.assert_equal(last_10[\"c\"].to_array(), hc[-10:])\n np.testing.assert_equal(last_10[\"d\"].to_array(), hd[-10:])\n del last_10\n\n # Row slice [begin:end]\n begin = 7\n end = 121\n subrange = df[begin:end]\n assert len(subrange) == end - begin\n assert tuple(subrange.columns) == (\"a\", \"b\", \"c\", \"d\")\n np.testing.assert_equal(subrange[\"a\"].to_array(), ha[begin:end])\n np.testing.assert_equal(subrange[\"b\"].to_array(), hb[begin:end])\n np.testing.assert_equal(subrange[\"c\"].to_array(), hc[begin:end])\n np.testing.assert_equal(subrange[\"d\"].to_array(), hd[begin:end])\n del subrange\n\n\n@pytest.mark.parametrize(\"step\", [1, 2, 5])\n@pytest.mark.parametrize(\"scalar\", [0, 20, 100])\ndef test_dataframe_loc(scalar, step):\n size = 123\n pdf = pd.DataFrame(\n {\n \"a\": np.random.randint(low=0, high=100, size=size),\n \"b\": np.random.random(size).astype(np.float32),\n \"c\": np.random.random(size).astype(np.float64),\n \"d\": np.random.random(size).astype(np.float64),\n }\n )\n\n df = cudf.DataFrame.from_pandas(pdf)\n\n assert_eq(df.loc[:, [\"a\"]], pdf.loc[:, [\"a\"]])\n\n assert_eq(df.loc[:, \"d\"], pdf.loc[:, \"d\"])\n\n # Scalar label\n assert_eq(df.loc[scalar], pdf.loc[scalar])\n\n # Full slice\n assert_eq(df.loc[:, \"c\"], pdf.loc[:, \"c\"])\n\n # Repeat with at[]\n assert_eq(df.loc[:, [\"a\"]], df.at[:, [\"a\"]])\n assert_eq(df.loc[:, \"d\"], df.at[:, \"d\"])\n assert_eq(df.loc[scalar], df.at[scalar])\n assert_eq(df.loc[:, \"c\"], df.at[:, \"c\"])\n\n begin = 110\n end = 122\n\n 
assert_eq(\n df.loc[begin:end:step, [\"c\", \"d\", \"a\"]],\n pdf.loc[begin:end:step, [\"c\", \"d\", \"a\"]],\n )\n\n assert_eq(df.loc[begin:end, [\"c\", \"d\"]], pdf.loc[begin:end, [\"c\", \"d\"]])\n\n # Slicing on columns:\n assert_eq(\n df.loc[begin:end:step, \"a\":\"c\"], pdf.loc[begin:end:step, \"a\":\"c\"]\n )\n\n # Slicing of size 1:\n assert_eq(df.loc[begin:begin, \"a\"], pdf.loc[begin:begin, \"a\"])\n\n # TODO: Pandas changes the dtype here when it shouldn't\n assert_eq(\n df.loc[begin, \"a\":\"a\"], pdf.loc[begin, \"a\":\"a\"], check_dtype=False\n )\n\n # Repeat with at[]\n assert_eq(\n df.loc[begin:end:step, [\"c\", \"d\", \"a\"]],\n df.at[begin:end:step, [\"c\", \"d\", \"a\"]],\n )\n assert_eq(df.loc[begin:end, [\"c\", \"d\"]], df.at[begin:end, [\"c\", \"d\"]])\n assert_eq(df.loc[begin:end:step, \"a\":\"c\"], df.at[begin:end:step, \"a\":\"c\"])\n assert_eq(df.loc[begin:begin, \"a\"], df.at[begin:begin, \"a\"])\n assert_eq(df.loc[begin, \"a\":\"a\"], df.at[begin, \"a\":\"a\"], check_dtype=False)\n\n # Make int64 index\n offset = 50\n df2 = df[offset:]\n pdf2 = pdf[offset:]\n begin = 117\n end = 122\n assert_eq(\n df2.loc[begin:end, [\"c\", \"d\", \"a\"]],\n pdf2.loc[begin:end, [\"c\", \"d\", \"a\"]],\n )\n\n # loc with list like indexing\n assert_eq(df.loc[[0]], pdf.loc[[0]])\n # loc with column like indexing\n assert_eq(df.loc[cudf.Series([0])], pdf.loc[pd.Series([0])])\n assert_eq(df.loc[cudf.Series([0])._column], pdf.loc[pd.Series([0])])\n assert_eq(df.loc[np.array([0])], pdf.loc[np.array([0])])\n\n\ndef test_dataframe_loc_duplicate_index_scalar():\n pdf = pd.DataFrame({\"a\": [1, 2, 3, 4, 5]}, index=[1, 2, 1, 4, 2])\n gdf = cudf.DataFrame.from_pandas(pdf)\n\n assert_eq(pdf.loc[2], gdf.loc[2])\n\n\n@pytest.mark.parametrize(\n \"mask\",\n [[True, False, False, False, False], [True, False, True, False, True]],\n)\n@pytest.mark.parametrize(\"arg\", [\"a\", slice(\"a\", \"a\"), slice(\"a\", \"b\")])\ndef test_dataframe_loc_mask(mask, arg):\n pdf = pd.DataFrame(\n {\"a\": [\"a\", \"b\", \"c\", \"d\", \"e\"], \"b\": [\"f\", \"g\", \"h\", \"i\", \"j\"]}\n )\n gdf = cudf.DataFrame.from_pandas(pdf)\n\n assert_eq(pdf.loc[mask, arg], gdf.loc[mask, arg])\n\n\ndef test_dataframe_loc_outbound():\n df = cudf.DataFrame()\n size = 10\n df[\"a\"] = ha = np.random.randint(low=0, high=100, size=size).astype(\n np.int32\n )\n df[\"b\"] = hb = np.random.random(size).astype(np.float32)\n\n pdf = pd.DataFrame()\n pdf[\"a\"] = ha\n pdf[\"b\"] = hb\n\n assert_exceptions_equal(lambda: pdf.loc[11], lambda: df.loc[11])\n\n\ndef test_series_loc_numerical():\n ps = pd.Series([1, 2, 3, 4, 5], index=[5, 6, 7, 8, 9])\n gs = cudf.Series.from_pandas(ps)\n\n assert_eq(ps.loc[5], gs.loc[5])\n assert_eq(ps.loc[6], gs.loc[6])\n assert_eq(ps.loc[6:8], gs.loc[6:8])\n assert_eq(ps.loc[:8], gs.loc[:8])\n assert_eq(ps.loc[6:], gs.loc[6:])\n assert_eq(ps.loc[::2], gs.loc[::2])\n assert_eq(ps.loc[[5, 8, 9]], gs.loc[[5, 8, 9]])\n assert_eq(\n ps.loc[[True, False, True, False, True]],\n gs.loc[[True, False, True, False, True]],\n )\n assert_eq(ps.loc[[5, 8, 9]], gs.loc[cupy.array([5, 8, 9])])\n\n\ndef test_series_loc_float_index():\n ps = pd.Series([1, 2, 3, 4, 5], index=[5.43, 6.34, 7.34, 8.0, 9.1])\n gs = cudf.Series.from_pandas(ps)\n\n assert_eq(ps.loc[5.43], gs.loc[5.43])\n assert_eq(ps.loc[8], gs.loc[8])\n assert_eq(ps.loc[6.1:8], gs.loc[6.1:8])\n assert_eq(ps.loc[:7.1], gs.loc[:7.1])\n assert_eq(ps.loc[6.345:], gs.loc[6.345:])\n assert_eq(ps.loc[::2], gs.loc[::2])\n assert_eq(\n ps.loc[[True, False, True, False, 
True]],\n gs.loc[[True, False, True, False, True]],\n )\n\n\ndef test_series_loc_string():\n ps = pd.Series(\n [1, 2, 3, 4, 5], index=[\"one\", \"two\", \"three\", \"four\", \"five\"]\n )\n gs = cudf.Series.from_pandas(ps)\n\n assert_eq(ps.loc[\"one\"], gs.loc[\"one\"])\n assert_eq(ps.loc[\"five\"], gs.loc[\"five\"])\n assert_eq(ps.loc[\"two\":\"four\"], gs.loc[\"two\":\"four\"])\n assert_eq(ps.loc[:\"four\"], gs.loc[:\"four\"])\n assert_eq(ps.loc[\"two\":], gs.loc[\"two\":])\n assert_eq(ps.loc[::2], gs.loc[::2])\n assert_eq(ps.loc[[\"one\", \"four\", \"five\"]], gs.loc[[\"one\", \"four\", \"five\"]])\n assert_eq(\n ps.loc[[True, False, True, False, True]],\n gs.loc[[True, False, True, False, True]],\n )\n\n\ndef test_series_loc_datetime():\n if PANDAS_GE_110:\n kwargs = {\"check_freq\": False}\n else:\n kwargs = {}\n ps = pd.Series(\n [1, 2, 3, 4, 5], index=pd.date_range(\"20010101\", \"20010105\")\n )\n gs = cudf.Series.from_pandas(ps)\n\n # a few different ways of specifying a datetime label:\n assert_eq(ps.loc[\"20010101\"], gs.loc[\"20010101\"])\n assert_eq(ps.loc[\"2001-01-01\"], gs.loc[\"2001-01-01\"])\n assert_eq(\n ps.loc[pd.to_datetime(\"2001-01-01\")],\n gs.loc[pd.to_datetime(\"2001-01-01\")],\n )\n assert_eq(\n ps.loc[np.datetime64(\"2001-01-01\")],\n gs.loc[np.datetime64(\"2001-01-01\")],\n )\n\n assert_eq(\n ps.loc[\"2001-01-02\":\"2001-01-05\"],\n gs.loc[\"2001-01-02\":\"2001-01-05\"],\n **kwargs,\n )\n assert_eq(ps.loc[\"2001-01-02\":], gs.loc[\"2001-01-02\":], **kwargs)\n assert_eq(ps.loc[:\"2001-01-04\"], gs.loc[:\"2001-01-04\"], **kwargs)\n assert_eq(ps.loc[::2], gs.loc[::2], **kwargs)\n\n assert_eq(\n ps.loc[[\"2001-01-01\", \"2001-01-04\", \"2001-01-05\"]],\n gs.loc[[\"2001-01-01\", \"2001-01-04\", \"2001-01-05\"]],\n )\n\n assert_eq(\n ps.loc[\n [\n pd.to_datetime(\"2001-01-01\"),\n pd.to_datetime(\"2001-01-04\"),\n pd.to_datetime(\"2001-01-05\"),\n ]\n ],\n gs.loc[\n [\n pd.to_datetime(\"2001-01-01\"),\n pd.to_datetime(\"2001-01-04\"),\n pd.to_datetime(\"2001-01-05\"),\n ]\n ],\n )\n assert_eq(\n ps.loc[[True, False, True, False, True]],\n gs.loc[[True, False, True, False, True]],\n **kwargs,\n )\n\n just_less_than_max = ps.index.max() - pd.Timedelta(\"5m\")\n\n assert_eq(\n ps.loc[:just_less_than_max], gs.loc[:just_less_than_max], **kwargs\n )\n\n\ndef test_series_loc_categorical():\n ps = pd.Series(\n [1, 2, 3, 4, 5], index=pd.Categorical([\"a\", \"b\", \"c\", \"d\", \"e\"])\n )\n gs = cudf.Series.from_pandas(ps)\n\n assert_eq(ps.loc[\"a\"], gs.loc[\"a\"])\n assert_eq(ps.loc[\"e\"], gs.loc[\"e\"])\n assert_eq(ps.loc[\"b\":\"d\"], gs.loc[\"b\":\"d\"])\n assert_eq(ps.loc[:\"d\"], gs.loc[:\"d\"])\n assert_eq(ps.loc[\"b\":], gs.loc[\"b\":])\n assert_eq(ps.loc[::2], gs.loc[::2])\n\n # order of categories changes, so we can only\n # compare values:\n assert_eq(\n ps.loc[[\"a\", \"d\", \"e\"]].values, gs.loc[[\"a\", \"d\", \"e\"]].to_array()\n )\n\n assert_eq(\n ps.loc[[True, False, True, False, True]],\n gs.loc[[True, False, True, False, True]],\n )\n\n\n@pytest.mark.parametrize(\n \"obj\",\n [\n pd.DataFrame(\n {\"a\": [1, 2, 3, 4]},\n index=pd.MultiIndex.from_frame(\n pd.DataFrame(\n {\"A\": [2, 3, 1, 4], \"B\": [\"low\", \"high\", \"high\", \"low\"]}\n )\n ),\n ),\n pd.Series(\n [1, 2, 3, 4],\n index=pd.MultiIndex.from_frame(\n pd.DataFrame(\n {\"A\": [2, 3, 1, 4], \"B\": [\"low\", \"high\", \"high\", \"low\"]}\n )\n ),\n ),\n ],\n)\ndef test_dataframe_series_loc_multiindex(obj):\n pindex = pd.MultiIndex.from_frame(\n pd.DataFrame({\"A\": [3, 2], \"B\": [\"high\", 
\"low\"]})\n )\n\n gobj = cudf.from_pandas(obj)\n gindex = cudf.MultiIndex.from_pandas(pindex)\n\n # cudf MultiIndex as arg\n expected = obj.loc[pindex]\n got = gobj.loc[gindex]\n assert_eq(expected, got)\n\n # pandas MultiIndex as arg\n expected = obj.loc[pindex]\n got = gobj.loc[pindex]\n assert_eq(expected, got)\n\n\n@pytest.mark.parametrize(\"nelem\", [2, 5, 20, 100])\ndef test_series_iloc(nelem):\n\n # create random cudf.Series\n np.random.seed(12)\n ps = pd.Series(np.random.sample(nelem))\n\n # gpu cudf.Series\n gs = cudf.Series(ps)\n\n # positive tests for indexing\n np.testing.assert_allclose(gs.iloc[-1 * nelem], ps.iloc[-1 * nelem])\n np.testing.assert_allclose(gs.iloc[-1], ps.iloc[-1])\n np.testing.assert_allclose(gs.iloc[0], ps.iloc[0])\n np.testing.assert_allclose(gs.iloc[1], ps.iloc[1])\n np.testing.assert_allclose(gs.iloc[nelem - 1], ps.iloc[nelem - 1])\n\n # positive tests for slice\n np.testing.assert_allclose(gs.iloc[-1:1].to_array(), ps.iloc[-1:1])\n np.testing.assert_allclose(\n gs.iloc[nelem - 1 : -1].to_array(), ps.iloc[nelem - 1 : -1]\n )\n np.testing.assert_allclose(\n gs.iloc[0 : nelem - 1].to_pandas(), ps.iloc[0 : nelem - 1]\n )\n np.testing.assert_allclose(gs.iloc[0:nelem].to_pandas(), ps.iloc[0:nelem])\n np.testing.assert_allclose(gs.iloc[1:1].to_pandas(), ps.iloc[1:1])\n np.testing.assert_allclose(gs.iloc[1:2].to_pandas(), ps.iloc[1:2].values)\n np.testing.assert_allclose(\n gs.iloc[nelem - 1 : nelem + 1].to_pandas(),\n ps.iloc[nelem - 1 : nelem + 1],\n )\n np.testing.assert_allclose(\n gs.iloc[nelem : nelem * 2].to_pandas(), ps.iloc[nelem : nelem * 2]\n )\n\n\n@pytest.mark.parametrize(\"nelem\", [2, 5, 20, 100])\ndef test_dataframe_iloc(nelem):\n gdf = cudf.DataFrame()\n\n gdf[\"a\"] = ha = np.random.randint(low=0, high=100, size=nelem).astype(\n np.int32\n )\n gdf[\"b\"] = hb = np.random.random(nelem).astype(np.float32)\n\n pdf = pd.DataFrame()\n pdf[\"a\"] = ha\n pdf[\"b\"] = hb\n\n assert_eq(gdf.iloc[-1:1], pdf.iloc[-1:1])\n assert_eq(gdf.iloc[nelem - 1 : -1], pdf.iloc[nelem - 1 : -1])\n assert_eq(gdf.iloc[0 : nelem - 1], pdf.iloc[0 : nelem - 1])\n assert_eq(gdf.iloc[0:nelem], pdf.iloc[0:nelem])\n assert_eq(gdf.iloc[1:1], pdf.iloc[1:1])\n assert_eq(gdf.iloc[1:2], pdf.iloc[1:2])\n assert_eq(gdf.iloc[nelem - 1 : nelem + 1], pdf.iloc[nelem - 1 : nelem + 1])\n assert_eq(gdf.iloc[nelem : nelem * 2], pdf.iloc[nelem : nelem * 2])\n\n assert_eq(gdf.iloc[-1 * nelem], pdf.iloc[-1 * nelem])\n assert_eq(gdf.iloc[-1], pdf.iloc[-1])\n assert_eq(gdf.iloc[0], pdf.iloc[0])\n assert_eq(gdf.iloc[1], pdf.iloc[1])\n assert_eq(gdf.iloc[nelem - 1], pdf.iloc[nelem - 1])\n\n # Repeat the above with iat[]\n assert_eq(gdf.iloc[-1:1], gdf.iat[-1:1])\n assert_eq(gdf.iloc[nelem - 1 : -1], gdf.iat[nelem - 1 : -1])\n assert_eq(gdf.iloc[0 : nelem - 1], gdf.iat[0 : nelem - 1])\n assert_eq(gdf.iloc[0:nelem], gdf.iat[0:nelem])\n assert_eq(gdf.iloc[1:1], gdf.iat[1:1])\n assert_eq(gdf.iloc[1:2], gdf.iat[1:2])\n assert_eq(gdf.iloc[nelem - 1 : nelem + 1], gdf.iat[nelem - 1 : nelem + 1])\n assert_eq(gdf.iloc[nelem : nelem * 2], gdf.iat[nelem : nelem * 2])\n\n assert_eq(gdf.iloc[-1 * nelem], gdf.iat[-1 * nelem])\n assert_eq(gdf.iloc[-1], gdf.iat[-1])\n assert_eq(gdf.iloc[0], gdf.iat[0])\n assert_eq(gdf.iloc[1], gdf.iat[1])\n assert_eq(gdf.iloc[nelem - 1], gdf.iat[nelem - 1])\n\n # iloc with list like indexing\n assert_eq(gdf.iloc[[0]], pdf.iloc[[0]])\n # iloc with column like indexing\n assert_eq(gdf.iloc[cudf.Series([0])], pdf.iloc[pd.Series([0])])\n assert_eq(gdf.iloc[cudf.Series([0])._column], 
pdf.iloc[pd.Series([0])])\n assert_eq(gdf.iloc[np.array([0])], pdf.loc[np.array([0])])\n\n\n@pytest.mark.xfail(raises=AssertionError, reason=\"Series.index are different\")\ndef test_dataframe_iloc_tuple():\n gdf = cudf.DataFrame()\n nelem = 123\n gdf[\"a\"] = ha = np.random.randint(low=0, high=100, size=nelem).astype(\n np.int32\n )\n gdf[\"b\"] = hb = np.random.random(nelem).astype(np.float32)\n\n pdf = pd.DataFrame()\n pdf[\"a\"] = ha\n pdf[\"b\"] = hb\n\n # We don't support passing the column names into the index quite yet\n got = gdf.iloc[1, [1]]\n expect = pdf.iloc[1, [1]]\n\n assert_eq(got, expect)\n\n\n@pytest.mark.xfail(\n raises=IndexError, reason=\"positional indexers are out-of-bounds\"\n)\ndef test_dataframe_iloc_index_error():\n gdf = cudf.DataFrame()\n nelem = 123\n gdf[\"a\"] = ha = np.random.randint(low=0, high=100, size=nelem).astype(\n np.int32\n )\n gdf[\"b\"] = hb = np.random.random(nelem).astype(np.float32)\n\n pdf = pd.DataFrame()\n pdf[\"a\"] = ha\n pdf[\"b\"] = hb\n\n def assert_col(g, p):\n np.testing.assert_equal(g[\"a\"].to_array(), p[\"a\"])\n np.testing.assert_equal(g[\"b\"].to_array(), p[\"b\"])\n\n assert_col(gdf.iloc[nelem * 2], pdf.iloc[nelem * 2])\n\n\n@pytest.mark.parametrize(\"ntake\", [0, 1, 10, 123, 122, 200])\ndef test_dataframe_take(ntake):\n np.random.seed(0)\n df = cudf.DataFrame()\n\n nelem = 123\n df[\"ii\"] = np.random.randint(0, 20, nelem)\n df[\"ff\"] = np.random.random(nelem)\n\n take_indices = np.random.randint(0, len(df), ntake)\n\n actual = df.take(take_indices)\n expected = df.to_pandas().take(take_indices)\n\n assert actual.ii.null_count == 0\n assert actual.ff.null_count == 0\n assert_eq(actual, expected)\n\n\n@pytest.mark.parametrize(\"ntake\", [1, 2, 8, 9])\ndef test_dataframe_take_with_multiIndex(ntake):\n np.random.seed(0)\n df = cudf.DataFrame(\n index=cudf.MultiIndex(\n levels=[[\"lama\", \"cow\", \"falcon\"], [\"speed\", \"weight\", \"length\"]],\n codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],\n )\n )\n\n nelem = 9\n df[\"ii\"] = np.random.randint(0, 20, nelem)\n df[\"ff\"] = np.random.random(nelem)\n\n take_indices = np.random.randint(0, len(df), ntake)\n\n actual = df.take(take_indices)\n expected = df.to_pandas().take(take_indices)\n\n assert_eq(actual, expected)\n\n\n@pytest.mark.parametrize(\"keep_index\", [True, False])\n@pytest.mark.parametrize(\"ntake\", [0, 1, 10, 123, 122, 200])\ndef test_series_take(ntake, keep_index):\n np.random.seed(0)\n nelem = 123\n\n psr = pd.Series(np.random.randint(0, 20, nelem))\n gsr = cudf.Series(psr)\n\n take_indices = np.random.randint(0, len(gsr), ntake)\n\n actual = gsr.take(take_indices, keep_index=keep_index)\n expected = psr.take(take_indices)\n\n if not keep_index:\n expected = expected.reset_index(drop=True)\n\n assert_eq(actual, expected)\n\n\ndef test_series_take_positional():\n psr = pd.Series([1, 2, 3, 4, 5], index=[\"a\", \"b\", \"c\", \"d\", \"e\"])\n\n gsr = cudf.Series.from_pandas(psr)\n\n take_indices = [1, 2, 0, 3]\n\n expect = psr.take(take_indices)\n got = gsr.take(take_indices, keep_index=True)\n\n assert_eq(expect, got)\n\n\n@pytest.mark.parametrize(\"nelem\", [0, 1, 5, 20, 100])\n@pytest.mark.parametrize(\"slice_start\", [None, 0, 1, 3, 10, -10])\n@pytest.mark.parametrize(\"slice_end\", [None, 0, 1, 30, 50, -1])\ndef test_dataframe_masked_slicing(nelem, slice_start, slice_end):\n gdf = cudf.DataFrame()\n gdf[\"a\"] = list(range(nelem))\n gdf[\"b\"] = list(range(nelem, 2 * nelem))\n gdf[\"a\"] = gdf[\"a\"].set_mask(utils.random_bitmask(nelem))\n 
gdf[\"b\"] = gdf[\"b\"].set_mask(utils.random_bitmask(nelem))\n\n def do_slice(x):\n return x[slice_start:slice_end]\n\n expect = do_slice(gdf.to_pandas())\n got = do_slice(gdf).to_pandas()\n\n assert_eq(expect, got, check_dtype=False)\n\n\n@pytest.mark.parametrize(\"dtype\", [int, float, str])\ndef test_empty_boolean_mask(dtype):\n gdf = cudf.datasets.randomdata(nrows=0, dtypes={\"a\": dtype})\n pdf = gdf.to_pandas()\n\n compare_val = dtype(1)\n\n expected = pdf[pdf.a == compare_val]\n got = gdf[gdf.a == compare_val]\n assert_eq(expected, got)\n\n expected = pdf.a[pdf.a == compare_val]\n got = gdf.a[gdf.a == compare_val]\n assert_eq(expected, got)\n\n\n@pytest.mark.parametrize(\n \"data\",\n [\n [1, 2, 3, 4],\n [1.0, 2.0, 3.0, 4.0],\n [\"one\", \"two\", \"three\", \"four\"],\n pd.Series([\"a\", \"b\", \"c\", \"d\"], dtype=\"category\"),\n pd.Series(pd.date_range(\"2010-01-01\", \"2010-01-04\")),\n ],\n)\n@pytest.mark.parametrize(\n \"mask\",\n [\n [True, True, True, True],\n [False, False, False, False],\n [True, False, True, False],\n [True, False, False, True],\n np.array([True, False, True, False]),\n pd.Series([True, False, True, False]),\n cudf.Series([True, False, True, False]),\n ],\n)\n@pytest.mark.parametrize(\"nulls\", [\"one\", \"some\", \"all\", \"none\"])\ndef test_series_apply_boolean_mask(data, mask, nulls):\n psr = pd.Series(data)\n\n if len(data) > 0:\n if nulls == \"one\":\n p = np.random.randint(0, 4)\n psr[p] = None\n elif nulls == \"some\":\n p1, p2 = np.random.randint(0, 4, (2,))\n psr[p1] = None\n psr[p2] = None\n elif nulls == \"all\":\n psr[:] = None\n\n gsr = cudf.from_pandas(psr)\n\n # TODO: from_pandas(psr) has dtype \"float64\"\n # when psr has dtype \"object\" and is all None\n if psr.dtype == \"object\" and nulls == \"all\":\n gsr = cudf.Series([None, None, None, None], dtype=\"object\")\n\n if isinstance(mask, cudf.Series):\n expect = psr[mask.to_pandas()]\n else:\n expect = psr[mask]\n got = gsr[mask]\n\n assert_eq(expect, got)\n\n\ndef test_dataframe_apply_boolean_mask():\n pdf = pd.DataFrame(\n {\n \"a\": [0, 1, 2, 3],\n \"b\": [0.1, 0.2, None, 0.3],\n \"c\": [\"a\", None, \"b\", \"c\"],\n }\n )\n gdf = cudf.DataFrame.from_pandas(pdf)\n assert_eq(pdf[[True, False, True, False]], gdf[[True, False, True, False]])\n\n\n\"\"\"\nThis test compares cudf and Pandas DataFrame boolean indexing.\n\"\"\"\n\n\n@pytest.mark.parametrize(\n \"mask_fn\", [lambda x: x, lambda x: np.array(x), lambda x: pd.Series(x)]\n)\ndef test_dataframe_boolean_mask(mask_fn):\n mask_base = [\n True,\n False,\n True,\n False,\n True,\n False,\n True,\n False,\n True,\n False,\n ]\n pdf = pd.DataFrame({\"x\": range(10), \"y\": range(10)})\n gdf = cudf.from_pandas(pdf)\n mask = mask_fn(mask_base)\n assert len(mask) == gdf.shape[0]\n pdf_masked = pdf[mask]\n gdf_masked = gdf[mask]\n assert pdf_masked.to_string().split() == gdf_masked.to_string().split()\n\n\n@pytest.mark.parametrize(\n \"key, value\",\n [\n (0, 4),\n (1, 4),\n ([0, 1], 4),\n ([0, 1], [4, 5]),\n (slice(0, 2), [4, 5]),\n (slice(1, None), [4, 5, 6, 7]),\n ([], 1),\n ([], []),\n (slice(None, None), 1),\n (slice(-1, -3), 7),\n ],\n)\n@pytest.mark.parametrize(\"nulls\", [\"none\", \"some\", \"all\"])\ndef test_series_setitem_basics(key, value, nulls):\n psr = pd.Series([1, 2, 3, 4, 5])\n if nulls == \"some\":\n psr[[0, 4]] = None\n elif nulls == \"all\":\n psr[:] = None\n gsr = cudf.from_pandas(psr)\n psr[key] = value\n gsr[key] = value\n assert_eq(psr, gsr, check_dtype=False)\n\n\ndef test_series_setitem_null():\n gsr = 
cudf.Series([1, 2, 3, 4])\n gsr[0] = None\n\n expect = cudf.Series([None, 2, 3, 4])\n got = gsr\n assert_eq(expect, got)\n\n gsr = cudf.Series([None, 2, 3, 4])\n gsr[0] = 1\n\n expect = cudf.Series([1, 2, 3, 4])\n got = gsr\n assert_eq(expect, got)\n\n\n@pytest.mark.parametrize(\n \"key, value\",\n [\n (0, 4),\n (1, 4),\n ([0, 1], 4),\n ([0, 1], [4, 5]),\n (slice(0, 2), [4, 5]),\n (slice(1, None), [4, 5, 6, 7]),\n ([], 1),\n ([], []),\n (slice(None, None), 1),\n (slice(-1, -3), 7),\n ],\n)\n@pytest.mark.parametrize(\"nulls\", [\"none\", \"some\", \"all\"])\ndef test_series_setitem_iloc(key, value, nulls):\n psr = pd.Series([1, 2, 3, 4, 5])\n if nulls == \"some\":\n psr[[0, 4]] = None\n elif nulls == \"all\":\n psr[:] = None\n gsr = cudf.from_pandas(psr)\n psr.iloc[key] = value\n gsr.iloc[key] = value\n assert_eq(psr, gsr, check_dtype=False)\n\n\n@pytest.mark.parametrize(\n \"key, value\",\n [\n (0, 0.5),\n ([0, 1], 0.5),\n ([0, 1], [0.5, 2.5]),\n (slice(0, 2), [0.5, 0.25]),\n ],\n)\ndef test_series_setitem_dtype(key, value):\n psr = pd.Series([1, 2, 3], dtype=\"int32\")\n gsr = cudf.from_pandas(psr)\n psr[key] = value\n gsr[key] = value\n assert_eq(psr, gsr)\n\n\ndef test_series_setitem_datetime():\n psr = pd.Series([\"2001\", \"2002\", \"2003\"], dtype=\"datetime64[ns]\")\n gsr = cudf.from_pandas(psr)\n\n psr[0] = np.datetime64(\"2005\")\n gsr[0] = np.datetime64(\"2005\")\n\n assert_eq(psr, gsr)\n\n\n@pytest.mark.xfail(\n condition=not PANDAS_GE_120,\n reason=\"Pandas will coerce to object datatype here\",\n)\ndef test_series_setitem_datetime_coerced():\n psr = pd.Series([\"2001\", \"2002\", \"2003\"], dtype=\"datetime64[ns]\")\n gsr = cudf.from_pandas(psr)\n\n psr[0] = \"2005\"\n gsr[0] = \"2005\"\n\n assert_eq(psr, gsr)\n\n\ndef test_series_setitem_categorical():\n psr = pd.Series([\"a\", \"b\", \"a\", \"c\", \"d\"], dtype=\"category\")\n gsr = cudf.from_pandas(psr)\n\n psr[0] = \"d\"\n gsr[0] = \"d\"\n assert_eq(psr, gsr)\n\n psr = psr.cat.add_categories([\"e\"])\n gsr = gsr.cat.add_categories([\"e\"])\n psr[0] = \"e\"\n gsr[0] = \"e\"\n assert_eq(psr, gsr)\n\n psr[[0, 1]] = \"b\"\n gsr[[0, 1]] = \"b\"\n assert_eq(psr, gsr)\n\n psr[0:3] = \"e\"\n gsr[0:3] = \"e\"\n assert_eq(psr, gsr)\n\n\n@pytest.mark.parametrize(\n \"key, value\",\n [\n (0, \"d\"),\n (0, \"g\"),\n ([0, 1], \"g\"),\n ([0, 1], None),\n (slice(None, 2), \"g\"),\n (slice(None, 2), [\"g\", None]),\n ],\n)\ndef test_series_setitem_string(key, value):\n psr = pd.Series([\"a\", \"b\", \"c\", \"d\", \"e\"])\n gsr = cudf.from_pandas(psr)\n psr[key] = value\n gsr[key] = value\n assert_eq(psr, gsr)\n\n psr = pd.Series([\"a\", None, \"c\", \"d\", \"e\"])\n gsr = cudf.from_pandas(psr)\n psr[key] = value\n gsr[key] = value\n assert_eq(psr, gsr)\n\n\n@pytest.mark.parametrize(\n \"key, value\",\n [\n (\"a\", 4),\n (\"b\", 4),\n (\"b\", np.int8(8)),\n (\"d\", 4),\n (\"d\", np.int8(16)),\n (\"d\", np.float32(16)),\n ([\"a\", \"b\"], 4),\n ([\"a\", \"b\"], [4, 5]),\n ([True, False, True], 4),\n ([False, False, False], 4),\n ([True, False, True], [4, 5]),\n ],\n)\ndef test_series_setitem_loc(key, value):\n psr = pd.Series([1, 2, 3], [\"a\", \"b\", \"c\"])\n gsr = cudf.from_pandas(psr)\n psr.loc[key] = value\n gsr.loc[key] = value\n assert_eq(psr, gsr)\n\n\n@pytest.mark.parametrize(\n \"key, value\",\n [\n (1, \"d\"),\n (2, \"e\"),\n (4, \"f\"),\n ([1, 3], \"g\"),\n ([1, 3], [\"g\", \"h\"]),\n ([True, False, True], \"i\"),\n ([False, False, False], \"j\"),\n ([True, False, True], [\"k\", \"l\"]),\n ],\n)\ndef 
test_series_setitem_loc_numeric_index(key, value):\n psr = pd.Series([\"a\", \"b\", \"c\"], [1, 2, 3])\n gsr = cudf.from_pandas(psr)\n psr.loc[key] = value\n gsr.loc[key] = value\n assert_eq(psr, gsr)\n\n\n@pytest.mark.parametrize(\n \"key, value\",\n [\n ((0, 0), 5),\n ((slice(None), 0), 5),\n ((slice(None), 0), range(3)),\n ((slice(None, -1), 0), range(2)),\n (([0, 1], 0), 5),\n ],\n)\ndef test_dataframe_setitem_iloc(key, value, pdf_gdf):\n pdf, gdf = pdf_gdf\n pdf.iloc[key] = value\n gdf.iloc[key] = value\n assert_eq(pdf, gdf)\n\n\n@pytest.mark.parametrize(\n \"key, value\",\n [\n ((\"one\", \"a\"), 5),\n ((slice(None), \"a\"), 5),\n ((slice(None), \"a\"), range(3)),\n ((slice(None, \"two\"), \"a\"), range(2)),\n (([\"one\", \"two\"], \"a\"), 5),\n ],\n)\ndef test_dataframe_setitem_loc(key, value, pdf_gdf):\n pdf, gdf = pdf_gdf\n pdf.loc[key] = value\n gdf.loc[key] = value\n assert_eq(pdf, gdf)\n\n\n@pytest.mark.parametrize(\n \"key,value\",\n [\n ((0, 0), 5.0),\n ((slice(None), 0), 5.0),\n ((slice(None), 0), np.arange(7, dtype=\"float64\")),\n ],\n)\ndef test_dataframe_setitem_iloc_multiindex(key, value, pdf_gdf_multi):\n pdf, gdf = pdf_gdf_multi\n\n pdf.iloc[key] = value\n gdf.iloc[key] = value\n\n assert_eq(pdf, gdf)\n\n\ndef test_boolean_indexing_single_row(pdf_gdf):\n pdf, gdf = pdf_gdf\n assert_eq(\n pdf.loc[[True, False, False], :], gdf.loc[[True, False, False], :]\n )\n\n\ndef test_iloc_negative_indices():\n psr = pd.Series([1, 2, 3, 4, 5])\n gsr = cudf.from_pandas(psr)\n assert_eq(psr.iloc[[-1, -2, -4]], gsr.iloc[[-1, -2, -4]])\n\n\ndef test_out_of_bounds_indexing():\n psr = pd.Series([1, 2, 3])\n gsr = cudf.from_pandas(psr)\n\n assert_exceptions_equal(\n lambda: psr[[0, 1, 9]],\n lambda: gsr[[0, 1, 9]],\n compare_error_message=False,\n )\n assert_exceptions_equal(\n lambda: psr[[0, 1, -4]],\n lambda: gsr[[0, 1, -4]],\n compare_error_message=False,\n )\n assert_exceptions_equal(\n lambda: psr.__setitem__([0, 1, 9], 2),\n lambda: gsr.__setitem__([0, 1, 9], 2),\n compare_error_message=False,\n )\n assert_exceptions_equal(\n lambda: psr.__setitem__([0, 1, -4], 2),\n lambda: gsr.__setitem__([0, 1, -4], 2),\n compare_error_message=False,\n )\n assert_exceptions_equal(\n lambda: psr[4:6].iloc.__setitem__(-1, 2),\n lambda: gsr[4:6].iloc.__setitem__(-1, 2),\n compare_error_message=False,\n )\n assert_exceptions_equal(\n lambda: psr[4:6].iloc.__setitem__(1, 2),\n lambda: gsr[4:6].iloc.__setitem__(1, 2),\n compare_error_message=False,\n )\n\n\ndef test_sliced_indexing():\n a = list(range(4, 4 + 150))\n b = list(range(0, 0 + 150))\n pdf = pd.DataFrame({\"a\": a, \"b\": b})\n gdf = cudf.DataFrame.from_pandas(pdf)\n pdf = pdf.set_index(\"a\")\n gdf = gdf.set_index(\"a\")\n pidx = pdf.index[:75]\n gidx = gdf.index[:75]\n\n assert_eq(pdf.loc[pidx], gdf.loc[gidx])\n\n\n@pytest.mark.parametrize(\"index\", [[\"a\"], [\"a\", \"a\"], [\"a\", \"a\", \"b\", \"c\"]])\ndef test_iloc_categorical_index(index):\n gdf = cudf.DataFrame({\"data\": range(len(index))}, index=index)\n gdf.index = gdf.index.astype(\"category\")\n pdf = gdf.to_pandas()\n expect = pdf.iloc[:, 0]\n got = gdf.iloc[:, 0]\n assert_eq(expect, got)\n\n\n@pytest.mark.parametrize(\n \"sli\",\n [\n slice(\"2001\", \"2020\"),\n slice(\"2001\", \"2002\"),\n slice(\"2002\", \"2001\"),\n slice(None, \"2020\"),\n slice(\"2020\", None),\n ],\n)\n@pytest.mark.parametrize(\"is_dataframe\", [True, False])\ndef test_loc_datetime_index(sli, is_dataframe):\n\n if is_dataframe is True:\n pd_data = pd.DataFrame(\n {\"a\": [1, 2, 3]},\n 
index=pd.Series([\"2001\", \"2009\", \"2002\"], dtype=\"datetime64[ns]\"),\n )\n else:\n pd_data = pd.Series(\n [1, 2, 3],\n pd.Series([\"2001\", \"2009\", \"2002\"], dtype=\"datetime64[ns]\"),\n )\n\n gd_data = cudf.from_pandas(pd_data)\n\n expect = pd_data.loc[sli]\n got = gd_data.loc[sli]\n\n assert_eq(expect, got)\n\n\n@pytest.mark.parametrize(\n \"gdf\",\n [\n cudf.DataFrame({\"a\": range(1000000)}),\n cudf.DataFrame({\"a\": range(1000000), \"b\": range(1000000)}),\n cudf.DataFrame({\"a\": range(20), \"b\": range(20)}),\n cudf.DataFrame(\n {\n \"a\": range(20),\n \"b\": range(20),\n \"c\": [\"abc\", \"def\", \"xyz\", \"def\", \"pqr\"] * 4,\n }\n ),\n cudf.DataFrame(index=[1, 2, 3]),\n cudf.DataFrame(index=range(1000000)),\n cudf.DataFrame(columns=[\"a\", \"b\", \"c\", \"d\"]),\n cudf.DataFrame(columns=[\"a\"], index=range(1000000)),\n cudf.DataFrame(\n columns=[\"a\", \"col2\", \"...col n\"], index=range(1000000)\n ),\n cudf.DataFrame(index=cudf.Series(range(1000000)).astype(\"str\")),\n cudf.DataFrame(\n columns=[\"a\", \"b\", \"c\", \"d\"],\n index=cudf.Series(range(1000000)).astype(\"str\"),\n ),\n ],\n)\n@pytest.mark.parametrize(\n \"slice\",\n [\n slice(250000, 500000),\n slice(250000, 250001),\n slice(500000),\n slice(1, 10),\n slice(10, 20),\n slice(15, 24000),\n slice(6),\n ],\n)\ndef test_dataframe_sliced(gdf, slice):\n pdf = gdf.to_pandas()\n\n actual = gdf[slice]\n expected = pdf[slice]\n\n assert_eq(actual, expected)\n\n\n@pytest.mark.parametrize(\n \"gdf\",\n [\n cudf.DataFrame({\"a\": range(10000)}),\n cudf.DataFrame(\n {\n \"a\": range(10000),\n \"b\": range(10000),\n \"c\": range(10000),\n \"d\": range(10000),\n \"e\": range(10000),\n \"f\": range(10000),\n }\n ),\n cudf.DataFrame({\"a\": range(20), \"b\": range(20)}),\n cudf.DataFrame(\n {\n \"a\": range(20),\n \"b\": range(20),\n \"c\": [\"abc\", \"def\", \"xyz\", \"def\", \"pqr\"] * 4,\n }\n ),\n cudf.DataFrame(index=[1, 2, 3]),\n cudf.DataFrame(index=range(10000)),\n cudf.DataFrame(columns=[\"a\", \"b\", \"c\", \"d\"]),\n cudf.DataFrame(columns=[\"a\"], index=range(10000)),\n cudf.DataFrame(columns=[\"a\", \"col2\", \"...col n\"], index=range(10000)),\n cudf.DataFrame(index=cudf.Series(range(10000)).astype(\"str\")),\n cudf.DataFrame(\n columns=[\"a\", \"b\", \"c\", \"d\"],\n index=cudf.Series(range(10000)).astype(\"str\"),\n ),\n ],\n)\n@pytest.mark.parametrize(\n \"slice\", [slice(6), slice(1), slice(7), slice(1, 3)],\n)\ndef test_dataframe_iloc_index(gdf, slice):\n pdf = gdf.to_pandas()\n\n actual = gdf.iloc[:, slice]\n expected = pdf.iloc[:, slice]\n\n assert_eq(actual, expected)\n\n\n@pytest.mark.parametrize(\n \"data\",\n [\n [[0], [1], [2]],\n [[0, 1], [2, 3], [4, 5]],\n [[[0, 1], [2]], [[3, 4]], [[5, 6]]],\n [None, [[0, 1], [2]], [[3, 4], [5, 6]]],\n [[], [[0, 1], [2]], [[3, 4], [5, 6]]],\n [[], [[\"a\", \"b\"], None], [[\"c\", \"d\"], []]],\n ],\n)\n@pytest.mark.parametrize(\n \"key\", [[], [0], [0, 1], [0, 1, 0], slice(None), slice(0, 2), slice(1, 3)]\n)\ndef test_iloc_with_lists(data, key):\n psr = pd.Series(data)\n gsr = cudf.Series(data)\n assert_eq(psr.iloc[key], gsr.iloc[key])\n\n\n@pytest.mark.parametrize(\"key\", [5, -10, \"0\", \"a\", np.array(5), np.array(\"a\")])\ndef test_loc_bad_key_type(key):\n psr = pd.Series([1, 2, 3])\n gsr = cudf.from_pandas(psr)\n assert_exceptions_equal(lambda: psr[key], lambda: gsr[key])\n assert_exceptions_equal(lambda: psr.loc[key], lambda: gsr.loc[key])\n\n\n@pytest.mark.parametrize(\"key\", [\"b\", 1.0, np.array(\"b\")])\ndef 
test_loc_bad_key_type_string_index(key):\n psr = pd.Series([1, 2, 3], index=[\"a\", \"1\", \"c\"])\n gsr = cudf.from_pandas(psr)\n assert_exceptions_equal(lambda: psr[key], lambda: gsr[key])\n assert_exceptions_equal(lambda: psr.loc[key], lambda: gsr.loc[key])\n\n\ndef test_loc_zero_dim_array():\n psr = pd.Series([1, 2, 3])\n gsr = cudf.from_pandas(psr)\n\n assert_eq(psr[np.array(0)], gsr[np.array(0)])\n assert_eq(psr[np.array([0])[0]], gsr[np.array([0])[0]])\n\n\n@pytest.mark.parametrize(\n \"arg\",\n [\n slice(None),\n slice((1, 2), None),\n slice(None, (1, 2)),\n (1, 1),\n (1, slice(None)),\n ],\n)\ndef test_loc_series_multiindex(arg):\n gsr = cudf.DataFrame(\n {\"a\": [1, 1, 2], \"b\": [1, 2, 3], \"c\": [\"a\", \"b\", \"c\"]}\n ).set_index([\"a\", \"b\"])[\"c\"]\n psr = gsr.to_pandas()\n assert_eq(psr.loc[arg], gsr.loc[arg])\n\n\n@pytest.mark.parametrize(\n \"arg\",\n [\n slice(None, None, -1),\n slice(None, -1, -1),\n slice(4, -1, -1),\n slice(None, None, -3),\n slice(None, -1, -3),\n slice(4, -1, -3),\n ],\n)\n@pytest.mark.parametrize(\n \"pobj\", [pd.DataFrame({\"a\": [1, 2, 3, 4, 5]}), pd.Series([1, 2, 3, 4, 5])]\n)\ndef test_iloc_before_zero_terminate(arg, pobj):\n gobj = cudf.from_pandas(pobj)\n\n assert_eq(pobj.iloc[arg], gobj.iloc[arg])\n\n\ndef test_iloc_decimal():\n sr = cudf.Series([\"1.00\", \"2.00\", \"3.00\", \"4.00\"]).astype(\n cudf.Decimal64Dtype(scale=2, precision=3)\n )\n got = sr.iloc[[3, 2, 1, 0]]\n expect = cudf.Series([\"4.00\", \"3.00\", \"2.00\", \"1.00\"],).astype(\n cudf.Decimal64Dtype(scale=2, precision=3)\n )\n assert_eq(expect.reset_index(drop=True), got.reset_index(drop=True))\n" ]
[ [ "pandas.to_datetime", "pandas.Series", "pandas.DataFrame", "numpy.dtype", "numpy.random.sample", "numpy.random.randint", "numpy.arange", "numpy.int8", "numpy.float32", "pandas.MultiIndex", "pandas.Categorical", "pandas.Timedelta", "numpy.random.rand", "pandas.date_range", "numpy.testing.assert_allclose", "numpy.array", "numpy.random.random", "numpy.random.seed", "numpy.datetime64" ] ]
STAR-Center/IncrementalTopo
[ "befbcd55f8a438ff3dd1a2d1d5d4fbb467dc149f" ]
[ "gp_topo/RMsaddle.py" ]
[ "#!/usr/bin/env python\n'''\nThis script will remove some saddle point\nHowever there're some degree one point that is not saddle point and some low density saddle that may not be a passage.\n'''\n'''\nHere we just let the non-saddle point removed that's ok\n'''\nimport numpy as np\nimport pdb\ndef removeSaddle(gau, skeleton, count_map, th = 0.5, rmSize = 5, neighbourTh = 3):\n sk1 = skeleton.copy()\n countTb = (count_map>0)\n newCountTb = countTb.copy()\n chs,cws = np.where(countTb > 0)\n for i in range(len(chs)):\n #if it is long lane, not include\n if np.sum(countTb[chs[i]-rmSize/2:chs[i]+rmSize/2+1, cws[i]-rmSize/2:cws[i]+rmSize/2+1]) >neighbourTh:\n newCountTb[chs[i], cws[i]] = 0\n saddles = gau*newCountTb\n sk1[np.where(saddles>th)] = 0\n return sk1, saddles>th\n" ]
[ [ "numpy.where", "numpy.sum" ] ]
weigq/classification
[ "16f587af0c01a66f7c8a50c80372daf288ca4fd6" ]
[ "utils/misc.py" ]
[ "'''Some helper functions for PyTorch, including:\n - get_mean_and_std: calculate the mean and std value of dataset.\n - msr_init: net parameter initialization.\n - progress_bar: progress bar mimic xlua.progress.\n'''\nimport errno\nimport os\nimport sys\nimport time\nimport math\n\nimport torch.nn as nn\nimport torch.nn.init as init\nfrom torch.autograd import Variable\n\n__all__ = ['get_mean_and_std', 'init_params', 'mkdir_p']\n\n\ndef get_mean_and_std(dataset):\n '''Compute the mean and std value of dataset.'''\n dataloader = trainloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True, num_workers=2)\n\n mean = torch.zeros(3)\n std = torch.zeros(3)\n print('==> Computing mean and std..')\n for inputs, targets in dataloader:\n for i in range(3):\n mean[i] += inputs[:,i,:,:].mean()\n std[i] += inputs[:,i,:,:].std()\n mean.div_(len(dataset))\n std.div_(len(dataset))\n return mean, std\n\ndef init_params(net):\n '''Init layer parameters.'''\n for m in net.modules():\n if isinstance(m, nn.Conv2d):\n init.kaiming_normal(m.weight, mode='fan_out')\n if m.bias:\n init.constant(m.bias, 0)\n elif isinstance(m, nn.BatchNorm2d):\n init.constant(m.weight, 1)\n init.constant(m.bias, 0)\n elif isinstance(m, nn.Linear):\n init.normal(m.weight, std=1e-3)\n if m.bias:\n init.constant(m.bias, 0)\n\ndef mkdir_p(path):\n '''make dir if not exist'''\n try:\n os.makedirs(path)\n except OSError as exc: # Python >2.5\n if exc.errno == errno.EEXIST and os.path.isdir(path):\n pass\n else:\n raise\n\n" ]
[ [ "torch.nn.init.kaiming_normal", "torch.nn.init.constant", "torch.nn.init.normal" ] ]
AJJLagerweij/topopt
[ "b0f8f0f22363c62678d96d659d4015bec4fc59e5" ]
[ "src_Actuator/constraints.py" ]
[ "\"\"\"\nConstraints class used to specify the density constraints of the topology\noptimisation problem. It contains functions for minimum and maximum element\ndensity in the upcomming iteration and the magnitude of the volume constraint\nfunction itself of the current design. This version of the code is used for the\ncompliant design, local displacement maximisation.\n\nBram Lagerweij\nAerospace Structures and Materials Department TU Delft\n2018\n\"\"\"\n\nimport numpy as np\n\n\nclass DensityConstraint(object):\n \"\"\"\n This object relates to the constraints used in this optimization.\n It can be used for the MMA updatescheme to derive what the limit is for all\n element densities at every itteration.\n The class itself is not changed by the itterations.\n\n Attributes\n -------\n nelx : int\n Number of elements in x direction.\n nely : int\n Number of elements in y direction.\n move : float\n Maximum change in density of an element over 1 itteration.\n volume_frac : float\n Maximum volume that can be filled with material.\n volume_derivative : 2D array size(1, nelx*nely)\n Sensityvity of the density constraint to the density in each element.\n density_min : float (optional)\n Minumum density, set at 0.0 if not specified.\n density_max : float (optional)\n Maximum density, set at 0.0 if not specified.\n\n Methods\n -------\n xmin(load, x)\n Returns the minimum density value of all ellements of this itteration.\n xmax(load, x)\n Returns the maximum density value of all ellements of this itteration.\n current_volconstrain(x)\n Returns the current magnitude of the volume constraint funcion.\n \"\"\"\n def __init__(self, nelx, nely, move, volume_frac, density_min=0.0, density_max=1.0):\n self.nelx = nelx\n self.nely = nely\n self.move = move\n self.volume_frac = volume_frac\n self.volume_derivative = 1/(nelx*nely*volume_frac)*np.ones((1, nely*nelx))\n self.density_min = density_min\n self.density_max = density_max\n\n def xmin(self, x):\n \"\"\"\n This function calculates the minimum density value of all ellements of\n this itteration.\n\n Parameters\n _______\n x : 2D array size(nely, nelx)\n Density distribution of this itteration.\n\n Returns\n _______\n xmin : 2D array size(nely, nelx)\n Minimum density values of this itteration for the update scheme.\n \"\"\"\n xmin = self.density_min*np.ones((self.nely, self.nelx))\n xmin = np.maximum(xmin, x - self.move)\n return xmin\n\n def xmax(self, x):\n \"\"\"\n This function calculates the maximum density value of all ellements of\n this itteration.\n\n Parameters\n _______\n x : 2D array size(nely, nelx)\n Density distribution of this itteration.\n\n Returns\n _______\n xmax : 2D array size(nely, nelx)\n Maximum density values of this itteration after updating.\n \"\"\"\n xmax = self.density_max*np.ones((self.nely, self.nelx))\n xmax = np.minimum(xmax, x + self.move)\n return xmax\n\n def current_volconstrain(self, x):\n \"\"\"\n Calculates the current magnitude of the volume constraint funcion: ::\n\n ∑ x\n cur_vol = ────────────────── - 1\n nelx*nelx*vol_frac\n Parameters\n _______\n x : 2D array size(nely, nelx)\n Density distribution of this itteration.\n\n Returns\n _______\n curvol : float\n Curent value of the density constraint function.\n \"\"\"\n cur_vol = np.sum(x)/(self.nelx*self.nely*self.volume_frac) - 1\n return cur_vol\n" ]
[ [ "numpy.sum", "numpy.maximum", "numpy.minimum", "numpy.ones" ] ]
rwl/pylon
[ "916514255db1ae1661406f0283df756baf960d14" ]
[ "pyreto/roth_erev.py" ]
[ "#------------------------------------------------------------------------------\n# Copyright (C) 2006 Charles Gieseler\n# Copyright (C) 2010 Richard Lincoln\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; version 2 dated June, 1991.\n#\n# This software is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANDABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n# General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software Foundation,\n# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA\n#------------------------------------------------------------------------------\n\n\"\"\" Defines classes that implement the Roth-Erev reinforcement learning method.\nThe original Roth-Erev reinforcement learning algorithm was presented by\nA. Roth and I. Erev in:\n\n - A. E. Roth, I. Erev, D. Fudenberg, J. Kagel, J. Emilie and R. X. Xing,\n \"Learning in Extensive-Form Games: Experimental Data and Simple Dynamic\n Models in the Intermediate Term\", Games and Economic Behavior, 8-1,\n pp 164-212, 1995\n\n - Erev, Ido and Roth, Alvin E., \"Predicting How People Play Games:\n Reinforcement Learning in Experimental Games with Unique, Mixed Strategy\n Equilibria\", The American Economic Review, 88-4, pp 848--881, 1998\n\nImplementation adapted from the RothErevLearner in JRELM by Charles Gieseler\nwhich was itself adapted, in part, from the RothErevLearner in the Java\nJLCRAgent Simulator API (JASA) by Steve Phelps, Department of Computer Science,\nUniversity of Liverpool. For further details see:\n\n - Charles Gieseler, \"A Java Reinforcement Learning Module for the Repast\n Toolkit: Facilitating Study and Implementation with Reinforcement Learning\n in Social Science Multi-Agent Simulations\", MSc Thesis, Department of\n Computer Science, Iowa State University, 2005\n\n@license: GNU GPLv2\n\"\"\"\n\n#------------------------------------------------------------------------------\n# Imports:\n#------------------------------------------------------------------------------\n\nimport random\nimport scipy\n\nfrom pybrain.rl.learners.valuebased.valuebased import ValueBasedLearner\n#from pybrain.rl.learners.valuebased import ActionValueTable\nfrom pybrain.rl.explorers.discrete.discrete import DiscreteExplorer\nfrom pybrain.utilities import drawIndex #@UnusedImport\n\n#------------------------------------------------------------------------------\n# \"RothErev\" class:\n#------------------------------------------------------------------------------\n\nclass RothErev(ValueBasedLearner):\n \"\"\" Defines the Roth-Erev reinforcement learning method presented in:\n\n - A. E. Roth, I. 
Erev, \"Predicting How People Play Games with Unique\n Mixed-Strategy Equilibria,\" American Economics Review, Volume 88, 1998,\n 848-881.\n \"\"\"\n\n #: Does the algorithm work on-policy or off-policy?\n offPolicy = False\n\n #: Does the algorithm run in batch mode or online?\n batchMode = True\n\n\n def __init__(self, experimentation=0.5, recency=0.5):\n assert 0.0 <= experimentation <= 1.0\n assert 0.0 <= recency <= 1.0\n\n #----------------------------------------------------------------------\n # ValueBasedLearner interface:\n #----------------------------------------------------------------------\n\n #: Default exploration according to a discrete probability distribution\n #: function.\n self.explorer = ProportionalExplorer()\n\n #----------------------------------------------------------------------\n # RothErev interface:\n #----------------------------------------------------------------------\n\n #: The tendency for experimentation among action choices. The algorithm\n #: will sometimes choose non-optimal actions in favour of exploring the\n #: domain.\n #: Note: Be careful not to choose value e where (1-e) == e / (N - 1),\n #: where N is the size of the action domain (i.e. e == 0.75 and N == 4)\n #: This will result in all action propensities receiving the same\n #: experience update value, regardless of the last action chosen.\n #: Action choice probabilities will then remain uniform and no learning\n #: will occur.\n self.experimentation = experimentation\n\n #: The degree to which actions are 'forgotten'. Used to degrade the\n #: propensity for choosing actions. Meant to make recent experience\n #: more prominent than past experience in the action choice process.\n self.recency = recency\n\n #--------------------------------------------------------------------------\n # Learner interface:\n #--------------------------------------------------------------------------\n\n def learn(self):\n \"\"\" Learn on the current dataset, either for many timesteps and even\n episodes (batchMode = True) or for a single timestep\n (batchMode = False). Batch mode is possible, because Q-Learning is an\n off-policy method.\n\n In batchMode, the algorithm goes through all the samples in the history\n and performs an update on each of them. if batchMode is False, only the\n last data sample is considered. The user himself has to make sure to\n keep the dataset consistent with the agent's history.\n \"\"\"\n if self.batchMode:\n samples = self.dataset\n else:\n samples = [[self.dataset.getSample()]]\n\n for seq in samples:\n for lastState, lastAction, reward in seq:\n self._updatePropensities(int(lastState), int(lastAction), reward)\n\n #--------------------------------------------------------------------------\n # RothErev interface:\n #--------------------------------------------------------------------------\n\n def _updatePropensities(self, lastState, lastAction, reward):\n \"\"\" Update the propensities for all actions. 
The propensity for last\n action chosen will be updated using the feedback value that resulted\n from performing the action.\n\n If j is the index of the last action chosen, r_j is the reward received\n for performing j, i is the current action being updated, q_i is the\n propensity for i, and phi is the recency parameter, then this update\n function can be expressed as::\n\n q_i = (1-phi) * q_i + E(i, r_j)\n \"\"\"\n phi = self.recency\n\n for action in range(self.module.numActions):\n carryOver = (1 - phi) * self.module.getValue(lastState, action)\n experience = self._experience(lastState, action, lastAction,reward)\n\n self.module.updateValue(lastState, action, carryOver + experience)\n\n\n def _experience(self, lastState, action, previousAction, reward):\n \"\"\" This is the standard experience function for the Roth-Erev\n algorithm. Here propensities for all actions are updated and similarity\n does not come into play. That is, all action choices are assumed to be\n equally similar. If the actionIndex points to the action the reward is\n associated with (usually the last action taken) then simply adjust the\n weight by the experimentation. Otherwise, adjust the weight by a\n smaller portion of the reward.\n\n If j is the index of the last action chosen, r_j is the reward received\n for performing j, i is the current action being updated, n is the size\n of the action domain and e is the experimentation parameter, then this\n experience function can be expressed as::\n _\n | r_j * (1-e) if i = j\n E(i, r_j) = |\n |_ r_j * (e /(n-1)) if i != j\n \"\"\"\n e = self.experimentation\n\n if action == previousAction:\n experience = reward * (1 - e)\n else:\n experience = reward * (e / (self.module.numActions - 1))\n\n return experience\n\n#------------------------------------------------------------------------------\n# \"VariantRothErev\" class:\n#------------------------------------------------------------------------------\n\nclass VariantRothErev(RothErev):\n \"\"\" Variant Roth-Erev Learner\n\n This ReinforcementLearner implements a variation of the Roth-Erev\n algorithm as presented in:\n\n - James Nicolaisen, Valentin Petrov, and Leigh Tesfatsion, \"Market Power\n and Efficiency in a Computational Electricity Market with\n Discriminatory Double-Auction Pricing,\" IEEE Transactions on\n Evolutionary Computation, Volume 5, Number 5, 2001, 504-523.\n\n @see L{RothErev} for details on the original Roth-Erev algorithm.\n \"\"\"\n\n def _experience(self, previousState, action, previousAction, reward):\n \"\"\" This is an altered version of the experience function for used in\n the standard Roth-Erev algorithm. Like in RELearner, propensities for\n all actions are updated and similarity does not come into play. If the\n actionIndex points to the action the reward is associated with (usually\n the last action taken) then simply adjust the weight by the\n experimentation. 
Otherwise increase the weight of the action by a small\n portion of its current propensity.\n\n If j is the index of the last action chosen, r_j is the reward received\n for performing j, i is the current action being updated, q_i is the\n propensity for i, n is the size of the action domain and e is the\n experimentation parameter, then this experience function can be\n expressed as::\n\n | r_j * (1-e) if i = j\n E(i, r_j) = |\n |_ q_i * (e /(n-1)) if i != j\n \"\"\"\n e = self.experimentation\n\n if action == previousAction:\n experience = reward * (1 - e)\n else:\n propensity = self.module.getValue(previousState, action)\n experience = propensity * (e / (self.module.numActions - 1))\n\n return experience\n\n#------------------------------------------------------------------------------\n# \"PropensityTable\" class:\n#------------------------------------------------------------------------------\n\n#class PropensityTable(ActionValueTable):\n# \"\"\" Interface for building a stateless reinforcement learning policy. This\n# type of policy simply maintains a distribution guiding action choice\n# irrespective of the current state of the world. That is, it simply\n# maintains the propensity for selection of each action for all world states.\n# \"\"\"\n#\n# def __init__(self, numActions, name=None):\n# ActionValueTable.__init__(self, 1, numActions, name)\n\n#------------------------------------------------------------------------------\n# \"ProportionalExplorer\" class:\n#------------------------------------------------------------------------------\n\nclass ProportionalExplorer(DiscreteExplorer):\n \"\"\" A discrete explorer that executes the actions with a probability that\n is proportional to the action propensities.\n \"\"\"\n\n def _forwardImplementation(self, inbuf, outbuf):\n \"\"\" Proportional probability method.\n \"\"\"\n assert self.module\n\n propensities = self.module.getActionValues(0)\n\n summedProps = sum(propensities)\n probabilities = propensities / summedProps\n\n action = eventGenerator(probabilities)\n# action = drawIndex(probabilities)\n\n outbuf[:] = scipy.array([action])\n\n#------------------------------------------------------------------------------\n# \"eventGenerator\" function:\n#------------------------------------------------------------------------------\n\ndef eventGenerator(distrib):\n eventIndex = 0\n randValue = random.random()\n\n while (randValue > 0.0) and (eventIndex < len(distrib)):\n randValue -= distrib[eventIndex]\n eventIndex += 1\n\n return eventIndex - 1\n\n# EOF -------------------------------------------------------------------------\n" ]
[ [ "scipy.array" ] ]
dilawar/pypet
[ "2769c74eff55c165c9002cc67611b96b1d2377ea" ]
[ "pypet/tests/integration/environment_test.py" ]
[ "__author__ = 'Robert Meyer'\n\nimport os\nimport platform\nimport logging\nimport time\nimport numpy as np\n\nfrom pypet.trajectory import Trajectory, load_trajectory\nfrom pypet.utils.explore import cartesian_product\nfrom pypet.environment import Environment\nfrom pypet.storageservice import HDF5StorageService\nfrom pypet import pypetconstants, Result, manual_run\n\nimport pypet.pypetexceptions as pex\n\nimport sys\nimport unittest\n\ntry:\n import psutil\nexcept ImportError:\n psutil = None\ntry:\n import dill\nexcept ImportError:\n dill = None\n\nimport scipy.sparse as spsp\nimport random\nfrom pypet import Parameter\n\nimport tables as pt\n\nfrom pypet.tests.testutils.ioutils import run_suite, make_temp_dir, make_trajectory_name,\\\n get_root_logger, parse_args, get_log_config, get_log_path\nfrom pypet.tests.testutils.data import create_param_dict, add_params, multiply,\\\n simple_calculations, TrajectoryComparator, multiply_args, multiply_with_storing, \\\n multiply_with_graceful_exit\n\n\ndef add_one_particular_item(traj, store_full):\n traj.hi = Result('hi', 42, 'hi!')\n traj.f_store()\n traj.f_remove_child('hi')\n\n\nclass SlowResult(Result):\n def _load(self, load_dict):\n time.sleep(3)\n super(SlowResult, self)._load(load_dict)\n\n\nclass FullStorageTest(TrajectoryComparator):\n\n tags = 'integration', 'hdf5', 'environment' # Test tags\n\n def test_full_store(self):\n filename = make_temp_dir('full_store.hdf5')\n with Environment(filename=filename,\n log_config=get_log_config()) as env:\n\n traj = env.v_trajectory\n\n traj.par.x = Parameter('x', 3, 'jj')\n\n traj.f_explore({'x': [1,2,3]})\n\n env.f_run(add_one_particular_item, True)\n\n traj = load_trajectory(index=-1, filename=filename)\n\n self.assertTrue('hi' in traj)\n\n\ndef with_niceness(traj):\n if traj.multiproc:\n if hasattr(os, 'nice'):\n trajnice = traj.niceness\n osnice = os.nice(0)\n else:\n trajnice = traj.niceness\n osnice = psutil.Process().nice()\n if trajnice != osnice:\n if traj.use_scoop:\n import scoop\n if (not scoop.IS_RUNNING or scoop.IS_ORIGIN):\n return\n raise RuntimeError('traj niceness != os niceness; '\n '%s != %s' % (str(trajnice), str(osnice)))\n\n\ndef add_large_data(traj):\n np_array = np.random.rand(100, 1000, 10)\n traj.f_add_result('l4rge', np_array)\n traj.f_store_item('l4rge')\n traj.f_remove_item('l4rge')\n\n array_list = []\n for irun in range(111):\n array_list.append(np.random.rand(10))\n traj.f_add_result('m4ny', *array_list)\n\n\nclass SimpleEnvironmentTest(TrajectoryComparator):\n\n tags = 'integration', 'hdf5', 'environment', 'quick'\n\n def test_make_default_file_when_giving_directory_without_slash(self):\n filename = make_temp_dir('test.hdf5')\n head, tail = os.path.split(filename)\n env = Environment(filename=head)\n the_file_name = env.v_traj.v_name + '.hdf5'\n head, tail = os.path.split(env.v_traj.v_storage_service.filename)\n self.assertEqual(tail, the_file_name)\n\n\nclass EnvironmentTest(TrajectoryComparator):\n\n tags = 'integration', 'hdf5', 'environment'\n\n def set_mode(self):\n self.mode = 'LOCK'\n self.multiproc = False\n self.gc_interval = None\n self.ncores = 1\n self.use_pool=True\n self.use_scoop=False\n self.freeze_input=False\n self.pandas_format='fixed'\n self.pandas_append=False\n self.complib = 'zlib'\n self.complevel=9\n self.shuffle=True\n self.fletcher32 = False\n self.encoding = 'utf8'\n self.log_stdout=False\n self.wildcard_functions = None\n self.niceness = None\n self.port = None\n self.timeout = None\n self.add_time=True\n self.graceful_exit = 
False\n\n def explore_complex_params(self, traj):\n matrices_csr = []\n for irun in range(3):\n\n spsparse_csr = spsp.lil_matrix((111,111))\n spsparse_csr[3,2+irun] = 44.5*irun\n\n matrices_csr.append(spsparse_csr.tocsr())\n\n matrices_csc = []\n for irun in range(3):\n\n spsparse_csc = spsp.lil_matrix((111,111))\n spsparse_csc[3,2+irun] = 44.5*irun\n\n matrices_csc.append(spsparse_csc.tocsc())\n\n matrices_bsr = []\n for irun in range(3):\n\n spsparse_bsr = spsp.lil_matrix((111,111))\n spsparse_bsr[3,2+irun] = 44.5*irun\n\n matrices_bsr.append(spsparse_bsr.tocsr().tobsr())\n\n matrices_dia = []\n for irun in range(3):\n\n spsparse_dia = spsp.lil_matrix((111,111))\n spsparse_dia[3,2+irun] = 44.5*irun\n\n matrices_dia.append(spsparse_dia.tocsc().todia())\n\n\n self.explore_dict={'string':[np.array(['Uno', 'Dos', 'Tres']),\n np.array(['Cinco', 'Seis', 'Siette']),\n np.array(['Ocho', 'Nueve', 'Diez'])],\n 'int':[1,2,3],\n 'csr_mat' : matrices_csr,\n 'csc_mat' : matrices_csc,\n 'bsr_mat' : matrices_bsr,\n 'dia_mat' : matrices_dia,\n 'list' : [['fff'],[444444,444,44,4,4,4],[1,2,3,42]]}\n\n with self.assertRaises(pex.NotUniqueNodeError):\n traj.f_explore(self.explore_dict)\n traj.f_shrink(force=True)\n\n par_dict = traj.parameters.f_to_dict()\n for param_name in par_dict:\n param = par_dict[param_name]\n if param.v_name in self.explore_dict:\n param.f_unlock()\n if param.v_explored:\n param._shrink()\n\n self.explore_dict={'Numpy.string':[np.array(['Uno', 'Dos', 'Tres']),\n np.array(['Cinco', 'Seis', 'Siette']),\n np.array(['Ocho', 'Nueve', 'Diez'])],\n 'Normal.int':[1,2,3],\n 'csr_mat' : matrices_csr,\n 'csc_mat' : matrices_csc,\n 'bsr_mat' : matrices_bsr,\n 'dia_mat' : matrices_dia,\n 'list' : [['fff'],[444444,444,44,4,4,4],[1,2,3,42]]}\n\n traj.f_explore(self.explore_dict)\n\n def explore(self, traj):\n self.explored ={'Normal.trial': [0],\n 'Numpy.double': [np.array([1.0,2.0,3.0,4.0]), np.array([-1.0,3.0,5.0,7.0])],\n 'csr_mat' :[spsp.lil_matrix((2222,22)), spsp.lil_matrix((2222,22))]}\n\n self.explored['csr_mat'][0][1,2]=44.0\n self.explored['csr_mat'][1][2,2]=33\n\n self.explored['csr_mat'][0] = self.explored['csr_mat'][0].tocsr()\n self.explored['csr_mat'][1] = self.explored['csr_mat'][0].tocsr()\n\n traj.f_explore(cartesian_product(self.explored))\n\n def explore_large(self, traj):\n self.explored ={'Normal.trial': [0,1]}\n traj.f_explore(cartesian_product(self.explored))\n\n def tearDown(self):\n self.env.f_disable_logging()\n\n super(EnvironmentTest, self).tearDown()\n\n def setUp(self):\n self.set_mode()\n self.logfolder = make_temp_dir(os.path.join('experiments',\n 'tests',\n 'Log'))\n\n random.seed()\n self.trajname = make_trajectory_name(self)\n self.filename = make_temp_dir(os.path.join('experiments',\n 'tests',\n 'HDF5',\n 'test%s.hdf5' % self.trajname))\n\n env = Environment(trajectory=self.trajname, filename=self.filename,\n file_title=self.trajname,\n log_stdout=self.log_stdout,\n log_config=get_log_config(),\n results_per_run=5,\n wildcard_functions=self.wildcard_functions,\n derived_parameters_per_run=5,\n multiproc=self.multiproc,\n ncores=self.ncores,\n wrap_mode=self.mode,\n use_pool=self.use_pool,\n gc_interval=self.gc_interval,\n freeze_input=self.freeze_input,\n fletcher32=self.fletcher32,\n complevel=self.complevel,\n complib=self.complib,\n shuffle=self.shuffle,\n pandas_append=self.pandas_append,\n pandas_format=self.pandas_format,\n encoding=self.encoding,\n niceness=self.niceness,\n use_scoop=self.use_scoop,\n port=self.port,\n add_time=self.add_time,\n 
timeout=self.timeout,\n graceful_exit=self.graceful_exit)\n\n traj = env.v_trajectory\n\n traj.v_standard_parameter=Parameter\n\n ## Create some parameters\n self.param_dict={}\n create_param_dict(self.param_dict)\n ### Add some parameter:\n add_params(traj,self.param_dict)\n\n #remember the trajectory and the environment\n self.traj = traj\n self.env = env\n\n @unittest.skipIf(not hasattr(os, 'nice') and psutil is None, 'Niceness not supported under non Unix.')\n def test_niceness(self):\n ###Explore\n self.explore(self.traj)\n\n self.env.f_run(with_niceness)\n\n self.assertTrue(self.traj.f_is_completed())\n\n def test_file_overwriting(self):\n self.traj.f_store()\n\n with pt.open_file(self.filename, mode='r') as file:\n nchildren = len(file.root._v_children)\n self.assertTrue(nchildren > 0)\n\n env2 = Environment(filename=self.filename,\n log_config=get_log_config())\n traj2 = env2.v_trajectory\n traj2.f_store()\n\n self.assertTrue(os.path.exists(self.filename))\n\n with pt.open_file(self.filename, mode='r') as file:\n nchildren = len(file.root._v_children)\n self.assertTrue(nchildren > 1)\n\n env3 = Environment(filename=self.filename, overwrite_file=True,\n log_config=get_log_config())\n\n self.assertFalse(os.path.exists(self.filename))\n\n env2.f_disable_logging()\n env3.f_disable_logging()\n\n def test_time_display_of_loading(self):\n filename = make_temp_dir('sloooow.hdf5')\n env = Environment(trajectory='traj', add_time=True, filename=filename,\n log_stdout=False,\n log_config=get_log_config(),\n dynamic_imports=SlowResult,\n display_time=0.1)\n traj = env.v_traj\n res=traj.f_add_result(SlowResult, 'iii', 42, 43, comment='llk')\n traj.f_store()\n service_logger = traj.v_storage_service._logger\n root = logging.getLogger('pypet')\n old_level = root.level\n service_logger.setLevel(logging.INFO)\n root.setLevel(logging.INFO)\n\n traj.f_load(load_data=3)\n service_logger.setLevel(old_level)\n root.setLevel(old_level)\n\n path = get_log_path(traj)\n mainfilename = os.path.join(path, 'LOG.txt')\n with open(mainfilename, mode='r') as mainf:\n full_text = mainf.read()\n self.assertTrue('nodes/s)' in full_text)\n\n env.f_disable_logging()\n\n def make_run_large_data(self):\n self.env.f_run(add_large_data)\n\n def make_run(self):\n\n ### Make a test run\n simple_arg = -13\n simple_kwarg= 13.0\n results = self.env.f_run(simple_calculations,simple_arg,simple_kwarg=simple_kwarg)\n self.are_results_in_order(results)\n\n def test_a_large_run(self):\n get_root_logger().info('Testing large run')\n self.traj.f_add_parameter('TEST', 'test_run')\n ###Explore\n self.explore_large(self.traj)\n self.make_run_large_data()\n\n self.assertTrue(self.traj.f_is_completed())\n\n # Check if printing and repr work\n get_root_logger().info(str(self.env))\n get_root_logger().info(repr(self.env))\n\n newtraj = Trajectory()\n newtraj.f_load(name=self.traj.v_name, as_new=False, load_data=2, filename=self.filename)\n\n self.traj.f_load_skeleton()\n self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)\n\n self.compare_trajectories(self.traj,newtraj)\n\n size=os.path.getsize(self.filename)\n size_in_mb = size/1000000.\n get_root_logger().info('Size is %sMB' % str(size_in_mb))\n self.assertTrue(size_in_mb < 30.0, 'Size is %sMB > 30MB' % str(size_in_mb))\n\n def test_two_runs(self):\n self.traj.f_add_parameter('TEST', 'test_run')\n self.traj.hdf5.purge_duplicate_comments = False\n ###Explore\n self.explore(self.traj)\n\n self.make_run()\n\n newtraj = 
self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False)\n self.traj.f_load_skeleton()\n self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)\n\n self.compare_trajectories(self.traj, newtraj)\n\n size=os.path.getsize(self.filename)\n size_in_mb = size/1000000.\n get_root_logger().info('Size is %sMB' % str(size_in_mb))\n self.assertTrue(size_in_mb < 6.0, 'Size is %sMB > 6MB' % str(size_in_mb))\n\n mp_traj = self.traj\n\n old_multiproc = self.multiproc\n self.multiproc = False\n\n ### Make a new single core run\n self.setUp()\n\n self.traj.f_add_parameter('TEST', 'test_run')\n self.traj.hdf5.purge_duplicate_comments = False\n ###Explore\n self.explore(self.traj)\n\n self.make_run()\n\n # newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False)\n self.traj.f_load_skeleton()\n self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)\n\n self.compare_trajectories(self.traj, newtraj)\n\n size=os.path.getsize(self.filename)\n size_in_mb = size/1000000.\n get_root_logger().info('Size is %sMB' % str(size_in_mb))\n self.assertTrue(size_in_mb < 6.0, 'Size is %sMB > 6MB' % str(size_in_mb))\n\n self.compare_trajectories(mp_traj, self.traj)\n self.multiproc = old_multiproc\n\n def test_errors(self):\n tmp = make_temp_dir('cont')\n if dill is not None:\n env1 = Environment(continuable=True, continue_folder=tmp,\n log_config=None, filename=self.filename)\n with self.assertRaises(ValueError):\n env1.f_run_map(multiply_args, [1], [2], [3])\n with self.assertRaises(ValueError):\n Environment(multiproc=True, use_pool=False, freeze_input=True,\n filename=self.filename, log_config=None)\n env3 = Environment(log_config=None, filename=self.filename)\n with self.assertRaises(ValueError):\n env3.f_run_map(multiply_args)\n with self.assertRaises(ValueError):\n Environment(use_scoop=True, immediate_postproc=True)\n with self.assertRaises(ValueError):\n Environment(use_pool=True, immediate_postproc=True)\n with self.assertRaises(ValueError):\n Environment(continuable=True, wrap_mode='QUEUE', continue_folder=tmp)\n with self.assertRaises(ValueError):\n Environment(use_scoop=True, wrap_mode='QUEUE')\n with self.assertRaises(ValueError):\n Environment(automatic_storing=False,\n continuable=True, continue_folder=tmp)\n with self.assertRaises(ValueError):\n Environment(port='www.nosi.de', wrap_mode='LOCK')\n\n def test_run(self):\n self.traj.f_add_parameter('TEST', 'test_run')\n ###Explore\n self.explore(self.traj)\n\n self.make_run()\n\n self.assertTrue(self.traj.f_is_completed())\n\n newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False)\n self.traj.f_load_skeleton()\n self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)\n\n self.compare_trajectories(self.traj, newtraj)\n\n size=os.path.getsize(self.filename)\n size_in_mb = size/1000000.\n get_root_logger().info('Size is %sMB' % str(size_in_mb))\n self.assertTrue(size_in_mb < 6.0, 'Size is %sMB > 6MB' % str(size_in_mb))\n\n def test_just_one_run(self):\n self.make_run()\n self.assertTrue(self.traj.f_is_completed())\n\n newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False)\n self.traj.f_load_skeleton()\n self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)\n\n self.compare_trajectories(self.traj, newtraj)\n\n self.assertTrue(len(newtraj) == 1)\n\n size=os.path.getsize(self.filename)\n size_in_mb = size/1000000.\n get_root_logger().info('Size is %sMB' % str(size_in_mb))\n self.assertTrue(size_in_mb < 2.0, 'Size is %sMB > 
6MB' % str(size_in_mb))\n\n with self.assertRaises(TypeError):\n self.explore(self.traj)\n\n def test_run_complex(self):\n self.traj.f_add_parameter('TEST', 'test_run_complex')\n ###Explore\n self.explore_complex_params(self.traj)\n\n self.make_run()\n\n self.assertTrue(self.traj.f_is_completed())\n\n newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False)\n self.traj.f_load_skeleton()\n self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)\n\n self.compare_trajectories(self.traj, newtraj)\n\n def load_trajectory(self,trajectory_index=None,trajectory_name=None,as_new=False):\n ### Load The Trajectory and check if the values are still the same\n newtraj = Trajectory()\n newtraj.v_storage_service=HDF5StorageService(filename=self.filename)\n newtraj.f_load(name=trajectory_name, index=trajectory_index, as_new=as_new,\n load_parameters=2, load_derived_parameters=2, load_results=2,\n load_other_data=2)\n return newtraj\n\n\n def test_expand(self):\n\n ###Explore\n self.traj.f_add_parameter('TEST', 'test_expand')\n self.explore(self.traj)\n\n self.make_run()\n\n self.expand()\n\n get_root_logger().info('\\n $$$$$$$$$$$$$$$$$ Second Run $$$$$$$$$$$$$$$$$$$$$$$$')\n self.make_run()\n\n newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False)\n self.traj.f_load_skeleton()\n self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)\n\n self.compare_trajectories(self.traj, newtraj)\n\n def test_expand_after_reload(self):\n\n self.traj.f_add_parameter('TEST', 'test_expand_after_reload')\n ###Explore\n self.explore(self.traj)\n\n self.make_run()\n\n traj_name = self.traj.v_name\n\n\n self.env = Environment(trajectory=self.traj,\n log_stdout=False,\n log_config=get_log_config())\n\n self.traj = self.env.v_trajectory\n\n self.traj.f_load(name=traj_name)\n self.traj.res.f_remove()\n self.traj.dpar.f_remove()\n\n self.expand()\n\n get_root_logger().info('\\n $$$$$$$$$$$$ Second Run $$$$$$$$$$ \\n')\n self.make_run()\n\n newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False)\n self.traj.f_load_skeleton()\n self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)\n\n self.compare_trajectories(self.traj, newtraj)\n\n\n def expand(self):\n self.expanded ={'Normal.trial': [1],\n 'Numpy.double': [np.array([1.0,2.0,3.0,4.0]), np.array([-1.0,3.0,5.0,7.0])],\n 'csr_mat' :[spsp.lil_matrix((2222,22)), spsp.lil_matrix((2222,22))]}\n\n self.expanded['csr_mat'][0][1,2]=44.0\n self.expanded['csr_mat'][1][2,2]=33\n self.expanded['csr_mat'][0]=self.expanded['csr_mat'][0].tocsr()\n self.expanded['csr_mat'][1]=self.expanded['csr_mat'][1].tocsr()\n\n self.traj.f_expand(cartesian_product(self.expanded))\n self.traj.f_store()\n\n\n ################## Overview TESTS #############################\n\n def test_switch_ON_large_tables(self):\n self.traj.f_add_parameter('TEST', 'test_switch_ON_LARGE_tables')\n ###Explore\n self.explore(self.traj)\n\n self.env._traj.config.hdf5.overview.results_overview = 1\n self.env._traj.config.hdf5.overview.derived_parameters_overview = 1\n self.make_run()\n\n hdf5file = pt.open_file(self.filename)\n overview_group = hdf5file.get_node(where='/'+ self.traj.v_name, name='overview')\n should = ['derived_parameters_overview', 'results_overview']\n for name in should:\n self.assertTrue(name in overview_group, '%s not in overviews but it should!' 
% name)\n hdf5file.close()\n\n self.traj.f_load(load_parameters=2, load_derived_parameters=2, load_results=2)\n newtraj = self.load_trajectory(trajectory_name=self.traj.v_name)\n\n self.compare_trajectories(newtraj,self.traj)\n\n def test_switch_off_all_tables(self):\n ###Explore\n self.traj.f_add_parameter('TEST', 'test_switch_off_ALL_tables')\n self.explore(self.traj)\n\n self.env._traj.config.hdf5.overview.results_overview = 0\n self.env._traj.config.hdf5.overview.derived_parameters_overview = 0\n self.env._traj.config.hdf5.overview.derived_parameters_summary = 0\n self.env._traj.config.hdf5.overview.results_summary = 0\n self.env._traj.config.hdf5.purge_duplicate_comments = 0\n self.env._traj.config.hdf5.overview.parameters_overview = 0\n self.env._traj.config.hdf5.overview.config_overview = 0\n self.env._traj.config.hdf5.overview.explored_parameters_overview = 0\n self.make_run()\n\n hdf5file = pt.open_file(self.filename)\n overview_group = hdf5file.get_node(where='/'+ self.traj.v_name, name='overview')\n should_not = HDF5StorageService.NAME_TABLE_MAPPING.keys()\n for name in should_not:\n name = name.split('.')[-1] # Get only the name of the table, no the full name\n self.assertTrue(not name in overview_group, '%s in overviews but should not!' % name)\n\n hdf5file.close()\n\n\n def test_store_form_tuple(self):\n self.traj.f_store()\n\n self.traj.f_add_result('TestResItem', 42, 43)\n\n with self.assertRaises(ValueError):\n self.traj.f_store_item((pypetconstants.LEAF, self.traj.TestResItem,(),{},5))\n\n self.traj.f_store_item((pypetconstants.LEAF, self.traj.TestResItem))\n\n self.traj.results.f_remove_child('TestResItem')\n\n self.assertTrue('TestResItem' not in self.traj)\n\n self.traj.results.f_load_child('TestResItem', load_data=pypetconstants.LOAD_SKELETON)\n\n self.traj.f_load_item((pypetconstants.LEAF,self.traj.TestResItem,(),{'load_only': 'TestResItem'}))\n\n self.assertTrue(self.traj.TestResItem, 42)\n\n def test_store_single_group(self):\n self.traj.f_store()\n\n self.traj.f_add_parameter_group('new.test.group').v_annotations.f_set(42)\n\n self.traj.f_store_item('new.group')\n\n\n # group is below test not new, so ValueError thrown:\n with self.assertRaises(ValueError):\n self.traj.parameters.new.f_remove_child('group')\n\n # group is below test not new, so ValueError thrown:\n with self.assertRaises(ValueError):\n self.traj.parameters.new.f_store_child('group')\n\n # group has children and recursive is false\n with self.assertRaises(TypeError):\n self.traj.parameters.new.f_remove_child('test')\n\n\n self.traj.new.f_remove_child('test', recursive=True)\n\n self.assertTrue('new.group' not in self.traj)\n\n self.traj.new.f_load_child('test', recursive=True, load_data=pypetconstants.LOAD_SKELETON)\n\n self.assertTrue(self.traj.new.group.v_annotations.annotation, 42)\n\n self.traj.f_delete_item('new.test.group')\n\n with self.assertRaises(pex.DataNotInStorageError):\n self.traj.parameters.f_load_child('new.test.group',\n load_data=pypetconstants.LOAD_SKELETON)\n\n def test_switch_on_all_comments(self):\n self.explore(self.traj)\n self.traj.hdf5.purge_duplicate_comments=0\n\n self.make_run()\n\n hdf5file = pt.open_file(self.filename)\n traj_group = hdf5file.get_node(where='/', name= self.traj.v_name)\n\n for node in traj_group._f_walk_groups():\n if 'SRVC_LEAF' in node._v_attrs:\n self.assertTrue('SRVC_INIT_COMMENT' in node._v_attrs,\n 'There is no comment in node %s!' 
% node._v_name)\n\n hdf5file.close()\n\n\n def test_purge_duplicate_comments(self):\n self.explore(self.traj)\n\n with self.assertRaises(RuntimeError):\n self.traj.hdf5.purge_duplicate_comments = 1\n self.traj.overview.results_summary = 0\n self.make_run()\n\n self.traj.f_get('purge_duplicate_comments').f_unlock()\n self.traj.hdf5.purge_duplicate_comments=1\n self.traj.f_get('results_summary').f_unlock()\n self.traj.overview.results_summary=1\n self.make_run()\n\n\n hdf5file = pt.open_file(self.filename, mode='a')\n\n ncomments = {}\n\n try:\n traj_group = hdf5file.get_node(where='/',name= self.traj.v_name)\n\n\n for node in traj_group._f_walk_groups():\n if ('/derived_parameters/' in node._v_pathname or\n '/results/' in node._v_pathname):\n if 'SRVC_LEAF' in node._v_attrs:\n if 'SRVC_INIT_COMMENT' in node._v_attrs:\n comment = node._v_attrs['SRVC_INIT_COMMENT']\n if comment not in ncomments:\n ncomments[comment] = 0\n ncomments[comment] += 1\n finally:\n hdf5file.close()\n\n self.assertGreaterEqual(len(ncomments), 1)\n self.assertTrue(all(x == 1 for x in ncomments.values()))\n\n def test_NOT_purge_duplicate_comments(self):\n self.explore(self.traj)\n self.traj.f_get('purge_duplicate_comments').f_unlock()\n self.traj.hdf5.purge_duplicate_comments=0\n self.traj.f_get('results_summary').f_unlock()\n self.traj.overview.results_summary=0\n self.make_run()\n\n hdf5file = pt.open_file(self.filename, mode='a')\n\n ncomments = {}\n\n try:\n traj_group = hdf5file.get_node(where='/',name= self.traj.v_name)\n\n\n for node in traj_group._f_walk_groups():\n if ('/derived_parameters/' in node._v_pathname or\n '/results/' in node._v_pathname):\n if 'SRVC_LEAF' in node._v_attrs:\n if 'SRVC_INIT_COMMENT' in node._v_attrs:\n comment = node._v_attrs['SRVC_INIT_COMMENT']\n if comment not in ncomments:\n ncomments[comment] = 0\n ncomments[comment] += 1\n finally:\n hdf5file.close()\n\n self.assertGreaterEqual(len(ncomments), 1)\n self.assertTrue(any(x > 1 for x in ncomments.values()))\n\n\ndef my_run_func(idx):\n return 'hello_%d' % idx\n\ndef my_set_func(idx):\n return 'huhu_%d' % idx\n\nclass TestOtherHDF5Settings(EnvironmentTest):\n\n tags = 'integration', 'hdf5', 'environment', 'hdf5_settings'\n\n def set_mode(self):\n EnvironmentTest.set_mode(self)\n self.mode = 'LOCK'\n self.multiproc = False\n self.ncores = 1\n self.use_pool=True\n self.pandas_format='table'\n self.pandas_append=True\n self.complib = 'blosc'\n self.complevel=2\n self.shuffle=False\n self.fletcher32 = False\n self.encoding='latin1'\n self.graceful_exit = True\n\n\n\nclass TestOtherHDF5Settings2(EnvironmentTest):\n\n tags = 'integration', 'hdf5', 'environment', 'hdf5_settings'\n\n def set_mode(self):\n\n EnvironmentTest.set_mode(self)\n self.mode = 'LOCK'\n self.multiproc = False\n self.ncores = 1\n self.use_pool=True\n self.pandas_format='table'\n self.pandas_append=False\n self.complib = 'lzo'\n self.complevel=2\n self.shuffle=False\n self.fletcher32 = True\n self.encoding='latin1'\n self.wildcard_functions = {('$', 'crun') : my_run_func, ('$set', 'crunset'): my_set_func}\n\n\nclass ResultSortTest(TrajectoryComparator):\n\n tags = 'integration', 'hdf5', 'environment'\n\n def set_mode(self):\n self.mode = 'LOCK'\n self.multiproc = False\n self.ncores = 1\n self.use_pool=True\n self.log_stdout=False\n self.freeze_input=False\n self.use_scoop = False\n self.log_config = True\n self.port = None\n self.graceful_exit = True\n\n def tearDown(self):\n self.env.f_disable_logging()\n super(ResultSortTest, self).tearDown()\n\n def setUp(self):\n 
self.set_mode()\n\n self.filename = make_temp_dir(os.path.join('experiments','tests','HDF5','sort_tests.hdf5'))\n\n self.trajname = make_trajectory_name(self)\n\n env = Environment(trajectory=self.trajname,filename=self.filename,\n file_title=self.trajname,\n log_stdout=self.log_stdout,\n log_config=get_log_config() if self.log_config else None,\n multiproc=self.multiproc,\n wrap_mode=self.mode,\n ncores=self.ncores,\n use_pool=self.use_pool,\n use_scoop=self.use_scoop,\n port=self.port,\n freeze_input=self.freeze_input,\n graceful_exit=self.graceful_exit)\n\n traj = env.v_trajectory\n\n\n traj.v_standard_parameter=Parameter\n\n traj.f_add_parameter('x',99)\n traj.f_add_parameter('y',99)\n\n self.env=env\n self.traj=traj\n\n def load_trajectory(self,trajectory_index=None,trajectory_name=None,as_new=False, how=2):\n ### Load The Trajectory and check if the values are still the same\n newtraj = Trajectory()\n newtraj.v_storage_service=HDF5StorageService(filename=self.filename)\n newtraj.f_load(name=trajectory_name, index=trajectory_index, as_new=as_new,\n load_derived_parameters=how, load_results=how)\n return newtraj\n\n\n def explore(self,traj):\n self.explore_dict={'x':[-1,1,2,3,4],'y':[1,1,2,2,3]}\n traj.f_explore(self.explore_dict)\n\n def explore_cartesian(self,traj):\n self.explore_dict=cartesian_product({'x':[-1,1,2,3,4, 5, 6],'y':[1,1,2,2,3,4,4]})\n traj.f_explore(self.explore_dict)\n\n def expand(self,traj):\n self.expand_dict={'x':[10,11,12,13],'y':[11,11,12,12,13]}\n with self.assertRaises(ValueError):\n traj.f_expand(self.expand_dict)\n\n self.expand_dict={'x':[10,11,12,13],'y':[11,11,12,12]}\n traj.f_expand(self.expand_dict)\n\n def test_if_results_are_sorted_correctly_manual_runs(self):\n ###Explore\n self.explore(self.traj)\n self.traj.f_store(only_init=True)\n man_multiply = manual_run()(multiply_with_storing)\n for idx in self.traj.f_iter_runs(yields='idx'):\n self.assertTrue(isinstance(idx, int))\n man_multiply(self.traj)\n traj = self.traj\n traj.f_store()\n self.assertTrue(len(traj), 5)\n self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0]))\n\n self.traj.f_load_skeleton()\n self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)\n self.check_if_z_is_correct(traj)\n\n newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False)\n self.traj.f_load_skeleton()\n self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)\n\n self.compare_trajectories(self.traj,newtraj)\n\n def test_if_results_are_sorted_correctly_using_map(self):\n ###Explore\n self.explore(self.traj)\n\n args1=[10*x for x in range(len(self.traj))]\n args2=[100*x for x in range(len(self.traj))]\n args3=list(range(len(self.traj)))\n\n results = self.env.f_run_map(multiply_args, args1, arg2=args2, arg3=args3)\n self.assertEqual(len(results), len(self.traj))\n\n traj = self.traj\n self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0]))\n\n self.traj.f_load_skeleton()\n self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)\n self.check_if_z_is_correct_map(traj, args1, args2, args3)\n\n for res in results:\n self.assertEqual(len(res), 2)\n self.assertTrue(isinstance(res[0], int))\n self.assertTrue(isinstance(res[1], int))\n idx = res[0]\n self.assertEqual(self.traj.res.runs[idx].z, res[1])\n\n newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False)\n self.traj.f_load_skeleton()\n self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)\n\n self.assertEqual(len(traj), 5)\n 
self.compare_trajectories(self.traj,newtraj)\n\n def test_if_results_are_sorted_correctly(self):\n\n ###Explore\n self.explore(self.traj)\n\n results = self.env.f_run(multiply)\n self.are_results_in_order(results)\n self.assertEqual(len(results), len(self.traj))\n\n\n traj = self.traj\n self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0]))\n\n self.traj.f_load_skeleton()\n self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)\n self.check_if_z_is_correct(traj)\n\n for res in results:\n self.assertEqual(len(res), 2)\n self.assertTrue(isinstance(res[0], int))\n self.assertTrue(isinstance(res[1], int))\n idx = res[0]\n self.assertEqual(self.traj.res.runs[idx].z, res[1])\n\n newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False)\n self.traj.f_load_skeleton()\n self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)\n\n self.compare_trajectories(self.traj,newtraj)\n\n def test_graceful_exit(self):\n\n ###Explore\n self.explore_cartesian(self.traj)\n\n results = self.env.f_run(multiply_with_graceful_exit)\n self.are_results_in_order(results)\n self.assertFalse(self.traj.f_is_completed())\n\n def test_f_iter_runs(self):\n\n ###Explore\n self.explore(self.traj)\n\n\n results = self.env.f_run(multiply)\n self.are_results_in_order(results)\n traj = self.traj\n self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0]))\n\n self.traj.f_load_skeleton()\n self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)\n self.check_if_z_is_correct(traj)\n\n newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False)\n self.traj.f_load_skeleton()\n self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)\n\n for idx, run_name in enumerate(self.traj.f_iter_runs()):\n newtraj.v_crun=run_name\n self.traj.v_idx = idx\n newtraj.v_idx = idx\n nameset = set((x.v_name for x in traj.f_iter_nodes(predicate=(idx,))))\n self.assertTrue('run_%08d' % (idx+1) not in nameset)\n self.assertTrue('run_%08d' % idx in nameset)\n self.assertTrue(traj.v_crun == run_name)\n self.assertTrue(newtraj.crun.z==traj.x*traj.y,' z != x*y: %s != %s * %s' %\n (str(newtraj.crun.z),str(traj.x),str(traj.y)))\n\n for idx, traj in enumerate(self.traj.f_iter_runs(yields='self')):\n run_name = traj.f_idx_to_run(idx)\n self.assertTrue(traj is self.traj)\n newtraj.v_crun=run_name\n self.traj.v_idx = idx\n newtraj.v_idx = idx\n nameset = set((x.v_name for x in traj.f_iter_nodes(predicate=(idx,))))\n self.assertTrue('run_%08d' % (idx+1) not in nameset)\n self.assertTrue('run_%08d' % idx in nameset)\n self.assertTrue(traj.v_crun == run_name)\n self.assertTrue(newtraj.crun.z==traj.x*traj.y,' z != x*y: %s != %s * %s' %\n (str(newtraj.crun.z),str(traj.x),str(traj.y)))\n\n for idx, traj in enumerate(self.traj.f_iter_runs(yields='copy')):\n run_name = traj.f_idx_to_run(idx)\n self.assertTrue(traj is not self.traj)\n newtraj.v_crun=run_name\n self.traj.v_idx = idx\n newtraj.v_idx = idx\n nameset = set((x.v_name for x in traj.f_iter_nodes(predicate=(idx,))))\n self.assertTrue('run_%08d' % (idx+1) not in nameset)\n self.assertTrue('run_%08d' % idx in nameset)\n self.assertTrue(traj.v_crun == run_name)\n self.assertTrue(newtraj.crun.z==traj.x*traj.y,' z != x*y: %s != %s * %s' %\n (str(newtraj.crun.z),str(traj.x),str(traj.y)))\n\n traj = self.traj\n self.assertTrue(traj.v_idx == -1)\n self.assertTrue(traj.v_crun is None)\n self.assertTrue(traj.v_crun_ == pypetconstants.RUN_NAME_DUMMY)\n self.assertTrue(newtraj.v_idx == idx)\n\n\n def 
test_f_iter_runs_auto_load(self):\n\n ###Explore\n self.explore(self.traj)\n\n results = self.env.f_run(multiply)\n self.are_results_in_order(results)\n traj = self.traj\n self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0]))\n\n self.traj.f_load_skeleton()\n self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)\n self.check_if_z_is_correct(traj)\n\n newtraj = Trajectory()\n newtraj.v_storage_service=HDF5StorageService(filename=self.filename)\n newtraj.f_load(name=self.traj.v_name, index=None, as_new=False, load_data=0)\n newtraj.v_auto_load = True\n\n newtraj.par.f_load_child('y', load_data=1)\n\n for idx, run_name in enumerate(self.traj.f_iter_runs()):\n newtraj.v_crun=run_name\n self.traj.v_idx = idx\n newtraj.v_idx = idx\n nameset = set((x.v_name for x in traj.f_iter_nodes(predicate=(idx,))))\n self.assertTrue('run_%08d' % (idx+1) not in nameset)\n self.assertTrue('run_%08d' % idx in nameset)\n self.assertTrue(traj.v_crun == run_name)\n self.assertTrue(newtraj.res.runs.crun.z==newtraj.par.x*newtraj.par.y,' z != x*y: %s != %s * %s' %\n (str(newtraj.crun.z),str(newtraj.x),str(newtraj.y)))\n\n\n traj = self.traj\n self.assertTrue(traj.v_idx == -1)\n self.assertTrue(traj.v_crun is None)\n self.assertTrue(traj.v_crun_ == pypetconstants.RUN_NAME_DUMMY)\n self.assertTrue(newtraj.v_idx == idx)\n\n\n\n def test_expand(self):\n ###Explore\n self.explore(self.traj)\n\n results = self.env.f_run(multiply)\n self.are_results_in_order(results)\n\n get_root_logger().info(results)\n traj = self.traj\n self.assertEqual(len(traj), len(list(list(self.explore_dict.values())[0])))\n\n self.traj.f_load_skeleton()\n self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)\n self.check_if_z_is_correct(traj)\n\n traj_name = self.env.v_trajectory.v_name\n del self.env\n self.env = Environment(trajectory=self.traj,\n log_stdout=False,\n log_config=get_log_config())\n\n self.traj = self.env.v_trajectory\n\n self.traj.f_load(name=traj_name)\n\n self.expand(self.traj)\n\n results = self.env.f_run(multiply)\n self.are_results_in_order(results)\n\n traj = self.traj\n self.assertTrue(len(traj) == len(list(self.expand_dict.values())[0])+\n len(list(self.explore_dict.values())[0]))\n\n\n self.traj.f_load_skeleton()\n self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)\n self.check_if_z_is_correct(traj)\n\n newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False)\n self.traj.f_load_skeleton()\n self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)\n\n self.compare_trajectories(self.traj,newtraj)\n\n def test_expand_after_reload(self):\n ###Explore\n self.explore(self.traj)\n\n results = self.env.f_run(multiply)\n self.are_results_in_order(results)\n\n traj = self.traj\n self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0]))\n\n self.traj.f_load_skeleton()\n self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)\n self.check_if_z_is_correct(traj)\n\n self.expand(self.traj)\n\n self.env.f_run(multiply)\n traj = self.traj\n self.assertTrue(len(traj) == len(list(self.expand_dict.values())[0])+\\\n len(list(self.explore_dict.values())[0]))\n\n self.traj.f_load_skeleton()\n self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)\n self.check_if_z_is_correct(traj)\n\n newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False)\n self.traj.f_load_skeleton()\n self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)\n\n 
self.compare_trajectories(self.traj,newtraj)\n\n def check_if_z_is_correct_map(self,traj, args1, args2, args3):\n for x, arg1, arg2, arg3 in zip(range(len(traj)), args1, args2, args3):\n traj.v_idx=x\n self.assertTrue(traj.crun.z==traj.x*traj.y+arg1+arg2+arg3,' z != x*y: %s != %s * %s' %\n (str(traj.crun.z),str(traj.x),str(traj.y)))\n traj.v_idx=-1\n\n def check_if_z_is_correct(self,traj):\n traj.v_shortcuts=False\n for x in range(len(traj)):\n traj.v_idx=x\n z = traj.res.runs.crun.z\n x = traj.par.x\n y = traj.par.y\n self.assertTrue(z==x*y,' z != x*y: %s != %s * %s' %\n (str(z),str(x),str(y)))\n traj.v_idx=-1\n traj.v_shortcuts=True\n\n\n# def test_runfunc(traj, list_that_changes):\n# traj.f_add_result('kkk', list_that_changes[traj.v_idx] + traj.v_idx)\n# list_that_changes[traj.v_idx] = 1000\n\n# class DeepCopyTest(TrajectoryComparator):\n#\n# def test_deep_copy_data(self):\n#\n# self.filename = make_temp_dir('experiments/tests/HDF5/testcopy.hdf5')\n# self.logfolder = make_temp_dir('experiments/tests/Log')\n# self.trajname = make_trajectory_name(self)\n#\n# env = Environment(trajectory=self.trajname,filename=self.filename,\n# file_title=self.trajname, log_folder=self.logfolder,\n# log_stdout=False,\n# multiproc=False,\n# deep_copy_data=True)\n#\n# traj = env.v_trajectory\n#\n# traj.f_add_parameter('dummy', 1)\n# traj.f_explore({'dummy':[12, 3, 3, 4]})\n#\n# list_that_should_not_change = [42, 42, 42, 42]\n#\n# env.f_run(test_runfunc, list_that_should_not_change)\n#\n# traj.v_auto_load=True\n#\n# for irun, val in enumerate(list_that_should_not_change):\n# self.assertTrue(list_that_should_not_change[irun] == 42)\n# x=traj.results.runs[irun].kkk\n# self.assertTrue(x==42+irun)\n#\n# def test_not_deep_copy_data(self):\n# self.filename = make_temp_dir('experiments/tests/HDF5/testcoyp2.hdf5')\n# self.logfolder = make_temp_dir('experiments/tests/Log')\n# self.trajname = make_trajectory_name(self)\n#\n# env = Environment(trajectory=self.trajname,filename=self.filename,\n# file_title=self.trajname, log_folder=self.logfolder,\n# log_stdout=False,\n# multiproc=False,\n# deep_copy_data=False)\n#\n# traj = env.v_trajectory\n#\n# traj.f_add_parameter('dummy', 1)\n# traj.f_explore({'dummy':[12, 3, 3, 4]})\n#\n# list_that_should_change = [42, 42, 42, 42]\n#\n# env.f_run(test_runfunc, list_that_should_change)\n#\n# traj.v_auto_load=True\n#\n# for irun, val in enumerate(list_that_should_change):\n# self.assertTrue(list_that_should_change[irun] == 1000)\n\nif __name__ == '__main__':\n opt_args = parse_args()\n run_suite(**opt_args)\n" ]
[ [ "numpy.array", "numpy.random.rand", "scipy.sparse.lil_matrix" ] ]
jitdee-ai/darts-models
[ "d200cdf5591959d212fa4535804a5a502ad2a19d" ]
[ "darmo/layers/tresnetv1/general_layers.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom .avg_pool import FastAvgPool2d\n\nclass Flatten(nn.Module):\n def forward(self, x):\n return x.view(x.size(0), -1)\n\nclass DepthToSpace(nn.Module):\n\n def __init__(self, block_size):\n super().__init__()\n self.bs = block_size\n\n def forward(self, x):\n N, C, H, W = x.size()\n x = x.view(N, self.bs, self.bs, C // (self.bs ** 2), H, W) # (N, bs, bs, C//bs^2, H, W)\n x = x.permute(0, 3, 4, 1, 5, 2).contiguous() # (N, C//bs^2, H, bs, W, bs)\n x = x.view(N, C // (self.bs ** 2), H * self.bs, W * self.bs) # (N, C//bs^2, H * bs, W * bs)\n return x\n\n\nclass SpaceToDepthModule(nn.Module):\n def __init__(self, remove_model_jit=False):\n super().__init__()\n if not remove_model_jit:\n self.op = SpaceToDepthJit()\n else:\n self.op = SpaceToDepth()\n\n def forward(self, x):\n return self.op(x)\n\n\nclass SpaceToDepth(nn.Module):\n def __init__(self, block_size=4):\n super().__init__()\n assert block_size == 4\n self.bs = block_size\n\n def forward(self, x):\n N, C, H, W = x.size()\n x = x.view(N, C, H // self.bs, self.bs, W // self.bs, self.bs) # (N, C, H//bs, bs, W//bs, bs)\n x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # (N, bs, bs, C, H//bs, W//bs)\n x = x.view(N, C * (self.bs ** 2), H // self.bs, W // self.bs) # (N, C*bs^2, H//bs, W//bs)\n return x\n\n\n@torch.jit.script\nclass SpaceToDepthJit(object):\n def __call__(self, x: torch.Tensor):\n # assuming hard-coded that block_size==4 for acceleration\n N, C, H, W = x.size()\n x = x.view(N, C, H // 4, 4, W // 4, 4) # (N, C, H//bs, bs, W//bs, bs)\n x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # (N, bs, bs, C, H//bs, W//bs)\n x = x.view(N, C * 16, H // 4, W // 4) # (N, C*bs^2, H//bs, W//bs)\n return x\n\n\nclass hard_sigmoid(nn.Module):\n def __init__(self, inplace=True):\n super(hard_sigmoid, self).__init__()\n self.inplace = inplace\n\n def forward(self, x):\n if self.inplace:\n return x.add_(3.).clamp_(0., 6.).div_(6.)\n else:\n return F.relu6(x + 3.) / 6.\n\n\nclass SEModule(nn.Module):\n\n def __init__(self, channels, reduction_channels, inplace=True):\n super(SEModule, self).__init__()\n self.avg_pool = FastAvgPool2d()\n self.fc1 = nn.Conv2d(channels, reduction_channels, kernel_size=1, padding=0, bias=True)\n self.relu = nn.ReLU(inplace=inplace)\n self.fc2 = nn.Conv2d(reduction_channels, channels, kernel_size=1, padding=0, bias=True)\n # self.activation = hard_sigmoid(inplace=inplace)\n self.activation = nn.Sigmoid()\n\n def forward(self, x):\n x_se = self.avg_pool(x)\n x_se2 = self.fc1(x_se)\n x_se2 = self.relu(x_se2)\n x_se = self.fc2(x_se2)\n x_se = self.activation(x_se)\n return x * x_se\n" ]
[ [ "torch.nn.functional.relu6", "torch.nn.ReLU", "torch.nn.Conv2d", "torch.nn.Sigmoid" ] ]
manipopopo/TC-ResNet
[ "7dff6f4f865f1e63ff705d8e0267cf3b9a0d70a3" ]
[ "audio_nets/res.py" ]
[ "import tensorflow as tf\n\nslim = tf.contrib.slim\n\n\ndef conv_relu_bn(inputs, num_outputs, kernel_size, stride, idx, use_dilation, bn=False):\n scope = f\"conv{idx}\"\n with tf.variable_scope(scope, values=[inputs]):\n if use_dilation:\n assert stride == 1\n rate = int(2**(idx // 3))\n net = slim.conv2d(inputs,\n num_outputs=num_outputs,\n kernel_size=kernel_size,\n stride=stride,\n rate=rate)\n else:\n net = slim.conv2d(inputs,\n num_outputs=num_outputs,\n kernel_size=kernel_size,\n stride=stride)\n # conv + relu are done\n if bn:\n net = slim.batch_norm(net, scope=f\"{scope}_bn\")\n\n return net\n\n\ndef resnet(inputs, num_classes, num_layers, num_channels, pool_size, use_dilation, scope=\"Res\"):\n \"\"\"Re-implement https://github.com/castorini/honk/blob/master/utils/model.py\"\"\"\n endpoints = dict()\n\n with tf.variable_scope(scope):\n net = slim.conv2d(inputs, num_channels, kernel_size=3, stride=1, scope=\"f_conv\")\n\n if pool_size:\n net = slim.avg_pool2d(net, kernel_size=pool_size, stride=1, scope=\"avg_pool0\")\n\n # block\n num_blocks = num_layers // 2\n idx = 0\n for i in range(num_blocks):\n layer_in = net\n\n net = conv_relu_bn(net, num_outputs=num_channels, kernel_size=3, stride=1, idx=idx,\n use_dilation=use_dilation, bn=True)\n idx += 1\n\n net = conv_relu_bn(net, num_outputs=num_channels, kernel_size=3, stride=1, idx=(2 * i + 1),\n use_dilation=use_dilation, bn=False)\n idx += 1\n\n net += layer_in\n net = slim.batch_norm(net, scope=f\"conv{2 * i + 1}_bn\")\n\n if num_layers % 2 != 0:\n net = conv_relu_bn(net, num_outputs=num_channels, kernel_size=3, stride=1, idx=idx,\n use_dilation=use_dilation, bn=True)\n\n # last\n net = slim.avg_pool2d(net, kernel_size=net.shape[1:3], stride=1, scope=\"avg_pool1\")\n\n logits = slim.conv2d(net, num_classes, 1, activation_fn=None, scope=\"fc\")\n logits = tf.reshape(logits, shape=(-1, logits.shape[3]), name=\"squeeze_logit\")\n\n return logits, endpoints\n\n\ndef Res8(inputs, num_classes):\n return resnet(inputs,\n num_classes,\n num_layers=6,\n num_channels=45,\n pool_size=[4, 3],\n use_dilation=False)\n\n\ndef Res8Narrow(inputs, num_classes):\n return resnet(inputs,\n num_classes,\n num_layers=6,\n num_channels=19,\n pool_size=[4, 3],\n use_dilation=False)\n\n\ndef Res15(inputs, num_classes):\n return resnet(inputs,\n num_classes,\n num_layers=13,\n num_channels=45,\n pool_size=None,\n use_dilation=True)\n\n\ndef Res15Narrow(inputs, num_classes):\n return resnet(inputs,\n num_classes,\n num_layers=13,\n num_channels=19,\n pool_size=None,\n use_dilation=True)\n\n\ndef Res_arg_scope(is_training, weight_decay=0.00001):\n batch_norm_params = {\n \"is_training\": is_training,\n \"center\": False,\n \"scale\": False,\n \"decay\": 0.997,\n \"fused\": True,\n }\n\n with slim.arg_scope([slim.conv2d, slim.fully_connected],\n weights_initializer=slim.initializers.xavier_initializer(),\n weights_regularizer=slim.l2_regularizer(weight_decay),\n activation_fn=tf.nn.relu,\n biases_initializer=None,\n normalizer_fn=None,\n padding=\"SAME\",\n ):\n with slim.arg_scope([slim.batch_norm], **batch_norm_params) as scope:\n return scope\n" ]
[ [ "tensorflow.variable_scope", "tensorflow.reshape" ] ]
coursekevin/AerospikeDesign
[ "2385e53fa6af51fb09b8f1280cbb052e7a5c7aea" ]
[ "angelinoNozzle_py/angelino_nozzle_design.py" ]
[ "import numpy as np \nimport gasdynamics as gd \nimport matplotlib.pyplot as plt\nfrom scipy import optimize\nfrom scipy import interpolate\nfrom matplotlib import cm \nimport os\nfrom MOC import chr_mesh\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\n\nclass plug_nozzle:\n def __init__(self,design_alt,r_e,gamma,T_c,p_c,a_c,rho_c,n,truncate_ratio = 1):\n # input design parameters \n\n (p_atm,T_atm,rho_atm) = gd.standard_atmosphere([design_alt])\n\n PR = p_c/p_atm\n\n M_e = gd.PR_expansion_mach(PR,gamma)\n\n expansion_ratio = gd.expansion_ratio(1,M_e,gamma)\n print('Expansion ratio: ' + str(expansion_ratio))\n\n self.expansion_ratio = expansion_ratio\n self.A_t = r_e**2*np.pi/expansion_ratio\n self.r_e = r_e\n self.gamma = gamma\n self.n = n\n self.truncate_ratio = truncate_ratio\n\n self.T_c = T_c\n self.p_c = p_c\n self.a_c = a_c\n self.rho_c = rho_c\n\n # calculated design parameters\n self.A_e = self.A_t*self.expansion_ratio\n self.r_b = np.sqrt(-self.A_e/np.pi + self.r_e**2)\n self.M_e = optimize.fsolve(lambda M: gd.expansion_ratio_zero(1,M,self.gamma,self.expansion_ratio),5)\n\n # DESIGN OF NOZZLE, FUNCTION ORDER IS IMPORTANT\n # NON-OPTIONAL FUNCTION RUNS\n self.design_nozzle()\n\n self.truncate_nozzle()\n\n self.calc_flow_properties()\n\n self.arc_length_coord()\n\n # OPTIONAL FUNCTION CONSTANTS\n self.converge_section = 0 # whether the converging section has been designed\n\n\n ## NON-OPTIONAL FUNCTIONS\n def design_nozzle(self): \n # discrete contour design variables\n self.M = np.linspace(1,self.M_e,self.n) \n self.A = self.A_t*gd.expansion_ratio(1,self.M,self.gamma)\n self.alpha = gd.prandtl_meyer(self.M_e,self.gamma) - gd.prandtl_meyer(self.M,self.gamma) + gd.mach_angle(self.M)\n self.l = (self.r_e - np.sqrt(np.abs(self.r_e**2 - (self.A*self.M*np.sin(self.alpha)/np.pi))))/np.sin(self.alpha)\n\n self.x = self.l*np.cos(self.alpha)\n self.y = self.l*np.sin(self.alpha)\n\n self.centre_spike()\n \n self.length = self.x.max()\n\n def centre_spike(self):\n self.lip_x = -self.x.min()\n self.lip_y = self.r_e \n\n self.x = self.x - self.x.min()\n self.y = self.r_e - self.y\n\n\n def truncate_nozzle(self):\n # based on: Marcello Onofri, \"Plug Nozzles: Summary of Flow Features and Engine Performance\", University of Rome, 01 Jan 2006, American Institue of Aeronautics and Astronautics\n\n # Truncating to about 20% of the original length will produce and efficiency of of 0.82-0.97 for a pressure ratio of 8.9-200 (as opposed to 0.98-0.99 for full length nozzle)\n idx = self.x <= self.x.max()*self.truncate_ratio#x.max()#*0.2\n self.M = self.M[idx]; self.A = self.A[idx]; self.alpha = self.alpha[idx]; self.l = self.l[idx]; self.x= self.x[idx]; self.y = self.y[idx]; \n\n def calc_flow_properties(self):\n T_ratio,p_ratio,rho_ratio,a_ratio = gd.isentropic_ratios(0,self.M,self.gamma)\n self.T = self.T_c*T_ratio\n self.p = self.p_c*p_ratio \n self.a = self.a_c*a_ratio\n self.V = self.a*self.M \n self.rho = self.rho_c*rho_ratio\n\n def arc_length_coord(self):\n y_dummy = self.y[1:] - self.y[:-1]\n x_dummy = self.x[1:] - self.x[:-1]\n s_dummy = np.sqrt(y_dummy**2+x_dummy**2)\n\n s_dummy = np.concatenate((np.array([0]),s_dummy))\n\n self.s = np.zeros(s_dummy.shape)\n\n for i in range(1,s_dummy.shape[0]):\n self.s[i] = s_dummy[i] + self.s[i-1]\n\n ## OPTIONAL-FUNCTIONS\n def update_contour(self,x,y,M,x_centre_spike=0):\n # Updates the spike contour with new (x,y) points with known values of M at each point\n self.x = x; self.y = y; self.M = M\n\n # optionally centre spike about x-axis\n 
if(x_centre_spike):\n self.centre_spike()\n\n # update flow properties based on isentropic expansion\n self.calc_flow_properties()\n\n # update arc length coordinates\n self.arc_length_coord()\n\n # update exit mach number\n self.M_e = M[-1]\n\n # update expansion ratio\n self.expansion_ratio = gd.expansion_ratio(1,self.M_e)\n\n self.A_t = self.r_e**2*np.pi/self.expansion_ratio\n print(\"Warning, throat area update not complete, assumes perfect isentropic expansion from throat to exit\")\n \n # update area estimation \n self.A = self.A_t*gd.expansion_ratio(1,self.M,self.gamma)\n # update base radius\n self.r_b = self.y[-1]\n\n # update exit area\n self.A_e = np.pi*(self.r_e**2-self.r_b**2)\n\n if(self.converge_section):\n print(\"Warning, congerence section not updated. Run self.converge_section(args) again to define a new convergence section.\")\n\n\n def calc_ideal_thrust(self,p_atm):\n # calculates ideal thrust\n # F = m_dot*V_e + (P_e - P_o)A_e\n p_e = self.p[-1]\n\n #print(p_e - p_atm)\n thrust = self.rho[0]*self.V[0]*self.A_t*self.V[-1] + (p_e-p_atm)*self.A_e\n return thrust \n\n def define_compression(self,r1,r2,slope,conv_length,n):\n self.converge_section = 1\n tck = interpolate.splrep(self.x,self.y)\n\n alpha = np.arctan(-1/interpolate.splev(self.x[0],tck,der=1))\n\n x1 = -r1*np.cos(alpha); x2 = x1 \n\n y1 = self.y[0]-r1*np.sin(alpha)\n\n y2 = r1 + y1 - r2\n\n beta = np.arctan(-1/slope)+np.pi\n\n x_str_bnd = x2 + r2*np.cos(beta)\n\n y_str_bnd = y2 + r2*np.sin(beta)\n\n def conv_geom(x):\n if (x > x1):\n theta = np.arccos((x-x1)/r1)\n y = r1*np.sin(theta) + y1\n elif (x > x_str_bnd):\n theta = np.arccos((x-x2)/r2)\n y = r2*np.sin(theta) + y2\n else:\n y = slope*(x-x_str_bnd) + y_str_bnd\n\n return y\n\n x_init = x_str_bnd - np.sqrt(conv_length**2/(1+slope**2))\n\n self.conv_x = np.linspace(x_init,self.x[0],n)\n self.conv_y = np.ones(self.conv_x.shape)\n for i in range(len(self.conv_x)):\n self.conv_y[i] = conv_geom(self.conv_x[i])\n #print(self.conv_x)\n\n def plot_contour(self,ax):\n\n if (self.converge_section):\n ax.plot(self.conv_x,self.conv_y)\n\n ax.plot(self.x,self.y)\n ax.plot(self.lip_x,self.lip_y,'rx')\n ax.plot(self.x,np.zeros(self.x.shape),'k--')\n ax.plot([self.x[-1],self.x[-1]],[0,self.y.min()],'k')\n\n def plot_exhaust_contourf(self,ax,prop,n=100):\n self.lip_y=self.lip_y*-1\n new_x = np.concatenate((self.x,np.flip(self.x[:-1]*-1+self.x[-1]*2,0)))\n new_y = np.concatenate((self.y,np.flip(1.3*self.y[:-1],0))); \n new_y = new_y*-1\n tck = interpolate.splrep(new_x,new_y)\n\n alpha_mat = np.linspace(0,self.alpha[0],n)\n\n for i in range(1,len(self.alpha)):\n alpha_row = np.linspace(self.alpha[i-1],self.alpha[i],n)\n alpha_mat = np.vstack((alpha_mat,alpha_row))\n\n alpha_vec = []\n r_vec = []\n M_vec = []\n\n for i in range(alpha_mat.shape[0]):\n for ag in alpha_mat[i,:]:\n\n line = lambda x: np.tan(ag)*(x-self.lip_x) + self.lip_y\n intercept = lambda x: interpolate.splev(x,tck,der=0) - line(x)\n \n #plt.plot(x_line,line(x_line))\n #plt.show()\n x_c = optimize.brentq(intercept,self.x[0],self.x[-1]*2)\n\n #print(x_c)\n distance = np.sqrt((interpolate.splev(x_c,tck)-self.lip_y)**2 + (x_c - self.lip_x)**2)\n\n r_temp = np.linspace(0,distance,n)\n\n for r in r_temp:\n alpha_vec.append(ag)\n r_vec.append(r)\n if i ==0:\n M_vec.append(prop[-1])\n else:\n M_vec.append(prop[i])\n\n ## convert from polar to cartesion coordinates\n #print(r_vec)\n x_vec = []; y_vec = []\n for i in range(len(alpha_vec)):\n x_vec.append(r_vec[i]*np.cos(alpha_vec[i]) + self.lip_x)\n 
y_vec.append(r_vec[i]*np.sin(alpha_vec[i]) + self.lip_y)\n\n self.lip_y = self.lip_y*-1\n\n x_vec = np.asarray(x_vec) \n y_vec = np.asarray(y_vec); y_vec=y_vec*-1\n\n \n # plt.scatter(x_vec,y_vec,c=M_vec,cmap=cm.jet)\n #interpolation for plotting\n X_plt = np.linspace(0,self.x[-1],n)\n Y_plt = np.linspace(0,self.lip_y,n)\n X_plt,Y_plt = np.meshgrid(X_plt,Y_plt)\n \n M_contour=interpolate.griddata((x_vec,y_vec),M_vec,(X_plt,Y_plt),method='linear')\n ax.set_aspect('equal','box')\n M_fill = ax.contourf(X_plt,Y_plt,M_contour,cmap=cm.jet)\n # divider = make_axes_locatable(ax)\n # cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n plt.colorbar(M_fill,ax=ax,orientation = 'horizontal')\n \n\n def save_to_csv(self):\n if not os.path.exists('plug_csv'):\n os.makedirs('plug_csv')\n\n csv_array = np.array([self.x,self.y,self.s,self.p,self.T,self.M,self.A,self.a,self.V,self.rho])\n np.savetxt('plug_csv/aerospike_diverge_contour.csv', csv_array.T, delimiter = ',')\n with open('plug_csv/aerospike_diverge_contour.csv','r') as original: data = original.read()\n with open('plug_csv/aerospike_diverge_contour.csv','w') as modified: modified.write('x,y,s,p,T,M,A,a,V,rho\\n' + data)\n\n csv_array = np.array([[self.lip_x],[self.lip_y]]) \n np.savetxt('plug_csv/aerospike_lip_coordinates.csv',csv_array.T,delimiter =',')\n with open('plug_csv/aerospike_lip_coordinates.csv','r') as original: data = original.read()\n with open('plug_csv/aerospike_lip_coordinates.csv','w') as modified: modified.write('lip x,lip y\\n' + data)\n if self.converge_section:\n csv_array = np.array([self.conv_x,self.conv_y])\n np.savetxt('plug_csv/aerospike_converge_contour.csv', csv_array.T,delimiter = ',')\n with open('plug_csv/aerospike_converge_contour.csv','r') as original: data = original.read()\n with open('plug_csv/aerospike_converge_contour.csv','w') as modified: modified.write('Converging x,Converging y\\n' + data)\n\ndef design_test_nozzle():\n r_e = 0.027 #0.034 # likely too large\n\n ## CONSTANTS OF DESIGN FOR HEAT FLUX\n #user input variable in metric units:\n T_w=600 #[K] desired temperature of nozzle \n\n\n ## CONSTANTS OF SIM\n alpha = 1#0.07/8 # 0.07/8 : 1 ratio of alpha : beta gives very similar weights\n beta = 0#1\n design_alt = 9144\n truncate_ratio = 1# bounds on truncate < 0.1425\n\n gamma = 1.237 #np.mean([1.2534,1.2852])\n T_c = 2831.47 # combustion chamber temperature\n p_c = 3102640.8 # combustion chamber pressure\n rho_c = 3.3826 # combustion chamber density \n a_c = np.sqrt(gamma*(1-1/gamma)*200.07*T_c) # combustion chamber sound speed\n\n return plug_nozzle(design_alt,r_e,gamma,T_c,p_c,a_c,rho_c,100,truncate_ratio = 0.2) \n\nif __name__ == '__main__':\n\n ## CONSTANTS OF DESIGN FOR AERODYNAMICS\n def test_stand_nozzle():\n r_e = 0.027 #0.034 # likely too large\n\n ## CONSTANTS OF DESIGN FOR HEAT FLUX\n #user input variable in metric units:\n T_w=600 #[K] desired temperature of nozzle \n\n\n ## CONSTANTS OF SIM\n alpha = 1#0.07/8 # 0.07/8 : 1 ratio of alpha : beta gives very similar weights\n beta = 0#1\n design_alt = 9144\n truncate_ratio = 1# bounds on truncate < 0.1425\n\n gamma = 1.237 #np.mean([1.2534,1.2852])\n T_c = 2831.47 # combustion chamber temperature\n p_c = 3102640.8 # combustion chamber pressure\n rho_c = 3.3826 # combustion chamber density \n a_c = np.sqrt(gamma*(1-1/gamma)*200.07*T_c) # combustion chamber sound speed\n\n plug1 = plug_nozzle(design_alt,r_e,gamma,T_c,p_c,a_c,rho_c,100,truncate_ratio = 1)\n print('Throat area: ' + str(plug1.A_t))\n (p_atm,T_atm,rho_atm) = 
gd.standard_atmosphere([design_alt])\n\n\n def make_isentropic_plots(plug1,p_atm):\n print('Ideal thrust: ' + str(plug1.calc_ideal_thrust(p_atm)))\n\n fig, ax1 = plt.subplots(1,1)\n plug1.plot_contour(ax1)\n\n ax1.set_title('Mach Isentropic Expansion Contour Plot')\n\n # m_dot = plug1.rho[0]*plug1.A_t*plug1.V[0]\n # print(\"Mass flow rate: \" + str(m_dot))\n plug1.plot_exhaust_contourf(ax1,plug1.M,n=100)\n \n \n ax1.set_xlabel('x (m)')\n ax1.set_ylabel('y (m)')\n # plug1.save_to_csv()\n name = \"final_report/mach_isen_plot_lab\"\n fig.set_size_inches(18.5,10.5)\n\n plt.savefig(name,dpi=100)\n plt.show()\n plt.close()\n # print(plug1.alpha)\n\n ## OTHER PLOTS\n fig, (ax2,ax3,ax4) = plt.subplots(3,1)\n plug1.plot_contour(ax2)\n\n ax2.set_title('Temperature Isentropic Expansion Contour Plot')\n\n # m_dot = plug1.rho[0]*plug1.A_t*plug1.V[0]\n # print(\"Mass flow rate: \" + str(m_dot))\n plug1.plot_exhaust_contourf(ax2,plug1.T,n=100)\n \n ax2.set_xlabel('x (m)')\n ax2.set_ylabel('y (m)')\n # plug1.save_to_csv()\n fig.set_size_inches(18.5,10.5)\n\n\n\n\n ####\n plug1.plot_contour(ax3)\n\n ax3.set_title('Pressure Isentropic Expansion Contour Plot')\n\n # m_dot = plug1.rho[0]*plug1.A_t*plug1.V[0]\n # print(\"Mass flow rate: \" + str(m_dot))\n plug1.plot_exhaust_contourf(ax3,plug1.p,n=100)\n \n ax3.set_xlabel('x (m)')\n ax3.set_ylabel('y (m)')\n # plug1.save_to_csv()\n fig.set_size_inches(18.5,10.5)\n\n\n\n\n plug1.plot_contour(ax4)\n\n ax4.set_title('Density Isentropic Expansion Contour Plot')\n\n # m_dot = plug1.rho[0]*plug1.A_t*plug1.V[0]\n # print(\"Mass flow rate: \" + str(m_dot))\n plug1.plot_exhaust_contourf(ax4,plug1.rho,n=100)\n \n ax4.set_xlabel('x (m)')\n ax4.set_ylabel('y (m)')\n # plug1.save_to_csv()\n name = \"final_report/isen_plots_lab\"\n fig.set_size_inches(18.5,10.5)\n\n plt.savefig(name,dpi=100) \n plt.show()\n plt.close() \n \n def make_MOC_plot(plug1,design_alt,gamma,n):\n\n #mesh plot\n plug1.plot_contour(plt)\n plt.xlabel('x (m)')\n plt.ylabel('y (m)')\n plt.title('Mesh generated by M.O.C.')\n plug_mesh = chr_mesh(plug1,gamma,design_alt,30,downstream_factor=1.1,plot_chr=1)\n\n\n # plug1.save_to_csv()\n name = \"final_report/MOC_mesh\"\n plt.axis('equal')\n\n plt.savefig(name,dpi=100) \n plt.show()\n plt.close() \n\n\n # mach plot\n plug_mesh = chr_mesh(plug1,gamma,design_alt,120,downstream_factor=1.0,plot_chr=0)\n\n thrust = plug_mesh.compute_thrust('linear',100)\n print('MOC thrust: ' + str(thrust))\n tck = interpolate.splrep(plug_mesh.x[plug_mesh.ID_contour_chr],plug_mesh.y[plug_mesh.ID_contour_chr])\n fig, (ax1) = plt.subplots(1,1)\n plug1.plot_contour(ax1)\n ax1.axes.get_xaxis().set_ticklabels([])\n ax1.axes.get_yaxis().set_ticklabels([])\n X_plt = np.linspace(0,plug_mesh.x.max(),100)\n Y_plt = np.linspace(0,plug_mesh.y.min(),100)\n X_plt,Y_plt = np.meshgrid(X_plt,Y_plt)\n \n invalid_grid = interpolate.splev(X_plt.flatten(),tck)<Y_plt.flatten()\n invalid_grid = invalid_grid.reshape(X_plt.shape)\n\n M_contour=interpolate.griddata((plug_mesh.x,plug_mesh.y),plug_mesh.M,(X_plt,Y_plt),method='linear')\n \n #ax1.plot(X_plt[valid_grid],Y_plt[invalid_grid],'.')\n \n M_contour[invalid_grid] = np.nan\n # ax1.plot(X_plt.flatten(),interpolate.splev(X_plt.flatten(),tck),'.')\n ax1.set_aspect('equal','box')\n for item in ([ax1.title, ax1.xaxis.label, ax1.yaxis.label] + ax1.get_xticklabels() + ax1.get_yticklabels()):\n item.set_fontsize(20)\n M_fill = ax1.contourf(X_plt,-Y_plt,M_contour,cmap=cm.jet)\n cbar = plt.colorbar(M_fill,ax=ax1,orientation = 'horizontal')\n 
cbar.ax.tick_params(labelsize=20)\n name = \"final_report/M_contour_MOC\"\n fig.set_size_inches(18.5,10.5)\n # ax1.set_xlabel('x (m)')\n # ax1.set_ylabel('y (m)')\n # ax1.set_title('M Contour Plot for M.O.C.')\n ax1.set_aspect('equal','box')\n plt.savefig(name,dpi=100)\n plt.show()\n plt.close()\n\n fig, (ax2) = plt.subplots(1,1)\n\n ax2.axes.get_xaxis().set_ticklabels([])\n ax2.axes.get_yaxis().set_ticklabels([])\n \n for item in ([ax2.title, ax2.xaxis.label, ax2.yaxis.label] + ax2.get_xticklabels() + ax2.get_yticklabels()):\n item.set_fontsize(20)\n # velocity plot\n U = np.cos(plug_mesh.theta)*plug_mesh.V\n V = np.sin(plug_mesh.theta)*plug_mesh.V \n x_grid, y_grid = np.meshgrid(np.linspace(0,plug_mesh.x.max(),30),np.linspace(0,plug_mesh.y.min(),20))\n invalid_grid = interpolate.splev(x_grid.flatten(),tck)<y_grid.flatten()\n invalid_grid = invalid_grid.reshape(x_grid.shape)\n\n u_grid = interpolate.griddata((plug_mesh.x,plug_mesh.y),U,(x_grid,y_grid),method='linear')\n v_grid = interpolate.griddata((plug_mesh.x,plug_mesh.y),V,(x_grid,y_grid),method='linear')\n mag_grid = interpolate.griddata((plug_mesh.x,plug_mesh.y),plug_mesh.V,(x_grid,y_grid),method='linear')\n\n mag_grid[invalid_grid] = np.nan\n\n plug1.plot_contour(ax2)\n fig.set_size_inches(18.5,10.5)\n Q = ax2.quiver(x_grid,-y_grid,u_grid,-v_grid,mag_grid,cmap=cm.jet,alpha=0.5)\n cbar = plt.colorbar(Q,ax=ax2,orientation=\"horizontal\")\n cbar.ax.tick_params(labelsize=20)\n ax2.set_aspect('equal','box')\n # ax2.set_title('Velocity Vector Plot (m/s)')\n # ax2.set_xlabel('x (m)')\n # ax2.set_ylabel('y (m)')\n name = \"final_report/V_vec\"\n plt.savefig(name,dpi=100)\n plt.show()\n\n # other property contours\n\n make_MOC_plot(plug1,design_alt,gamma,10)\n # make_isentropic_plots(plug1,p_atm) \n\n def record_breaker_nozzle():\n r_e = 0.027# r_e = 0.056 #0.034 # likely too large\n\n ## CONSTANTS OF DESIGN FOR HEAT FLUX\n #user input variable in metric units:\n T_w=600 #[K] desired temperature of nozzle \n\n\n ## CONSTANTS OF SIM\n alpha = 1#0.07/8 # 0.07/8 : 1 ratio of alpha : beta gives very similar weights\n beta = 0#1\n design_alt = 9144\n truncate_ratio = 1# bounds on truncate < 0.1425\n\n gamma = 1.237 #np.mean([1.2534,1.2852])\n T_c = 2831.47 # combustion chamber temperature\n p_c = 3102640.8 # combustion chamber pressure\n rho_c = 3.3826 # combustion chamber density \n a_c = np.sqrt(gamma*(1-1/gamma)*200.07*T_c) # combustion chamber sound speed\n\n plug1 = plug_nozzle(design_alt,r_e,gamma,T_c,p_c,a_c,rho_c,100,truncate_ratio = truncate_ratio)\n\n # zero_x = np.linspace(plug1.x[-1],plug1.length,20)\n # zero_y = np.zeros(zero_x.shape)\n\n (p_atm,T_atm,rho_atm) = gd.standard_atmosphere([design_alt])\n print('Ideal thrust: ' + str(plug1.calc_ideal_thrust(p_atm)))\n\n fig, ax1 = plt.subplots(1,1)\n plug1.plot_contour(ax1)\n\n ax1.set_title('Mach Isentropic Expansion Contour Plot')\n\n # m_dot = plug1.rho[0]*plug1.A_t*plug1.V[0]\n # print(\"Mass flow rate: \" + str(m_dot))\n plug1.plot_exhaust_contourf(ax1,plug1.M,n=100)\n \n \n ax1.set_xlabel('x (m)')\n ax1.set_ylabel('y (m)')\n # plug1.save_to_csv()\n name = \"final_report/mach_isentropic_plot\"\n fig.set_size_inches(18.5,10.5)\n\n plt.savefig(name,dpi=100)\n plt.show()\n plt.close()\n # print(plug1.alpha)\n\n ## OTHER PLOTS\n fig, (ax2,ax3,ax4) = plt.subplots(3,1)\n plug1.plot_contour(ax2)\n\n ax2.set_title('Temperature Isentropic Expansion Contour Plot')\n\n # m_dot = plug1.rho[0]*plug1.A_t*plug1.V[0]\n # print(\"Mass flow rate: \" + str(m_dot))\n 
plug1.plot_exhaust_contourf(ax2,plug1.T,n=100)\n \n ax2.set_xlabel('x (m)')\n ax2.set_ylabel('y (m)')\n # plug1.save_to_csv()\n fig.set_size_inches(18.5,10.5)\n\n\n\n\n ####\n plug1.plot_contour(ax3)\n\n ax3.set_title('Pressure Isentropic Expansion Contour Plot')\n\n # m_dot = plug1.rho[0]*plug1.A_t*plug1.V[0]\n # print(\"Mass flow rate: \" + str(m_dot))\n plug1.plot_exhaust_contourf(ax3,plug1.p,n=100)\n \n ax3.set_xlabel('x (m)')\n ax3.set_ylabel('y (m)')\n # plug1.save_to_csv()\n fig.set_size_inches(18.5,10.5)\n\n\n\n\n plug1.plot_contour(ax4)\n\n ax4.set_title('Density Isentropic Expansion Contour Plot')\n\n # m_dot = plug1.rho[0]*plug1.A_t*plug1.V[0]\n # print(\"Mass flow rate: \" + str(m_dot))\n plug1.plot_exhaust_contourf(ax4,plug1.rho,n=100)\n \n ax4.set_xlabel('x (m)')\n ax4.set_ylabel('y (m)')\n # plug1.save_to_csv()\n name = \"final_report/T_p_rho\"\n fig.set_size_inches(18.5,10.5)\n\n plt.savefig(name,dpi=100) \n plt.show()\n plt.close() \n # MOC_mesh = chr_mesh(plug1,gamma,design_alt,130,plot_chr=0)\n # plt.show()\n # MOC_mesh.save_to_csv()\n # print('Ideal thrust: ' + str(plug1.calc_ideal_thrust(p_atm)) + ', MOC thrust: ' + str(MOC_mesh.compute_thrust(approx_method='linear',n=30)))\n\n test_stand_nozzle()\n\n def plot_nozzle_final_poster():\n r_e = 0.027 #0.034 # likely too large\n\n ## CONSTANTS OF DESIGN FOR HEAT FLUX\n #user input variable in metric units:\n T_w=600 #[K] desired temperature of nozzle \n\n\n ## CONSTANTS OF SIM\n alpha = 1#0.07/8 # 0.07/8 : 1 ratio of alpha : beta gives very similar weights\n beta = 0#1\n design_alt = 9144\n truncate_ratio = 1# bounds on truncate < 0.1425\n\n gamma = 1.237 #np.mean([1.2534,1.2852])\n T_c = 2831.47 # combustion chamber temperature\n p_c = 3102640.8 # combustion chamber pressure\n rho_c = 3.3826 # combustion chamber density \n a_c = np.sqrt(gamma*(1-1/gamma)*200.07*T_c) # combustion chamber sound speed\n\n plug1 = plug_nozzle(design_alt,r_e,gamma,T_c,p_c,a_c,rho_c,150,truncate_ratio = 1) \n plug2 = plug_nozzle(design_alt,r_e,gamma,T_c,p_c,a_c,rho_c,150,truncate_ratio = 0.2)\n\n\n fig, ax1 = plt.subplots(1,1)\n\n plug1.x =plug1.x*1000; plug1.y = plug1.y*1000; plug1.lip_x=plug1.lip_x*1000; plug1.lip_y=plug1.lip_y*1000\n\n plug2.x=plug2.x*1000; plug2.y=plug2.y*1000\n\n ax1.plot(plug1.x,plug1.y,'r-',linewidth = 4)\n ax1.plot(plug2.x,plug2.y,'b-',linewidth=4)\n\n ax1.plot(plug1.x,-plug1.y,'r-',linewidth = 4)\n ax1.plot(plug2.x,-plug2.y,'b-',linewidth=4)\n\n ax1.plot([plug2.x[-1],plug2.x[-1]],[plug2.y[-1],-plug2.y[-1]],'b-',linewidth=4)\n\n ax1.plot(plug1.lip_x,plug1.lip_y,'kx',markersize=6)\n ax1.plot(plug1.lip_x,-plug1.lip_y,'kx',markersize=6)\n\n ax1.set_title('Final Truncated Nozzle')\n\n ax1.set_xlabel('(mm)')\n\n ax1.set_ylabel('(mm)')\n\n for item in ([ax1.title, ax1.xaxis.label, ax1.yaxis.label] + ax1.get_xticklabels() + ax1.get_yticklabels()):\n item.set_fontsize(20)\n\n fig.set_size_inches(18.5,10.5)\n\n plt.savefig('contours',dpi=100)\n plt.show()\n plt.close()\n\n plt.show()\n\n # plot_nozzle_final_poster()\n###\n# End of helper function / class descriptions\n###\n\n#design for 30,000\n\n# r_e = 0.072/2 #0.034 # likely too large\n# expansion_ratio = 6.64 #8.1273\n# A_t = r_e**2*np.pi/expansion_ratio # max expansion (r_b = 0, r_e**2 >= A_t*expansion_ratio/np.pi)\n# gamma = 1.2343# np.mean([1.2534,1.2852])\n# T_c = 2833.63\n# p_c = 34.474\n# rho_c = 3.3826\n# a_c = np.sqrt(gamma*(1-1/gamma)*200.07*T_c) \n\n# print('Sound speed: ' + str(a_c))\n\n# plug1 = 
plug_nozzle(expansion_ratio,A_t,r_e,gamma,T_c,p_c,a_c,rho_c,10000)\n\n# plt.plot(plug1.x,plug1.y, label='Aerospike Contour')#c=plug1.rho,cmap=cm.coolwarm)\n# plt.plot(plug1.lip_x,plug1.lip_y,'rx',label='Lip Location')\n# #plt.colorbar()\n# plt.plot([0,plug1.x.max()],[0,0], 'k--',label='Centre Line')\n# plt.legend()\n# print('Distance above r_t: ' + str(plug1.lip_y - plug1.y[0]))\n# plt.xlabel('x (m)')\n# plt.ylabel('r (m)')\n\n# m = (plug1.lip_y - plug1.y[0])/(plug1.lip_x - plug1.x[0])\n\n# m = -1/m\n\n# print('Flow angle at throat: ' + str(180/np.pi*np.tan(m)-180))\n\n# max_y = m*(-plug1.lip_x) + plug1.lip_y\n\n# # plt.plot(0,max_y,'gx')\n# plt.axis('equal')\n\n\n\n# print('radius of curvature near the throat: ' + str(2*np.sqrt((plug1.lip_x - plug1.x[0])**2 + (plug1.lip_y - plug1.y[0])**2)))\n\n# csv_array = np.array([plug1.x,plug1.y,plug1.s,plug1.p,plug1.T,plug1.M,plug1.A,plug1.a,plug1.V,plug1.rho])\n\n# np.savetxt('aerospike_contour.csv', csv_array.T, delimiter = ',')\n\n# ## plots of p,T,M,a,V,rho\n\n# fig1, ((ax1,ax2,ax3),(ax4,ax5,ax6)) = plt.subplots(2,3)\n\n# ax1.plot(plug1.x*100,plug1.p*100)\n# #ax1.set_xlabel('x (cm)')\n# ax1.set_ylabel('kPa')\n# ax1.set_title('Pressure on Contour Surface')\n# ax1.grid()\n\n# ax2.plot(plug1.x*100,plug1.T)\n# #ax2.set_xlabel('x (cm)')\n# ax2.set_ylabel('K')\n# ax2.set_title('Temperature on Contour Surface')\n# ax2.grid()\n\n# ax3.plot(plug1.x*100,plug1.M)\n# #ax3.set_xlabel('x (cm)')\n# ax3.set_ylabel('M')\n# ax3.set_title('Mach on Contour Surface')\n# ax3.grid()\n\n# ax4.plot(plug1.x*100,plug1.a)\n# ax4.set_xlabel('x (cm)')\n# ax4.set_ylabel('m/s')\n# ax4.set_title('Sound Speed on Contour Surface')\n# ax4.grid()\n\n# ax5.plot(plug1.x*100,plug1.V)\n# ax5.set_xlabel('x (cm)')\n# ax5.set_ylabel('m/s')\n# ax5.set_title('Velocity on Contour Surface')\n# ax5.grid()\n\n# ax6.plot(plug1.x*100,plug1.rho)\n# ax6.set_xlabel('x (cm)')\n# ax6.set_ylabel('KG/CU')\n# ax6.set_title('Density on Contour Surface')\n# ax6.grid()\n\n# plt.show()" ]
[ [ "numpy.sqrt", "numpy.linspace", "numpy.arctan", "numpy.asarray", "scipy.interpolate.griddata", "numpy.sin", "matplotlib.pyplot.close", "matplotlib.pyplot.axis", "numpy.zeros", "matplotlib.pyplot.title", "numpy.arccos", "matplotlib.pyplot.savefig", "scipy.interpolate.splev", "numpy.tan", "numpy.savetxt", "numpy.array", "numpy.meshgrid", "matplotlib.pyplot.show", "numpy.flip", "matplotlib.pyplot.ylabel", "scipy.interpolate.splrep", "matplotlib.pyplot.subplots", "numpy.cos", "numpy.ones", "matplotlib.pyplot.colorbar", "matplotlib.pyplot.xlabel", "scipy.optimize.brentq", "numpy.vstack" ] ]
yux121/CS282a
[ "03781ad645a68f3dc23fcee7b6103fe878abe7f9" ]
[ "utils.py" ]
[ "import numpy as np\nimport time\nimport math\nimport torch\nfrom torch.nn import functional as F\n\nPAD_ID, SOS_ID, EOS_ID, UNK_ID, DEL_ID = [0, 1, 2, 3, 10000]\n\ndef cos_approx(data1,data2):\n \"\"\"numpy implementation of cosine similarity for matrix\"\"\"\n #print(\"warning: the second matrix will be transposed, so try to put the simpler matrix as the second argument in order to save time.\")\n dotted = np.dot(data1,np.transpose(data2))\n norm1 = np.linalg.norm(data1,axis=1)\n norm2 = np.linalg.norm(data2,axis=1)\n matrix_vector_norms = np.multiply(norm1, norm2)\n neighbors = np.divide(dotted, matrix_vector_norms)\n return neighbors\n\ndef normalize(data):\n \"\"\"normalize matrix by rows\"\"\"\n return data/np.linalg.norm(data,axis=1,keepdims=True)\n\ndef dot_np(data1,data2):\n \"\"\"cosine similarity for normalized vectors\"\"\"\n #print(\"warning: the second matrix will be transposed, so try to put the simpler matrix as the second argument in order to save time.\")\n return np.dot(data1, data2.T)\n\ndef sigmoid(x):\n return 1/(1 + np.exp(-x)) \n\ndef similarity(vec1, vec2, measure='cos'):\n if measure=='cos':\n vec1_norm = normalize(vec1)\n vec2_norm = normalize(vec2)\n return np.dot(vec1_norm, vec2_norm.T)[:,0]\n elif measure=='poly':\n return (0.5*np.dot(vec1, vec2.T).diagonal()+1)**2\n elif measure=='sigmoid':\n return np.tanh(np.dot(vec1, vec2.T).diagonal()+1)\n elif measure in ['euc', 'gesd', 'aesd']: #https://arxiv.org/pdf/1508.01585.pdf \n euc_dist = np.linalg.norm(vec1-vec2, axis=1)\n euc_sim = 1 / (1 + euc_dist)\n if measure=='euc': return euc_sim \n sigmoid_sim = sigmoid(np.dot(vec1, vec2.T).diagonal()+1)\n if measure == 'gesd': return euc_sim * sigmoid_sim\n elif measure == 'aesd': return 0.5*(euc_sim+sigmoid_sim)\n\n#######################################################################\n\ndef asMinutes(s):\n m = math.floor(s / 60)\n s -= m * 60\n return '%d:%d'% (m, s)\n\ndef timeSince(since, percent):\n now = time.time()\n s = now - since\n es = s / (percent)\n rs = es - s\n return '%s<%s'%(asMinutes(s), asMinutes(rs))\n\n#######################################################################\nimport nltk\ntry: nltk.word_tokenize(\"hello world\")\nexcept LookupError: nltk.download('punkt')\n \ndef sent2indexes(sentence, vocab, maxlen):\n '''sentence: a string or list of string\n return: a numpy array of word indices\n ''' \n def convert_sent(sent, vocab, maxlen):\n idxes = np.zeros(maxlen, dtype=np.int64)\n idxes.fill(PAD_ID)\n tokens = nltk.word_tokenize(sent.strip())\n idx_len = min(len(tokens), maxlen)\n for i in range(idx_len): idxes[i] = vocab.get(tokens[i], UNK_ID)\n return idxes, idx_len\n if type(sentence) is list:\n inds, lens = [], []\n for sent in sentence:\n idxes, idx_len = convert_sent(sent, vocab, maxlen)\n #idxes, idx_len = np.expand_dims(idxes, 0), np.array([idx_len])\n inds.append(idxes)\n lens.append(idx_len)\n return np.vstack(inds), np.vstack(lens)\n else:\n inds, lens = sent2indexes([sentence], vocab, maxlen)\n return inds[0], lens[0]\n \ndef indexes2sent(indexes, vocab, ignore_tok=PAD_ID): \n '''indexes: numpy array'''\n def revert_sent(indexes, ivocab, ignore_tok=PAD_ID):\n indexes=filter(lambda i: i!=ignore_tok, indexes)\n toks, length = [], 0 \n for idx in indexes:\n toks.append(ivocab.get(idx, '<unk>'))\n length+=1\n if idx == EOS_ID:\n break\n return ' '.join(toks), length\n \n ivocab = {v: k for k, v in vocab.items()}\n if indexes.ndim==1:# one sentence\n return revert_sent(indexes, ivocab, ignore_tok)\n else:# dim>1\n sentences, lens 
=[], [] # a batch of sentences\n for inds in indexes:\n sentence, length = revert_sent(inds, ivocab, ignore_tok)\n sentences.append(sentence)\n lens.append(length)\n return sentences, lens\n\n########################################################################\n" ]
[ [ "numpy.dot", "numpy.multiply", "numpy.vstack", "numpy.linalg.norm", "numpy.transpose", "numpy.exp", "numpy.zeros", "numpy.divide" ] ]
ap3xx/great_expectations
[ "67251ff3fcb60b1a52a6ece1bec98fb8e96f6a96" ]
[ "great_expectations/util.py" ]
[ "import importlib\nimport json\nimport logging\nimport os\nimport time\nfrom functools import wraps\nfrom inspect import getcallargs\nfrom pathlib import Path\nfrom types import ModuleType\nfrom typing import Callable, Union\n\nimport black\nimport importlib_metadata\nfrom pkg_resources import Distribution\n\nfrom great_expectations.core import expectationSuiteSchema\nfrom great_expectations.exceptions import (\n PluginClassNotFoundError,\n PluginModuleNotFoundError,\n)\n\nlogger = logging.getLogger(__name__)\n\n\ndef measure_execution_time(func) -> Callable:\n @wraps(func)\n def compute_delta_t(*args, **kwargs) -> Callable:\n time_begin: int = int(round(time.time() * 1000))\n try:\n return func(*args, **kwargs)\n finally:\n time_end: int = int(round(time.time() * 1000))\n delta_t: int = time_end - time_begin\n call_args: dict = getcallargs(func, *args, **kwargs)\n print(\n f\"Total execution time of function {func.__name__}({call_args}): {delta_t} ms.\"\n )\n\n return compute_delta_t\n\n\n# noinspection SpellCheckingInspection\ndef get_project_distribution() -> Union[Distribution, None]:\n ditr: Distribution\n for distr in importlib_metadata.distributions():\n relative_path: Path\n try:\n relative_path = Path(__file__).relative_to(distr.locate_file(\"\"))\n except ValueError:\n pass\n else:\n if relative_path in distr.files:\n return distr\n return None\n\n\ndef verify_dynamic_loading_support(module_name: str, package_name: str = None) -> None:\n \"\"\"\n :param module_name: a possibly-relative name of a module\n :param package_name: the name of a package, to which the given module belongs\n \"\"\"\n try:\n module_spec: importlib.machinery.ModuleSpec = importlib.util.find_spec(\n module_name, package=package_name\n )\n except ModuleNotFoundError:\n module_spec = None\n if not module_spec:\n if not package_name:\n package_name = \"\"\n message: str = f\"\"\"No module named \"{package_name + module_name}\" could be found in the repository. Please \\\nmake sure that the file, corresponding to this package and module, exists and that dynamic loading of code modules, \\\ntemplates, and assets is supported in your execution environment. 
This error is unrecoverable.\n \"\"\"\n raise FileNotFoundError(message)\n\n\ndef import_library_module(module_name: str) -> Union[ModuleType, None]:\n \"\"\"\n :param module_name: a fully-qualified name of a module (e.g., \"great_expectations.dataset.sqlalchemy_dataset\")\n :return: raw source code of the module (if can be retrieved)\n \"\"\"\n module_obj: Union[ModuleType, None]\n\n try:\n module_obj = importlib.import_module(module_name)\n except ModuleNotFoundError:\n module_obj = None\n\n return module_obj\n\n\ndef load_class(class_name, module_name):\n try:\n verify_dynamic_loading_support(module_name=module_name)\n except FileNotFoundError:\n raise PluginModuleNotFoundError(module_name)\n\n module_obj: Union[ModuleType, None] = import_library_module(module_name=module_name)\n\n if module_obj is None:\n raise PluginModuleNotFoundError(module_name)\n try:\n klass_ = getattr(module_obj, class_name)\n except AttributeError:\n raise PluginClassNotFoundError(module_name=module_name, class_name=class_name)\n\n return klass_\n\n\ndef _convert_to_dataset_class(df, dataset_class, expectation_suite=None, profiler=None):\n \"\"\"\n Convert a (pandas) dataframe to a great_expectations dataset, with (optional) expectation_suite\n\n Args:\n df: the DataFrame object to convert\n dataset_class: the class to which to convert the existing DataFrame\n expectation_suite: the expectation suite that should be attached to the resulting dataset\n profiler: the profiler to use to generate baseline expectations, if any\n\n Returns:\n A new Dataset object\n \"\"\"\n\n if expectation_suite is not None:\n # Create a dataset of the new class type, and manually initialize expectations according to\n # the provided expectation suite\n new_df = dataset_class.from_dataset(df)\n new_df._initialize_expectations(expectation_suite)\n else:\n # Instantiate the new Dataset with default expectations\n new_df = dataset_class.from_dataset(df)\n if profiler is not None:\n new_df.profile(profiler)\n\n return new_df\n\n\ndef _load_and_convert_to_dataset_class(\n df, class_name, module_name, expectation_suite=None, profiler=None\n):\n \"\"\"\n Convert a (pandas) dataframe to a great_expectations dataset, with (optional) expectation_suite\n\n Args:\n df: the DataFrame object to convert\n class_name (str): class to which to convert resulting Pandas df\n module_name (str): dataset module from which to try to dynamically load the relevant module\n expectation_suite: the expectation suite that should be attached to the resulting dataset\n profiler: the profiler to use to generate baseline expectations, if any\n\n Returns:\n A new Dataset object\n \"\"\"\n verify_dynamic_loading_support(module_name=module_name)\n dataset_class = load_class(class_name, module_name)\n return _convert_to_dataset_class(df, dataset_class, expectation_suite, profiler)\n\n\ndef read_csv(\n filename,\n class_name=\"PandasDataset\",\n module_name=\"great_expectations.dataset\",\n dataset_class=None,\n expectation_suite=None,\n profiler=None,\n *args,\n **kwargs,\n):\n \"\"\"Read a file using Pandas read_csv and return a great_expectations dataset.\n\n Args:\n filename (string): path to file to read\n class_name (str): class to which to convert resulting Pandas df\n module_name (str): dataset module from which to try to dynamically load the relevant module\n dataset_class (Dataset): If specified, the class to which to convert the resulting Dataset object;\n if not specified, try to load the class named via the class_name and module_name parameters\n 
expectation_suite (string): path to great_expectations expectation suite file\n profiler (Profiler class): profiler to use when creating the dataset (default is None)\n\n Returns:\n great_expectations dataset\n \"\"\"\n import pandas as pd\n\n df = pd.read_csv(filename, *args, **kwargs)\n if dataset_class is not None:\n return _convert_to_dataset_class(\n df=df,\n dataset_class=dataset_class,\n expectation_suite=expectation_suite,\n profiler=profiler,\n )\n else:\n return _load_and_convert_to_dataset_class(\n df=df,\n class_name=class_name,\n module_name=module_name,\n expectation_suite=expectation_suite,\n profiler=profiler,\n )\n\n\ndef read_json(\n filename,\n class_name=\"PandasDataset\",\n module_name=\"great_expectations.dataset\",\n dataset_class=None,\n expectation_suite=None,\n accessor_func=None,\n profiler=None,\n *args,\n **kwargs,\n):\n \"\"\"Read a file using Pandas read_json and return a great_expectations dataset.\n\n Args:\n filename (string): path to file to read\n class_name (str): class to which to convert resulting Pandas df\n module_name (str): dataset module from which to try to dynamically load the relevant module\n dataset_class (Dataset): If specified, the class to which to convert the resulting Dataset object;\n if not specified, try to load the class named via the class_name and module_name parameters\n expectation_suite (string): path to great_expectations expectation suite file\n accessor_func (Callable): functions to transform the json object in the file\n profiler (Profiler class): profiler to use when creating the dataset (default is None)\n\n Returns:\n great_expectations dataset\n \"\"\"\n import pandas as pd\n\n if accessor_func is not None:\n json_obj = json.load(open(filename, \"rb\"))\n json_obj = accessor_func(json_obj)\n df = pd.read_json(json.dumps(json_obj), *args, **kwargs)\n\n else:\n df = pd.read_json(filename, *args, **kwargs)\n\n if dataset_class is not None:\n return _convert_to_dataset_class(\n df=df,\n dataset_class=dataset_class,\n expectation_suite=expectation_suite,\n profiler=profiler,\n )\n else:\n return _load_and_convert_to_dataset_class(\n df=df,\n class_name=class_name,\n module_name=module_name,\n expectation_suite=expectation_suite,\n profiler=profiler,\n )\n\n\ndef read_excel(\n filename,\n class_name=\"PandasDataset\",\n module_name=\"great_expectations.dataset\",\n dataset_class=None,\n expectation_suite=None,\n profiler=None,\n *args,\n **kwargs,\n):\n \"\"\"Read a file using Pandas read_excel and return a great_expectations dataset.\n\n Args:\n filename (string): path to file to read\n class_name (str): class to which to convert resulting Pandas df\n module_name (str): dataset module from which to try to dynamically load the relevant module\n dataset_class (Dataset): If specified, the class to which to convert the resulting Dataset object;\n if not specified, try to load the class named via the class_name and module_name parameters\n expectation_suite (string): path to great_expectations expectation suite file\n profiler (Profiler class): profiler to use when creating the dataset (default is None)\n\n Returns:\n great_expectations dataset or ordered dict of great_expectations datasets,\n if multiple worksheets are imported\n \"\"\"\n import pandas as pd\n\n df = pd.read_excel(filename, *args, **kwargs)\n if dataset_class is None:\n verify_dynamic_loading_support(module_name=module_name)\n dataset_class = load_class(class_name=class_name, module_name=module_name)\n if isinstance(df, dict):\n for key in df:\n df[key] = 
_convert_to_dataset_class(\n df=df[key],\n dataset_class=dataset_class,\n expectation_suite=expectation_suite,\n profiler=profiler,\n )\n else:\n df = _convert_to_dataset_class(\n df=df,\n dataset_class=dataset_class,\n expectation_suite=expectation_suite,\n profiler=profiler,\n )\n return df\n\n\ndef read_table(\n filename,\n class_name=\"PandasDataset\",\n module_name=\"great_expectations.dataset\",\n dataset_class=None,\n expectation_suite=None,\n profiler=None,\n *args,\n **kwargs,\n):\n \"\"\"Read a file using Pandas read_table and return a great_expectations dataset.\n\n Args:\n filename (string): path to file to read\n class_name (str): class to which to convert resulting Pandas df\n module_name (str): dataset module from which to try to dynamically load the relevant module\n dataset_class (Dataset): If specified, the class to which to convert the resulting Dataset object;\n if not specified, try to load the class named via the class_name and module_name parameters\n expectation_suite (string): path to great_expectations expectation suite file\n profiler (Profiler class): profiler to use when creating the dataset (default is None)\n\n Returns:\n great_expectations dataset\n \"\"\"\n import pandas as pd\n\n df = pd.read_table(filename, *args, **kwargs)\n if dataset_class is not None:\n return _convert_to_dataset_class(\n df=df,\n dataset_class=dataset_class,\n expectation_suite=expectation_suite,\n profiler=profiler,\n )\n else:\n return _load_and_convert_to_dataset_class(\n df=df,\n class_name=class_name,\n module_name=module_name,\n expectation_suite=expectation_suite,\n profiler=profiler,\n )\n\n\ndef read_feather(\n filename,\n class_name=\"PandasDataset\",\n module_name=\"great_expectations.dataset\",\n dataset_class=None,\n expectation_suite=None,\n profiler=None,\n *args,\n **kwargs,\n):\n \"\"\"Read a file using Pandas read_feather and return a great_expectations dataset.\n\n Args:\n filename (string): path to file to read\n class_name (str): class to which to convert resulting Pandas df\n module_name (str): dataset module from which to try to dynamically load the relevant module\n dataset_class (Dataset): If specified, the class to which to convert the resulting Dataset object;\n if not specified, try to load the class named via the class_name and module_name parameters\n expectation_suite (string): path to great_expectations expectation suite file\n profiler (Profiler class): profiler to use when creating the dataset (default is None)\n\n Returns:\n great_expectations dataset\n \"\"\"\n import pandas as pd\n\n df = pd.read_feather(filename, *args, **kwargs)\n if dataset_class is not None:\n return _convert_to_dataset_class(\n df=df,\n dataset_class=dataset_class,\n expectation_suite=expectation_suite,\n profiler=profiler,\n )\n else:\n return _load_and_convert_to_dataset_class(\n df=df,\n class_name=class_name,\n module_name=module_name,\n expectation_suite=expectation_suite,\n profiler=profiler,\n )\n\n\ndef read_parquet(\n filename,\n class_name=\"PandasDataset\",\n module_name=\"great_expectations.dataset\",\n dataset_class=None,\n expectation_suite=None,\n profiler=None,\n *args,\n **kwargs,\n):\n \"\"\"Read a file using Pandas read_parquet and return a great_expectations dataset.\n\n Args:\n filename (string): path to file to read\n class_name (str): class to which to convert resulting Pandas df\n module_name (str): dataset module from which to try to dynamically load the relevant module\n dataset_class (Dataset): If specified, the class to which to convert the 
resulting Dataset object;\n if not specified, try to load the class named via the class_name and module_name parameters\n expectation_suite (string): path to great_expectations expectation suite file\n profiler (Profiler class): profiler to use when creating the dataset (default is None)\n\n Returns:\n great_expectations dataset\n \"\"\"\n import pandas as pd\n\n df = pd.read_parquet(filename, *args, **kwargs)\n if dataset_class is not None:\n return _convert_to_dataset_class(\n df=df,\n dataset_class=dataset_class,\n expectation_suite=expectation_suite,\n profiler=profiler,\n )\n else:\n return _load_and_convert_to_dataset_class(\n df=df,\n class_name=class_name,\n module_name=module_name,\n expectation_suite=expectation_suite,\n profiler=profiler,\n )\n\n\ndef from_pandas(\n pandas_df,\n class_name=\"PandasDataset\",\n module_name=\"great_expectations.dataset\",\n dataset_class=None,\n expectation_suite=None,\n profiler=None,\n):\n \"\"\"Read a Pandas data frame and return a great_expectations dataset.\n\n Args:\n pandas_df (Pandas df): Pandas data frame\n class_name (str): class to which to convert resulting Pandas df\n module_name (str): dataset module from which to try to dynamically load the relevant module\n dataset_class (Dataset): If specified, the class to which to convert the resulting Dataset object;\n if not specified, try to load the class named via the class_name and module_name parameters\n expectation_suite (string) = None: path to great_expectations expectation suite file\n profiler (profiler class) = None: The profiler that should\n be run on the dataset to establish a baseline expectation suite.\n\n Returns:\n great_expectations dataset\n \"\"\"\n if dataset_class is not None:\n return _convert_to_dataset_class(\n df=pandas_df,\n dataset_class=dataset_class,\n expectation_suite=expectation_suite,\n profiler=profiler,\n )\n else:\n return _load_and_convert_to_dataset_class(\n df=pandas_df,\n class_name=class_name,\n module_name=module_name,\n expectation_suite=expectation_suite,\n profiler=profiler,\n )\n\n\ndef read_pickle(\n filename,\n class_name=\"PandasDataset\",\n module_name=\"great_expectations.dataset\",\n dataset_class=None,\n expectation_suite=None,\n profiler=None,\n *args,\n **kwargs,\n):\n \"\"\"Read a file using Pandas read_pickle and return a great_expectations dataset.\n\n Args:\n filename (string): path to file to read\n class_name (str): class to which to convert resulting Pandas df\n module_name (str): dataset module from which to try to dynamically load the relevant module\n dataset_class (Dataset): If specified, the class to which to convert the resulting Dataset object;\n if not specified, try to load the class named via the class_name and module_name parameters\n expectation_suite (string): path to great_expectations expectation suite file\n profiler (Profiler class): profiler to use when creating the dataset (default is None)\n\n Returns:\n great_expectations dataset\n \"\"\"\n import pandas as pd\n\n df = pd.read_pickle(filename, *args, **kwargs)\n if dataset_class is not None:\n return _convert_to_dataset_class(\n df=df,\n dataset_class=dataset_class,\n expectation_suite=expectation_suite,\n profiler=profiler,\n )\n else:\n return _load_and_convert_to_dataset_class(\n df=df,\n class_name=class_name,\n module_name=module_name,\n expectation_suite=expectation_suite,\n profiler=profiler,\n )\n\n\ndef validate(\n data_asset,\n expectation_suite=None,\n data_asset_name=None,\n expectation_suite_name=None,\n data_context=None,\n 
data_asset_class_name=None,\n data_asset_module_name=\"great_expectations.dataset\",\n data_asset_class=None,\n *args,\n **kwargs,\n):\n \"\"\"Validate the provided data asset. Validate can accept an optional data_asset_name to apply, data_context to use\n to fetch an expectation_suite if one is not provided, and data_asset_class_name/data_asset_module_name or\n data_asset_class to use to provide custom expectations.\n\n Args:\n data_asset: the asset to validate\n expectation_suite: the suite to use, or None to fetch one using a DataContext\n data_asset_name: the name of the data asset to use\n expectation_suite_name: the name of the expectation_suite to use\n data_context: data context to use to fetch an an expectation suite, or the path from which to obtain one\n data_asset_class_name: the name of a class to dynamically load a DataAsset class\n data_asset_module_name: the name of the module to dynamically load a DataAsset class\n data_asset_class: a class to use. overrides data_asset_class_name/ data_asset_module_name if provided\n *args:\n **kwargs:\n\n Returns:\n\n \"\"\"\n # Get an expectation suite if not provided\n if expectation_suite is None and data_context is None:\n raise ValueError(\n \"Either an expectation suite or a DataContext is required for validation.\"\n )\n\n if expectation_suite is None:\n logger.info(\"Using expectation suite from DataContext.\")\n # Allow data_context to be a string, and try loading it from path in that case\n if isinstance(data_context, str):\n from great_expectations.data_context import DataContext\n\n data_context = DataContext(data_context)\n expectation_suite = data_context.get_expectation_suite(\n expectation_suite_name=expectation_suite_name\n )\n else:\n if isinstance(expectation_suite, dict):\n expectation_suite = expectationSuiteSchema.load(expectation_suite)\n if data_asset_name is not None:\n raise ValueError(\n \"When providing an expectation suite, data_asset_name cannot also be provided.\"\n )\n if expectation_suite_name is not None:\n raise ValueError(\n \"When providing an expectation suite, expectation_suite_name cannot also be provided.\"\n )\n logger.info(\n \"Validating data_asset_name %s with expectation_suite_name %s\"\n % (data_asset_name, expectation_suite.expectation_suite_name)\n )\n\n # If the object is already a DataAsset type, then this is purely a convenience method\n # and no conversion is needed; try to run validate on the given object\n if data_asset_class_name is None and data_asset_class is None:\n return data_asset.validate(\n expectation_suite=expectation_suite,\n data_context=data_context,\n *args,\n **kwargs,\n )\n\n # Otherwise, try to convert and validate the dataset\n if data_asset_class is None:\n verify_dynamic_loading_support(module_name=data_asset_module_name)\n data_asset_class = load_class(data_asset_class_name, data_asset_module_name)\n\n import pandas as pd\n from great_expectations.dataset import Dataset, PandasDataset\n\n if data_asset_class is None:\n # Guess the GE data_asset_type based on the type of the data_asset\n if isinstance(data_asset, pd.DataFrame):\n data_asset_class = PandasDataset\n # Add other data_asset_type conditions here as needed\n\n # Otherwise, we will convert for the user to a subclass of the\n # existing class to enable new expectations, but only for datasets\n if not isinstance(data_asset, (Dataset, pd.DataFrame)):\n raise ValueError(\n \"The validate util method only supports dataset validations, including custom subclasses. 
For other data \"\n \"asset types, use the object's own validate method.\"\n )\n\n if not issubclass(type(data_asset), data_asset_class):\n if isinstance(data_asset, pd.DataFrame) and issubclass(\n data_asset_class, PandasDataset\n ):\n pass # This is a special type of allowed coercion\n else:\n raise ValueError(\n \"The validate util method only supports validation for subtypes of the provided data_asset_type.\"\n )\n\n data_asset_ = _convert_to_dataset_class(\n data_asset, dataset_class=data_asset_class, expectation_suite=expectation_suite\n )\n return data_asset_.validate(*args, data_context=data_context, **kwargs)\n\n\n# https://stackoverflow.com/questions/9727673/list-directory-tree-structure-in-python\ndef gen_directory_tree_str(startpath):\n \"\"\"Print the structure of directory as a tree:\n\n Ex:\n project_dir0/\n AAA/\n BBB/\n aaa.txt\n bbb.txt\n\n #Note: files and directories are sorted alphabetically, so that this method can be used for testing.\n \"\"\"\n\n output_str = \"\"\n\n tuples = list(os.walk(startpath))\n tuples.sort()\n\n for root, dirs, files in tuples:\n level = root.replace(startpath, \"\").count(os.sep)\n indent = \" \" * 4 * level\n output_str += \"{}{}/\\n\".format(indent, os.path.basename(root))\n subindent = \" \" * 4 * (level + 1)\n\n files.sort()\n for f in files:\n output_str += \"{}{}\\n\".format(subindent, f)\n\n return output_str\n\n\ndef lint_code(code):\n \"\"\"Lint strings of code passed in.\"\"\"\n black_file_mode = black.FileMode()\n if not isinstance(code, str):\n raise TypeError\n try:\n linted_code = black.format_file_contents(code, fast=True, mode=black_file_mode)\n return linted_code\n except (black.NothingChanged, RuntimeError):\n return code\n" ]
[ [ "pandas.read_excel", "pandas.read_csv", "pandas.read_table", "pandas.read_parquet", "pandas.read_feather", "pandas.read_json", "pandas.read_pickle" ] ]
mercuries-ai/ml-agents
[ "80c5dbf08a8834a5802e218abc408ecb74da1694" ]
[ "ml-agents/mlagents/trainers/policy/torch_policy.py" ]
[ "from typing import Any, Dict, List, Tuple, Optional\nimport numpy as np\nfrom mlagents.torch_utils import torch, default_device\nimport copy\n\nfrom mlagents.trainers.action_info import ActionInfo\nfrom mlagents.trainers.behavior_id_utils import get_global_agent_id\nfrom mlagents.trainers.policy import Policy\nfrom mlagents_envs.base_env import DecisionSteps, BehaviorSpec\nfrom mlagents_envs.timers import timed\n\nfrom mlagents.trainers.settings import TrainerSettings\nfrom mlagents.trainers.torch.networks import (\n SharedActorCritic,\n SeparateActorCritic,\n GlobalSteps,\n)\n\nfrom mlagents.trainers.torch.utils import ModelUtils\nfrom mlagents.trainers.buffer import AgentBuffer\nfrom mlagents.trainers.torch.agent_action import AgentAction\nfrom mlagents.trainers.torch.action_log_probs import ActionLogProbs\n\nEPSILON = 1e-7 # Small value to avoid divide by zero\n\n\nclass TorchPolicy(Policy):\n def __init__(\n self,\n seed: int,\n behavior_spec: BehaviorSpec,\n trainer_settings: TrainerSettings,\n tanh_squash: bool = False,\n reparameterize: bool = False,\n separate_critic: bool = True,\n condition_sigma_on_obs: bool = True,\n ):\n \"\"\"\n Policy that uses a multilayer perceptron to map the observations to actions. Could\n also use a CNN to encode visual input prior to the MLP. Supports discrete and\n continuous actions, as well as recurrent networks.\n :param seed: Random seed.\n :param behavior_spec: Assigned BehaviorSpec object.\n :param trainer_settings: Defined training parameters.\n :param load: Whether a pre-trained model will be loaded or a new one created.\n :param tanh_squash: Whether to use a tanh function on the continuous output,\n or a clipped output.\n :param reparameterize: Whether we are using the resampling trick to update the policy\n in continuous output.\n \"\"\"\n super().__init__(\n seed,\n behavior_spec,\n trainer_settings,\n tanh_squash,\n reparameterize,\n condition_sigma_on_obs,\n )\n self.global_step = (\n GlobalSteps()\n ) # could be much simpler if TorchPolicy is nn.Module\n self.grads = None\n\n reward_signal_configs = trainer_settings.reward_signals\n reward_signal_names = [key.value for key, _ in reward_signal_configs.items()]\n\n self.stats_name_to_update_name = {\n \"Losses/Value Loss\": \"value_loss\",\n \"Losses/Policy Loss\": \"policy_loss\",\n }\n if separate_critic:\n ac_class = SeparateActorCritic\n else:\n ac_class = SharedActorCritic\n self.actor_critic = ac_class(\n sensor_specs=self.behavior_spec.sensor_specs,\n network_settings=trainer_settings.network_settings,\n action_spec=behavior_spec.action_spec,\n stream_names=reward_signal_names,\n conditional_sigma=self.condition_sigma_on_obs,\n tanh_squash=tanh_squash,\n )\n # Save the m_size needed for export\n self._export_m_size = self.m_size\n # m_size needed for training is determined by network, not trainer settings\n self.m_size = self.actor_critic.memory_size\n\n self.actor_critic.to(default_device())\n self._clip_action = not tanh_squash\n\n @property\n def export_memory_size(self) -> int:\n \"\"\"\n Returns the memory size of the exported ONNX policy. 
This only includes the memory\n of the Actor and not any auxillary networks.\n \"\"\"\n return self._export_m_size\n\n def _extract_masks(self, decision_requests: DecisionSteps) -> np.ndarray:\n mask = None\n if self.behavior_spec.action_spec.discrete_size > 0:\n num_discrete_flat = np.sum(self.behavior_spec.action_spec.discrete_branches)\n mask = torch.ones([len(decision_requests), num_discrete_flat])\n if decision_requests.action_mask is not None:\n mask = torch.as_tensor(\n 1 - np.concatenate(decision_requests.action_mask, axis=1)\n )\n return mask\n\n def update_normalization(self, buffer: AgentBuffer) -> None:\n \"\"\"\n If this policy normalizes vector observations, this will update the norm values in the graph.\n :param buffer: The buffer with the observations to add to the running estimate\n of the distribution.\n \"\"\"\n\n if self.normalize:\n self.actor_critic.update_normalization(buffer)\n\n @timed\n def sample_actions(\n self,\n obs: List[torch.Tensor],\n masks: Optional[torch.Tensor] = None,\n memories: Optional[torch.Tensor] = None,\n seq_len: int = 1,\n ) -> Tuple[AgentAction, ActionLogProbs, torch.Tensor, torch.Tensor]:\n \"\"\"\n :param obs: List of observations.\n :param masks: Loss masks for RNN, else None.\n :param memories: Input memories when using RNN, else None.\n :param seq_len: Sequence length when using RNN.\n :return: Tuple of AgentAction, ActionLogProbs, entropies, and output memories.\n \"\"\"\n actions, log_probs, entropies, memories = self.actor_critic.get_action_stats(\n obs, masks, memories, seq_len\n )\n return (actions, log_probs, entropies, memories)\n\n def evaluate_actions(\n self,\n obs: List[torch.Tensor],\n actions: AgentAction,\n masks: Optional[torch.Tensor] = None,\n memories: Optional[torch.Tensor] = None,\n seq_len: int = 1,\n ) -> Tuple[ActionLogProbs, torch.Tensor, Dict[str, torch.Tensor]]:\n log_probs, entropies, value_heads = self.actor_critic.get_stats_and_value(\n obs, actions, masks, memories, seq_len\n )\n return log_probs, entropies, value_heads\n\n @timed\n def evaluate(\n self, decision_requests: DecisionSteps, global_agent_ids: List[str]\n ) -> Dict[str, Any]:\n \"\"\"\n Evaluates policy for the agent experiences provided.\n :param global_agent_ids:\n :param decision_requests: DecisionStep object containing inputs.\n :return: Outputs from network as defined by self.inference_dict.\n \"\"\"\n obs = decision_requests.obs\n masks = self._extract_masks(decision_requests)\n tensor_obs = [torch.as_tensor(np_ob) for np_ob in obs]\n\n memories = torch.as_tensor(self.retrieve_memories(global_agent_ids)).unsqueeze(\n 0\n )\n\n run_out = {}\n with torch.no_grad():\n action, log_probs, entropy, memories = self.sample_actions(\n tensor_obs, masks=masks, memories=memories\n )\n action_tuple = action.to_action_tuple()\n run_out[\"action\"] = action_tuple\n # This is the clipped action which is not saved to the buffer\n # but is exclusively sent to the environment.\n env_action_tuple = action.to_action_tuple(clip=self._clip_action)\n run_out[\"env_action\"] = env_action_tuple\n run_out[\"log_probs\"] = log_probs.to_log_probs_tuple()\n run_out[\"entropy\"] = ModelUtils.to_numpy(entropy)\n run_out[\"learning_rate\"] = 0.0\n if self.use_recurrent:\n run_out[\"memory_out\"] = ModelUtils.to_numpy(memories).squeeze(0)\n return run_out\n\n def get_action(\n self, decision_requests: DecisionSteps, worker_id: int = 0\n ) -> ActionInfo:\n \"\"\"\n Decides actions given observations information, and takes them in environment.\n :param worker_id:\n :param 
decision_requests: A dictionary of behavior names and DecisionSteps from environment.\n :return: an ActionInfo containing action, memories, values and an object\n to be passed to add experiences\n \"\"\"\n if len(decision_requests) == 0:\n return ActionInfo.empty()\n\n global_agent_ids = [\n get_global_agent_id(worker_id, int(agent_id))\n for agent_id in decision_requests.agent_id\n ] # For 1-D array, the iterator order is correct.\n\n run_out = self.evaluate(\n decision_requests, global_agent_ids\n ) # pylint: disable=assignment-from-no-return\n self.save_memories(global_agent_ids, run_out.get(\"memory_out\"))\n self.check_nan_action(run_out.get(\"action\"))\n return ActionInfo(\n action=run_out.get(\"action\"),\n env_action=run_out.get(\"env_action\"),\n value=run_out.get(\"value\"),\n outputs=run_out,\n agent_ids=list(decision_requests.agent_id),\n )\n\n def get_current_step(self):\n \"\"\"\n Gets current model step.\n :return: current model step.\n \"\"\"\n return self.global_step.current_step\n\n def set_step(self, step: int) -> int:\n \"\"\"\n Sets current model step to step without creating additional ops.\n :param step: Step to set the current model step to.\n :return: The step the model was set to.\n \"\"\"\n self.global_step.current_step = step\n return step\n\n def increment_step(self, n_steps):\n \"\"\"\n Increments model step.\n \"\"\"\n self.global_step.increment(n_steps)\n return self.get_current_step()\n\n def load_weights(self, values: List[np.ndarray]) -> None:\n self.actor_critic.load_state_dict(values)\n\n def init_load_weights(self) -> None:\n pass\n\n def get_weights(self) -> List[np.ndarray]:\n return copy.deepcopy(self.actor_critic.state_dict())\n\n def get_modules(self):\n return {\"Policy\": self.actor_critic, \"global_step\": self.global_step}\n" ]
[ [ "numpy.concatenate", "numpy.sum" ] ]
quochungto/stanford-cs231n-assignment-solution
[ "22f6a97f29c4188d57a581822ce1dbd37a36b882" ]
[ "assignment1/cs231n/classifiers/neural_net.py" ]
[ "from __future__ import print_function\n\nfrom builtins import range\nfrom builtins import object\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom past.builtins import xrange\n\nclass TwoLayerNet(object):\n \"\"\"\n A two-layer fully-connected neural network. The net has an input dimension of\n N, a hidden layer dimension of H, and performs classification over C classes.\n We train the network with a softmax loss function and L2 regularization on the\n weight matrices. The network uses a ReLU nonlinearity after the first fully\n connected layer.\n\n In other words, the network has the following architecture:\n\n input - fully connected layer - ReLU - fully connected layer - softmax\n\n The outputs of the second fully-connected layer are the scores for each class.\n \"\"\"\n\n def __init__(self, input_size, hidden_size, output_size, std=1e-4):\n \"\"\"\n Initialize the model. Weights are initialized to small random values and\n biases are initialized to zero. Weights and biases are stored in the\n variable self.params, which is a dictionary with the following keys:\n\n W1: First layer weights; has shape (D, H)\n b1: First layer biases; has shape (H,)\n W2: Second layer weights; has shape (H, C)\n b2: Second layer biases; has shape (C,)\n\n Inputs:\n - input_size: The dimension D of the input data.\n - hidden_size: The number of neurons H in the hidden layer.\n - output_size: The number of classes C.\n \"\"\"\n self.params = {}\n self.params['W1'] = std * np.random.randn(input_size, hidden_size)\n self.params['b1'] = np.zeros(hidden_size)\n self.params['W2'] = std * np.random.randn(hidden_size, output_size)\n self.params['b2'] = np.zeros(output_size)\n\n def loss(self, X, y=None, reg=0.0):\n \"\"\"\n Compute the loss and gradients for a two layer fully connected neural\n network.\n\n Inputs:\n - X: Input data of shape (N, D). Each X[i] is a training sample.\n - y: Vector of training labels. y[i] is the label for X[i], and each y[i] is\n an integer in the range 0 <= y[i] < C. This parameter is optional; if it\n is not passed then we only return scores, and if it is passed then we\n instead return the loss and gradients.\n - reg: Regularization strength.\n\n Returns:\n If y is None, return a matrix scores of shape (N, C) where scores[i, c] is\n the score for class c on input X[i].\n\n If y is not None, instead return a tuple of:\n - loss: Loss (data loss and regularization loss) for this batch of training\n samples.\n - grads: Dictionary mapping parameter names to gradients of those parameters\n with respect to the loss function; has the same keys as self.params.\n \"\"\"\n # Unpack variables from the params dictionary\n W1, b1 = self.params['W1'], self.params['b1']\n W2, b2 = self.params['W2'], self.params['b2']\n N, D = X.shape\n\n # Compute the forward pass\n scores = None\n #############################################################################\n # TODO: Perform the forward pass, computing the class scores for the input. #\n # Store the result in the scores variable, which should be an array of #\n # shape (N, C). 
#\n #############################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n scores1 = X.dot(W1) + b1 # (N,H)\n relu1 = np.maximum(0, scores1) # (N,H)\n scores = relu1.dot(W2) + b2 # (N,C)\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n # If the targets are not given then jump out, we're done\n if y is None:\n return scores\n\n # Compute the loss\n loss = None\n #############################################################################\n # TODO: Finish the forward pass, and compute the loss. This should include #\n # both the data loss and L2 regularization for W1 and W2. Store the result #\n # in the variable loss, which should be a scalar. Use the Softmax #\n # classifier loss. #\n #############################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n exp_scores = np.exp(scores)\n sum_exp = np.sum(exp_scores, axis=1)\n softmax_matrix = exp_scores / sum_exp.reshape(-1, 1)\n loss = -np.mean(np.log(softmax_matrix[np.arange(N), y]))\n loss += reg * (np.sum(W1**2) + np.sum(W2**2))\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n # Backward pass: compute gradients\n grads = {}\n #############################################################################\n # TODO: Compute the backward pass, computing the derivatives of the weights #\n # and biases. Store the results in the grads dictionary. For example, #\n # grads['W1'] should store the gradient on W1, and be a matrix of same size #\n #############################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n \n softmax_matrix[np.arange(N), y] -= 1\n softmax_matrix /= N\n grads['W2'] = relu1.T.dot(softmax_matrix) # (H,C)\n grads['W2'] += 2*reg*W2 # (H,C)\n grads['b2'] = np.sum(softmax_matrix, axis=0) # (C,)\n drelu1 = softmax_matrix.dot(W2.T) # (N,H)\n drelu1 = (relu1 > 0) * drelu1 # (N,H)\n \n grads['W1'] = X.T.dot(drelu1) # (D,H)\n grads['W1'] += 2*reg*W1 # (D,H)\n grads['b1'] = np.sum(drelu1, axis=0) # (H,)\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n return loss, grads\n\n def train(self, X, y, X_val, y_val,\n learning_rate=1e-3, learning_rate_decay=0.95,\n reg=5e-6, num_iters=100,\n batch_size=200, verbose=False, EarlyStopping=False, patience=50):\n \"\"\"\n Train this neural network using stochastic gradient descent.\n\n Inputs:\n - X: A numpy array of shape (N, D) giving training data.\n - y: A numpy array f shape (N,) giving training labels; y[i] = c means that\n X[i] has label c, where 0 <= c < C.\n - X_val: A numpy array of shape (N_val, D) giving validation data.\n - y_val: A numpy array of shape (N_val,) giving validation labels.\n - learning_rate: Scalar giving learning rate for optimization.\n - learning_rate_decay: Scalar giving factor used to decay the learning rate\n after each epoch.\n - reg: Scalar giving regularization strength.\n - num_iters: Number of steps to take when optimizing.\n - batch_size: Number of training examples to use per step.\n - verbose: boolean; if true print progress during optimization.\n \"\"\"\n num_train = X.shape[0]\n iterations_per_epoch = max(num_train / batch_size, 1)\n count_pregress = 0\n best_loss = np.inf\n best_acc = -1\n best_params = None\n # Use SGD to optimize the parameters in self.model\n loss_history = []\n train_acc_history = []\n val_acc_history = []\n\n for it in range(num_iters):\n X_batch = 
None\n y_batch = None\n\n #########################################################################\n # TODO: Create a random minibatch of training data and labels, storing #\n # them in X_batch and y_batch respectively. #\n #########################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n batch_indices = np.random.choice(num_train, batch_size)\n X_batch = X[batch_indices]\n y_batch = y[batch_indices]\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n # Compute loss and gradients using the current minibatch\n loss, grads = self.loss(X_batch, y=y_batch, reg=reg)\n loss_history.append(loss)\n val_loss, _ = self.loss(X_val, y_val, reg=reg)\n # Check accuracy\n train_acc = (self.predict(X_batch) == y_batch).mean()\n val_acc = (self.predict(X_val) == y_val).mean()\n train_acc_history.append(train_acc)\n val_acc_history.append(val_acc)\n \n # Early stopping\n if EarlyStopping:\n if val_loss >= best_loss:\n count_progress += 1\n if count_progress == patience:\n self.params = best_params\n print('early stopping at iteration %d / %d' % (it, num_iters))\n print('best validation accuracy: %f' % (best_acc))\n break\n else:\n best_loss = val_loss\n best_acc = val_acc\n best_params = self.params\n count_progress = 0\n\n #########################################################################\n # TODO: Use the gradients in the grads dictionary to update the #\n # parameters of the network (stored in the dictionary self.params) #\n # using stochastic gradient descent. You'll need to use the gradients #\n # stored in the grads dictionary defined above. #\n #########################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n for param in ('W1', 'b1', 'W2', 'b2'):\n self.params[param] -= learning_rate * grads[param]\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n if verbose and it % 100 == 0:\n print('iteration %d / %d: loss %f' % (it, num_iters, loss))\n\n # Every epoch, check train and val accuracy and decay learning rate.\n if it % iterations_per_epoch == 0:\n # Decay learning rate\n learning_rate *= learning_rate_decay\n if EarlyStopping:\n self.params = best_params\n return {\n 'loss_history': loss_history,\n 'train_acc_history': train_acc_history,\n 'val_acc_history': val_acc_history,\n }\n\n def predict(self, X):\n \"\"\"\n Use the trained weights of this two-layer network to predict labels for\n data points. For each data point we predict scores for each of the C\n classes, and assign each data point to the class with the highest score.\n\n Inputs:\n - X: A numpy array of shape (N, D) giving N D-dimensional data points to\n classify.\n\n Returns:\n - y_pred: A numpy array of shape (N,) giving predicted labels for each of\n the elements of X. For all i, y_pred[i] = c means that X[i] is predicted\n to have class c, where 0 <= c < C.\n \"\"\"\n y_pred = None\n\n ###########################################################################\n # TODO: Implement this function; it should be VERY simple! #\n ###########################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n y_pred = np.argmax(self.loss(X), axis=1)\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n return y_pred\n" ]
[ [ "numpy.maximum", "numpy.random.choice", "numpy.arange", "numpy.random.randn", "numpy.exp", "numpy.zeros", "numpy.sum" ] ]
xwshi/Semantic-Segmentation
[ "4cc89a22ffc9018d2b44e69e85672c7bdd1ab706" ]
[ "deeplab_Xception/nets/deeplab.py" ]
[ "import tensorflow as tf\nfrom keras import backend as K\nfrom keras import layers\nfrom keras.activations import relu\nfrom keras.applications.imagenet_utils import preprocess_input\nfrom keras.layers import (Activation, Add, BatchNormalization, Concatenate,\n Conv2D, DepthwiseConv2D, Dropout,\n GlobalAveragePooling2D, Input, Lambda, Reshape,\n Softmax, ZeroPadding2D)\nfrom keras.models import Model\nfrom keras.utils.data_utils import get_file\n\nfrom nets.Xception import Xception\n\n\ndef SepConv_BN(x, filters, prefix, stride=1, kernel_size=3, rate=1, depth_activation=False, epsilon=1e-3):\n if stride == 1:\n depth_padding = 'same'\n else:\n kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)\n pad_total = kernel_size_effective - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n x = ZeroPadding2D((pad_beg, pad_end))(x)\n depth_padding = 'valid'\n \n if not depth_activation:\n x = Activation('relu')(x)\n\n # 首先使用3x3的深度可分离卷积\n x = DepthwiseConv2D((kernel_size, kernel_size), strides=(stride, stride), dilation_rate=(rate, rate),\n padding=depth_padding, use_bias=False, name=prefix + '_depthwise')(x)\n x = BatchNormalization(name=prefix + '_depthwise_BN', epsilon=epsilon)(x)\n if depth_activation:\n x = Activation('relu')(x)\n\n # 利用1x1卷积进行通道数调整\n x = Conv2D(filters, (1, 1), padding='same', use_bias=False, name=prefix + '_pointwise')(x)\n x = BatchNormalization(name=prefix + '_pointwise_BN', epsilon=epsilon)(x)\n if depth_activation:\n x = Activation('relu')(x)\n\n return x\n\ndef Deeplabv3(input_shape=(512, 512, 3), classes=21, alpha=1.,OS=16):\n\n img_input = Input(shape=input_shape)\n\n # x 64, 64, 2048\n # skip1 128, 128, 256\n x, atrous_rates, skip1 = Xception(img_input,alpha,OS=OS)\n size_before = tf.keras.backend.int_shape(x)\n\n #---------------------------------------------------------------#\n # 全部求平均后,再利用expand_dims扩充维度\n # 64,64,2048 -> 1,1,2048 -> 1,1,2048\n #---------------------------------------------------------------#\n b4 = GlobalAveragePooling2D()(x)\n b4 = Lambda(lambda x: K.expand_dims(x, 1))(b4)\n b4 = Lambda(lambda x: K.expand_dims(x, 1))(b4)\n b4 = Conv2D(256, (1, 1), padding='same', use_bias=False, name='image_pooling')(b4)\n b4 = BatchNormalization(name='image_pooling_BN', epsilon=1e-5)(b4)\n b4 = Activation('relu')(b4)\n # 1,1,256 -> 64,64,256\n b4 = Lambda(lambda x: tf.image.resize_images(x, size_before[1:3]))(b4)\n\n #---------------------------------------------------------------#\n # 调整通道\n #---------------------------------------------------------------#\n b0 = Conv2D(256, (1, 1), padding='same', use_bias=False, name='aspp0')(x)\n b0 = BatchNormalization(name='aspp0_BN', epsilon=1e-5)(b0)\n b0 = Activation('relu', name='aspp0_activation')(b0)\n\n #---------------------------------------------------------------#\n # rate值与OS相关,SepConv_BN为先3x3膨胀卷积,再1x1卷积,进行压缩\n # 其膨胀率就是rate值\n #---------------------------------------------------------------#\n b1 = SepConv_BN(x, 256, 'aspp1', rate=atrous_rates[0], depth_activation=True, epsilon=1e-5)\n b2 = SepConv_BN(x, 256, 'aspp2', rate=atrous_rates[1], depth_activation=True, epsilon=1e-5)\n b3 = SepConv_BN(x, 256, 'aspp3', rate=atrous_rates[2], depth_activation=True, epsilon=1e-5)\n\n # 64, 64, 256 + 64, 64, 256 + 64, 64, 256 + 64, 64, 256 + 64, 64, 256 -> 64, 64, 1280\n x = Concatenate()([b4, b0, b1, b2, b3])\n\n # 利用1x1卷积调整通道数\n # 64, 64, 1280 -> 64,64,256\n x = Conv2D(256, (1, 1), padding='same',\n use_bias=False, name='concat_projection')(x)\n x = 
BatchNormalization(name='concat_projection_BN', epsilon=1e-5)(x)\n x = Activation('relu')(x)\n x = Dropout(0.1)(x)\n\n # skip1.shape[1:3] 为 128,128\n # 64,64,256 -> 128,128,256\n x = Lambda(lambda xx: tf.image.resize_images(x, skip1.shape[1:3]))(x)\n \n # 128,128,24 -> 128,128,48\n dec_skip1 = Conv2D(48, (1, 1), padding='same',use_bias=False, name='feature_projection0')(skip1)\n dec_skip1 = BatchNormalization(name='feature_projection0_BN', epsilon=1e-5)(dec_skip1)\n dec_skip1 = Activation('relu')(dec_skip1)\n\n # 128,128,256 + 128,128,48 -> 128,128,304\n x = Concatenate()([x, dec_skip1])\n # 128,128,304 -> 128,128,256 -> 128,128,256\n x = SepConv_BN(x, 256, 'decoder_conv0',\n depth_activation=True, epsilon=1e-5)\n x = SepConv_BN(x, 256, 'decoder_conv1',\n depth_activation=True, epsilon=1e-5)\n\n # 128,128,256 -> 128,128,2 -> 512,512,2\n x = Conv2D(classes, (1, 1), padding='same')(x)\n \n size_before3 = tf.keras.backend.int_shape(img_input)\n x = Lambda(lambda xx:tf.image.resize_images(xx,size_before3[1:3]))(x)\n\n x = Reshape((-1,classes))(x)\n x = Softmax()(x)\n\n inputs = img_input\n model = Model(inputs, x, name='deeplabv3plus')\n\n return model\n\n" ]
[ [ "tensorflow.keras.backend.int_shape", "tensorflow.image.resize_images" ] ]
sstolle/scikit-lego
[ "a8543b7246db5cca8e89173c66b96bc63bc2a42a" ]
[ "tests/test_api.py" ]
[ "from collections import defaultdict\n\nimport pytest\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.utils import estimator_checks\n\nfrom sklego.dummy import RandomRegressor\nfrom sklego.transformers import EstimatorTransformer, RandomAdder\nfrom tests.conftest import id_func\n\n\n@pytest.mark.parametrize(\"estimator\", [\n RandomAdder(),\n EstimatorTransformer(LinearRegression()),\n RandomRegressor(),\n], ids=id_func)\ndef test_check_estimator(estimator, monkeypatch):\n \"\"\"Uses the sklearn `check_estimator` method to verify our custom estimators\"\"\"\n\n # Not all estimators CAN adhere to the defined sklearn api. An example of this is the random adder as sklearn\n # expects methods to be invariant to whether they are applied to the full dataset or a subset.\n # These tests can be monkey patched out using the skips dictionary.\n skips = defaultdict(list, {\n RandomAdder: [\n 'check_methods_subset_invariance', # Since we add noise, the method is not invariant on a subset\n ],\n RandomRegressor: [\n 'check_methods_subset_invariance', # Since we add noise, the method is not invariant on a subset\n 'check_regressors_train', # RandomRegressors score is not always greater than 0.5 due to randomness\n ]\n })\n\n def no_test(*args, **kwargs):\n return True\n\n for skip in skips[type(estimator)]:\n monkeypatch.setattr(estimator_checks, skip, no_test)\n\n estimator_checks.check_estimator(estimator)\n" ]
[ [ "sklearn.utils.estimator_checks.check_estimator", "sklearn.linear_model.LinearRegression" ] ]
meliani09/smt
[ "af729143be09b012257bf81dcd3e2c8c40f65c96", "ab3bda1e38ca3a785dfc0ed692f1437bf7e78bd3" ]
[ "smt/surrogate_models/surrogate_model.py", "smt/surrogate_models/kpls.py" ]
[ "'''\nAuthor: Dr. Mohamed A. Bouhlel <mbouhlel@umich.edu>\n Dr. John T. Hwang <hwangjt@umich.edu>\n\nThis package is distributed under New BSD license.\n'''\n#TODO: Extend to multifidelity problems by adding training_points = {'approx': {}}\n#TODO: Complete the mixture of expert model: verify from if self.options['name'] == 'MixExp': (predict)\n\nfrom __future__ import division\n\nimport numpy as np\nfrom collections import defaultdict\n\nfrom smt.utils.printer import Printer\nfrom smt.utils.options_dictionary import OptionsDictionary\nfrom smt.utils.checks import check_support, check_nx, check_2d_array\n\n\nclass SurrogateModel(object):\n \"\"\"\n Base class for all surrogate models.\n\n Attributes\n ----------\n options : OptionsDictionary\n Dictionary of options. Options values can be set on this attribute directly\n or they can be passed in as keyword arguments during instantiation.\n supports : dict\n Dictionary containing information about what this surrogate model supports.\n\n Examples\n --------\n >>> from smt.surrogate_models import RBF\n >>> sm = RBF(print_training=False)\n >>> sm.options['print_prediction'] = False\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Constructor where values of options can be passed in.\n\n For the list of options, see the documentation for the surrogate model being used.\n\n Parameters\n ----------\n **kwargs : named arguments\n Set of options that can be optionally set; each option must have been declared.\n\n Examples\n --------\n >>> from smt.surrogate_models import RBF\n >>> sm = RBF(print_global=False)\n \"\"\" \n self.options = OptionsDictionary()\n \n self.supports = supports = {}\n supports['training_derivatives'] = False\n supports['derivatives'] = False\n supports['output_derivatives'] = False\n supports['adjoint_api'] = False\n supports['variances'] = False\n \n declare = self.options.declare\n\n declare('print_global', True, types=bool,\n desc='Global print toggle. 
If False, all printing is suppressed')\n declare('print_training', True, types=bool,\n desc='Whether to print training information')\n declare('print_prediction', True, types=bool,\n desc='Whether to print prediction information')\n declare('print_problem', True, types=bool,\n desc='Whether to print problem information')\n declare('print_solver', True, types=bool,\n desc='Whether to print solver information')\n\n self._initialize()\n self.options.update(kwargs)\n self.training_points = defaultdict(dict)\n self.printer = Printer()\n \n def set_training_values(self, xt, yt, name=None):\n \"\"\"\n Set training data (values).\n\n Parameters\n ----------\n xt : np.ndarray[nt, nx] or np.ndarray[nt]\n The input values for the nt training points.\n yt : np.ndarray[nt, ny] or np.ndarray[nt]\n The output values for the nt training points.\n name : str or None\n An optional label for the group of training points being set.\n This is only used in special situations (e.g., multi-fidelity applications).\n \"\"\"\n xt = check_2d_array(xt, 'xt')\n yt = check_2d_array(yt, 'yt')\n\n if xt.shape[0] != yt.shape[0]:\n raise ValueError('the first dimension of xt and yt must have the same length')\n\n self.nt = xt.shape[0]\n self.nx = xt.shape[1]\n self.ny = yt.shape[1]\n kx = 0\n self.training_points[name][kx] = [np.array(xt), np.array(yt)]\n\n def update_training_values(self, yt, name=None):\n \"\"\"\n Update the training data (values) at the previously set input values.\n\n Parameters\n ----------\n yt : np.ndarray[nt, ny] or np.ndarray[nt]\n The output values for the nt training points.\n name : str or None\n An optional label for the group of training points being set.\n This is only used in special situations (e.g., multi-fidelity applications).\n \"\"\"\n yt = check_2d_array(yt, 'yt')\n\n kx = 0\n\n if kx not in self.training_points[name]:\n raise ValueError(\n 'The training points must be set first with set_training_values ' +\n 'before calling update_training_values.')\n\n xt = self.training_points[name][kx][0]\n if xt.shape[0] != yt.shape[0]:\n raise ValueError(\n 'The number of training points does not agree with the earlier call of ' +\n 'set_training_values.')\n\n self.training_points[name][kx][1] = np.array(yt)\n\n def set_training_derivatives(self, xt, dyt_dxt, kx, name=None):\n \"\"\"\n Set training data (derivatives).\n\n Parameters\n ----------\n xt : np.ndarray[nt, nx] or np.ndarray[nt]\n The input values for the nt training points.\n dyt_dxt : np.ndarray[nt, ny] or np.ndarray[nt]\n The derivatives values for the nt training points.\n kx : int\n 0-based index of the derivatives being set.\n name : str or None\n An optional label for the group of training points being set.\n This is only used in special situations (e.g., multi-fidelity applications).\n \"\"\"\n check_support(self, 'training_derivatives')\n\n xt = check_2d_array(xt, 'xt')\n dyt_dxt = check_2d_array(dyt_dxt, 'dyt_dxt')\n\n if xt.shape[0] != dyt_dxt.shape[0]:\n raise ValueError('the first dimension of xt and dyt_dxt must have the same length')\n\n if not isinstance(kx, int):\n raise ValueError('kx must be an int')\n\n self.training_points[name][kx + 1] = [np.array(xt), np.array(dyt_dxt)]\n\n def update_training_derivatives(self, dyt_dxt, kx, name=None):\n \"\"\"\n Update the training data (values) at the previously set input values.\n\n Parameters\n ----------\n dyt_dxt : np.ndarray[nt, ny] or np.ndarray[nt]\n The derivatives values for the nt training points.\n kx : int\n 0-based index of the derivatives being set.\n name : str 
or None\n An optional label for the group of training points being set.\n This is only used in special situations (e.g., multi-fidelity applications).\n \"\"\"\n check_support(self, 'training_derivatives')\n\n dyt_dxt = check_2d_array(dyt_dxt, 'dyt_dxt')\n\n if kx not in self.training_points[name]:\n raise ValueError(\n 'The training points must be set first with set_training_values ' +\n 'before calling update_training_values.')\n\n xt = self.training_points[name][kx][0]\n if xt.shape[0] != dyt_dxt.shape[0]:\n raise ValueError(\n 'The number of training points does not agree with the earlier call of ' +\n 'set_training_values.')\n\n self.training_points[name][kx + 1][1] = np.array(dyt_dxt)\n\n def train(self):\n \"\"\"\n Train the model\n \"\"\"\n n_exact = self.training_points[None][0][0].shape[0]\n\n self.printer.active = self.options['print_global']\n self.printer._line_break()\n self.printer._center(self.name)\n\n self.printer.active = self.options['print_global'] and self.options['print_problem']\n self.printer._title('Problem size')\n self.printer(' %-25s : %i' % ('# training points.', n_exact))\n self.printer()\n\n self.printer.active = self.options['print_global'] and self.options['print_training']\n if self.name == 'MixExp':\n # Mixture of experts model\n self.printer._title('Training of the Mixture of experts')\n else:\n self.printer._title('Training')\n\n #Train the model using the specified model-method\n with self.printer._timed_context('Training', 'training'):\n self._train()\n\n def predict_values(self, x):\n \"\"\"\n Predict the output values at a set of points.\n\n Parameters\n ----------\n x : np.ndarray[n, nx] or np.ndarray[n]\n Input values for the prediction points.\n\n Returns\n -------\n y : np.ndarray[n, ny]\n Output values at the prediction points.\n \"\"\"\n x = check_2d_array(x, 'x')\n check_nx(self.nx, x)\n n = x.shape[0]\n self.printer.active = self.options['print_global'] and self.options['print_prediction']\n\n if self.name == 'MixExp':\n # Mixture of experts model\n self.printer._title('Evaluation of the Mixture of experts')\n else:\n self.printer._title('Evaluation')\n self.printer(' %-12s : %i' % ('# eval points.', n))\n self.printer()\n\n #Evaluate the unknown points using the specified model-method\n with self.printer._timed_context('Predicting', key='prediction'):\n y = self._predict_values(x)\n\n time_pt = self.printer._time('prediction')[-1] / n\n self.printer()\n self.printer('Prediction time/pt. 
(sec) : %10.7f' % time_pt)\n self.printer()\n return y.reshape((n, self.ny))\n\n def predict_derivatives(self, x, kx):\n \"\"\"\n Predict the dy_dx derivatives at a set of points.\n\n Parameters\n ----------\n x : np.ndarray[n, nx] or np.ndarray[n]\n Input values for the prediction points.\n kx : int\n The 0-based index of the input variable with respect to which derivatives are desired.\n\n Returns\n -------\n dy_dx : np.ndarray[n, ny]\n Derivatives.\n \"\"\"\n check_support(self, 'derivatives')\n x = check_2d_array(x, 'x')\n check_nx(self.nx, x)\n n = x.shape[0]\n self.printer.active = self.options['print_global'] and self.options['print_prediction']\n\n if self.name == 'MixExp':\n # Mixture of experts model\n self.printer._title('Evaluation of the Mixture of experts')\n else:\n self.printer._title('Evaluation')\n self.printer(' %-12s : %i' % ('# eval points.', n))\n self.printer()\n\n #Evaluate the unknown points using the specified model-method\n with self.printer._timed_context('Predicting', key='prediction'):\n y = self._predict_derivatives(x, kx)\n\n time_pt = self.printer._time('prediction')[-1] / n\n self.printer()\n self.printer('Prediction time/pt. (sec) : %10.7f' % time_pt)\n self.printer()\n\n return y.reshape((n, self.ny))\n\n def predict_output_derivatives(self, x):\n \"\"\"\n Predict the derivatives dy_dyt at a set of points.\n\n Parameters\n ----------\n x : np.ndarray[n, nx] or np.ndarray[n]\n Input values for the prediction points.\n\n Returns\n -------\n dy_dyt : dict of np.ndarray[n, nt]\n Dictionary of output derivatives.\n Key is None for derivatives wrt yt and kx for derivatives wrt dyt_dxt.\n \"\"\"\n check_support(self, 'output_derivatives')\n check_nx(self.nx, x)\n\n dy_dyt = self._predict_output_derivatives(x)\n return dy_dyt\n\n def predict_variances(self, x):\n \"\"\"\n Predict the variances at a set of points.\n\n Parameters\n ----------\n x : np.ndarray[n, nx] or np.ndarray[n]\n Input values for the prediction points.\n\n Returns\n -------\n s2 : np.ndarray[n, ny]\n Variances.\n \"\"\"\n check_support(self, 'variances')\n check_nx(self.nx, x)\n n = x.shape[0]\n s2 = self._predict_variances(x)\n return s2.reshape((n, self.ny))\n\n def _initialize(self):\n \"\"\"\n Implemented by surrogate models to declare options and declare what they support (optional).\n\n Examples\n --------\n self.options.declare('option_name', default_value, types=(bool, int), desc='description')\n self.supports['derivatives'] = True\n \"\"\"\n pass\n\n def _train(self):\n \"\"\"\n Implemented by surrogate models to perform training (optional, but typically implemented).\n \"\"\"\n pass\n\n def _predict_values(self, x):\n \"\"\"\n Implemented by surrogate models to predict the output values.\n\n Parameters\n ----------\n x : np.ndarray[n, nx]\n Input values for the prediction points.\n\n Returns\n -------\n y : np.ndarray[n, ny]\n Output values at the prediction points.\n \"\"\"\n raise Exception('This surrogate model is incorrectly implemented')\n\n def _predict_derivatives(self, x, kx):\n \"\"\"\n Implemented by surrogate models to predict the dy_dx derivatives (optional).\n\n If this method is implemented, the surrogate model should have\n\n ::\n self.supports['derivatives'] = True\n\n in the _initialize() implementation.\n\n Parameters\n ----------\n x : np.ndarray[n, nx]\n Input values for the prediction points.\n kx : int\n The 0-based index of the input variable with respect to which derivatives are desired.\n\n Returns\n -------\n dy_dx : np.ndarray[n, ny]\n Derivatives.\n 
\"\"\"\n check_support(self, 'derivatives', fail=True)\n\n def _predict_output_derivatives(self, x):\n \"\"\"\n Implemented by surrogate models to predict the dy_dyt derivatives (optional).\n\n If this method is implemented, the surrogate model should have\n\n ::\n self.supports['output_derivatives'] = True\n\n in the _initialize() implementation.\n\n Parameters\n ----------\n x : np.ndarray[n, nx]\n Input values for the prediction points.\n\n Returns\n -------\n dy_dyt : dict of np.ndarray[n, nt]\n Dictionary of output derivatives.\n Key is None for derivatives wrt yt and kx for derivatives wrt dyt_dxt.\n \"\"\"\n check_support(self, 'output_derivatives', fail=True)\n\n def _predict_variances(self, x):\n \"\"\"\n Implemented by surrogate models to predict the variances at a set of points (optional).\n\n If this method is implemented, the surrogate model should have\n\n ::\n self.supports['variances'] = True\n\n in the _initialize() implementation.\n\n Parameters\n ----------\n x : np.ndarray[n, nx]\n Input values for the prediction points.\n\n Returns\n -------\n s2 : np.ndarray[n, ny]\n Variances.\n \"\"\"\n check_support(self, 'variances', fail=True)\n", "'''\nAuthor: Dr. Mohamed A. Bouhlel <mbouhlel@umich.edu>\n\nThis package is distributed under New BSD license.\n'''\n\nfrom __future__ import division\nimport warnings\nimport numpy as np\n\nfrom smt.surrogate_models.krg_based import KrgBased\nfrom smt.utils.kriging_utils import componentwise_distance_PLS\nfrom sklearn.cross_decomposition.pls_ import PLSRegression as pls\n\n\"\"\"\nThe KPLS class.\n\"\"\"\n\nclass KPLS(KrgBased):\n\n \"\"\"\n - KPLS\n \"\"\"\n def _initialize(self):\n super(KPLS, self)._initialize()\n declare = self.options.declare\n declare('n_comp', 1, types=int, desc='Number of principal components')\n declare('theta0', [1e-2], types=(list, np.ndarray), desc='Initial hyperparameters')\n self.name = 'KPLS'\n\n def _compute_pls(self,X,y):\n _pls = pls(self.options['n_comp'])\n self.coeff_pls = _pls.fit(X.copy(),y.copy()).x_rotations_\n\n return X,y\n\n def _componentwise_distance(self,dx,opt=0):\n d = componentwise_distance_PLS(dx,self.options['corr'].__name__,\n self.options['n_comp'],self.coeff_pls)\n return d\n" ]
[ [ "numpy.array" ], [ "sklearn.cross_decomposition.pls_.PLSRegression" ] ]
fjaragones/pyacq
[ "be4d7a485d3c851a6e8f28a1197ba6118ad082ea" ]
[ "pyacq/core/tests/test_tools.py" ]
[ "# -*- coding: utf-8 -*-\n# Copyright (c) 2016, French National Center for Scientific Research (CNRS)\n# Distributed under the (new) BSD License. See LICENSE for more info.\n\nfrom pyacq.core import OutputStream, InputStream\nfrom pyacq.core.tools import ThreadPollInput, StreamConverter, ChannelSplitter\nfrom pyqtgraph.Qt import QtCore, QtGui\nimport pyqtgraph as pg\n\nimport numpy as np\nimport weakref\nimport time\n\nnb_channel = 16\nchunksize = 100\nsr = 20000.\n\nstream_spec = dict(protocol='tcp', interface='127.0.0.1', port='*', \n transfermode='plaindata', streamtype='analogsignal',\n dtype='float32', shape=(-1, nb_channel),\n nb_channel =nb_channel,\n compression ='', scale = None, offset = None, units = '')\n\n\nclass ThreadSender(QtCore.QThread):\n def __init__(self, output_stream, parent=None):\n QtCore.QThread.__init__(self, parent)\n self.output_stream = weakref.ref(output_stream)\n \n def run(self):\n index = 0\n for i in range(500):\n index += chunksize\n arr = np.random.rand(chunksize, nb_channel).astype(stream_spec['dtype'])\n self.output_stream().send(arr, index=index)\n time.sleep(chunksize/sr)\n self.finished.emit()\n\n\ndef test_ThreadPollInput():\n app = pg.mkQApp()\n \n outstream = OutputStream()\n outstream.configure(**stream_spec)\n instream = InputStream()\n instream.connect(outstream)\n \n sender = ThreadSender(output_stream=outstream)\n poller = ThreadPollInput(input_stream=instream, return_data=True)\n \n \n global last_pos\n last_pos= 0\n def on_new_data(pos, arr):\n assert arr.shape==(chunksize, nb_channel)\n global last_pos\n last_pos += chunksize\n assert last_pos==pos\n \n def terminate():\n sender.wait()\n poller.stop()\n poller.wait()\n app.quit()\n \n sender.finished.connect(terminate)\n poller.new_data.connect(on_new_data)\n \n poller.start()\n sender.start()\n \n app.exec_()\n\n\ndef test_streamconverter():\n app = pg.mkQApp()\n \n stream_spec = dict(protocol='tcp', interface='127.0.0.1', port='*', \n transfermode='plaindata', streamtype='analogsignal',\n dtype='float32', shape=(-1, nb_channel))\n \n outstream = OutputStream()\n outstream.configure(**stream_spec)\n sender = ThreadSender(output_stream=outstream)\n \n stream_spec2 = dict(protocol='tcp', interface='127.0.0.1', port='*', \n transfermode='sharedmem', streamtype='analogsignal',\n dtype='float32', shape=(-1, nb_channel), buffer_size=1000,\n double=True,\n )\n\n \n \n conv = StreamConverter()\n conv.configure()\n conv.input.connect(outstream)\n conv.output.configure(**stream_spec2)\n conv.initialize()\n\n instream = InputStream()\n instream.connect(conv.output)\n\n global last_pos\n last_pos= 0\n def on_new_data(pos, arr):\n assert arr is None\n global last_pos\n last_pos += chunksize\n assert last_pos==pos\n \n def terminate():\n sender.wait()\n #~ conv.stop()\n poller.stop()\n poller.wait()\n app.quit()\n\n poller = ThreadPollInput(input_stream=instream, return_data=None)\n sender.finished.connect(terminate)\n poller.new_data.connect(on_new_data)\n \n \n poller.start()\n conv.start()\n sender.start()\n \n \n app.exec_()\n\n\ndef test_stream_splitter():\n app = pg.mkQApp()\n \n outstream = OutputStream()\n outstream.configure(**stream_spec)\n sender = ThreadSender(output_stream=outstream)\n\n def on_new_data(pos, arr):\n assert arr.shape[0]==chunksize\n assert not arr.flags['C_CONTIGUOUS']\n \n all_instream = []\n all_poller = []\n splitter = ChannelSplitter()\n splitter.configure(output_channels = { 'out0' : [0,1,2], 'out1' : [1,4,9, 12] })\n splitter.input.connect(outstream)\n for name, 
output in splitter.outputs.items():\n output.configure()\n instream = InputStream()\n instream.connect(output)\n poller = ThreadPollInput(input_stream=instream, return_data=True)\n poller.new_data.connect(on_new_data)\n all_instream.append(instream)\n all_poller.append(poller)\n splitter.initialize()\n\n def terminate():\n sender.wait()\n splitter.stop()\n for poller in all_poller:\n poller.stop()\n poller.wait()\n app.quit()\n\n sender.finished.connect(terminate)\n \n for poller in all_poller:\n poller.start()\n \n splitter.start()\n sender.start()\n \n \n app.exec_()\n \n \n\nif __name__ == '__main__':\n test_ThreadPollInput()\n test_streamconverter()\n test_stream_splitter()\n" ]
[ [ "numpy.random.rand" ] ]
marcintustin/xnmt
[ "f315fc5e493d25746bbde46d2c89cea3410d43df" ]
[ "test/test_training.py" ]
[ "import unittest\n\nimport dynet as dy\nimport numpy as np\n\nfrom xnmt.translator import DefaultTranslator\nfrom xnmt.embedder import SimpleWordEmbedder\nfrom xnmt.lstm import LSTMSeqTransducer\nfrom xnmt.pyramidal import PyramidalLSTMSeqTransducer\nfrom xnmt.attender import MlpAttender, DotAttender\nfrom xnmt.decoder import MlpSoftmaxDecoder, CopyBridge\nfrom xnmt.training_corpus import BilingualTrainingCorpus\nfrom xnmt.input import BilingualCorpusParser, PlainTextReader\nfrom xnmt.batcher import mark_as_batch, Mask, SrcBatcher\nimport xnmt.xnmt_train\nfrom xnmt.options import Args\nfrom xnmt.vocab import Vocab\nfrom xnmt.model_context import ModelContext, PersistentParamCollection\nfrom xnmt.training_strategy import TrainingStrategy\nimport xnmt.events\n\nclass TestTruncatedBatchTraining(unittest.TestCase):\n\n def setUp(self):\n xnmt.events.clear()\n self.model_context = ModelContext()\n self.model_context.dynet_param_collection = PersistentParamCollection(\"some_file\", 1)\n self.training_corpus = BilingualTrainingCorpus(train_src = \"examples/data/head.ja\",\n train_trg = \"examples/data/head.en\",\n dev_src = \"examples/data/head.ja\",\n dev_trg = \"examples/data/head.en\")\n self.corpus_parser = BilingualCorpusParser(src_reader = PlainTextReader(),\n trg_reader = PlainTextReader())\n self.corpus_parser.read_training_corpus(self.training_corpus)\n\n def assert_single_loss_equals_batch_loss(self, model, batch_size=5):\n \"\"\"\n Tests whether single loss equals batch loss.\n Truncating src / trg sents to same length so no masking is necessary\n \"\"\"\n batch_size = 5\n src_sents = self.training_corpus.train_src_data[:batch_size]\n src_min = min([len(x) for x in src_sents])\n src_sents_trunc = [s[:src_min] for s in src_sents]\n for single_sent in src_sents_trunc: single_sent[src_min-1] = Vocab.ES\n trg_sents = self.training_corpus.train_trg_data[:batch_size]\n trg_min = min([len(x) for x in trg_sents])\n trg_sents_trunc = [s[:trg_min] for s in trg_sents]\n for single_sent in trg_sents_trunc: single_sent[trg_min-1] = Vocab.ES\n\n single_loss = 0.0\n for sent_id in range(batch_size):\n dy.renew_cg()\n train_loss = model.calc_loss(src=src_sents_trunc[sent_id],\n trg=trg_sents_trunc[sent_id]).value()\n single_loss += train_loss\n\n dy.renew_cg()\n\n batched_loss = model.calc_loss(src=mark_as_batch(src_sents_trunc),\n trg=mark_as_batch(trg_sents_trunc)).value()\n self.assertAlmostEqual(single_loss, sum(batched_loss), places=4)\n\n def test_loss_model1(self):\n model = DefaultTranslator(\n src_embedder=SimpleWordEmbedder(self.model_context, vocab_size=100),\n encoder=LSTMSeqTransducer(self.model_context),\n attender=MlpAttender(self.model_context),\n trg_embedder=SimpleWordEmbedder(self.model_context, vocab_size=100),\n decoder=MlpSoftmaxDecoder(self.model_context, vocab_size=100),\n )\n model.initialize_training_strategy(TrainingStrategy())\n model.set_train(False)\n self.assert_single_loss_equals_batch_loss(model)\n\n def test_loss_model2(self):\n model = DefaultTranslator(\n src_embedder=SimpleWordEmbedder(self.model_context, vocab_size=100),\n encoder=PyramidalLSTMSeqTransducer(self.model_context, layers=3),\n attender=MlpAttender(self.model_context),\n trg_embedder=SimpleWordEmbedder(self.model_context, vocab_size=100),\n decoder=MlpSoftmaxDecoder(self.model_context, vocab_size=100),\n )\n model.initialize_training_strategy(TrainingStrategy())\n model.set_train(False)\n self.assert_single_loss_equals_batch_loss(model)\n\n def test_loss_model3(self):\n model = DefaultTranslator(\n 
src_embedder=SimpleWordEmbedder(self.model_context, vocab_size=100),\n encoder=LSTMSeqTransducer(self.model_context, layers=3),\n attender=MlpAttender(self.model_context),\n trg_embedder=SimpleWordEmbedder(self.model_context, vocab_size=100),\n decoder=MlpSoftmaxDecoder(self.model_context, vocab_size=100, bridge=CopyBridge(self.model_context, dec_layers=1)),\n )\n model.initialize_training_strategy(TrainingStrategy())\n model.set_train(False)\n self.assert_single_loss_equals_batch_loss(model)\n\n def test_loss_model4(self):\n model = DefaultTranslator(\n src_embedder=SimpleWordEmbedder(self.model_context, vocab_size=100),\n encoder=LSTMSeqTransducer(self.model_context),\n attender=DotAttender(self.model_context),\n trg_embedder=SimpleWordEmbedder(self.model_context, vocab_size=100),\n decoder=MlpSoftmaxDecoder(self.model_context, vocab_size=100),\n )\n model.initialize_training_strategy(TrainingStrategy())\n model.set_train(False)\n self.assert_single_loss_equals_batch_loss(model)\n\nclass TestBatchTraining(unittest.TestCase):\n\n def setUp(self):\n xnmt.events.clear()\n self.model_context = ModelContext()\n self.model_context.dynet_param_collection = PersistentParamCollection(\"some_file\", 1)\n self.training_corpus = BilingualTrainingCorpus(train_src = \"examples/data/head.ja\",\n train_trg = \"examples/data/head.en\",\n dev_src = \"examples/data/head.ja\",\n dev_trg = \"examples/data/head.en\")\n self.corpus_parser = BilingualCorpusParser(src_reader = PlainTextReader(),\n trg_reader = PlainTextReader())\n self.corpus_parser.read_training_corpus(self.training_corpus)\n\n def assert_single_loss_equals_batch_loss(self, model, batch_size=5):\n \"\"\"\n Tests whether single loss equals batch loss.\n Here we don't truncate the target side and use masking.\n \"\"\"\n batch_size = 5\n src_sents = self.training_corpus.train_src_data[:batch_size]\n src_min = min([len(x) for x in src_sents])\n src_sents_trunc = [s[:src_min] for s in src_sents]\n for single_sent in src_sents_trunc: single_sent[src_min-1] = Vocab.ES\n trg_sents = self.training_corpus.train_trg_data[:batch_size]\n trg_max = max([len(x) for x in trg_sents])\n trg_masks = Mask(np.zeros([batch_size, trg_max]))\n for i in range(batch_size):\n for j in range(len(trg_sents[i]), trg_max):\n trg_masks.np_arr[i,j] = 1.0\n trg_sents_padded = [[w for w in s] + [Vocab.ES]*(trg_max-len(s)) for s in trg_sents]\n\n single_loss = 0.0\n for sent_id in range(batch_size):\n dy.renew_cg()\n train_loss = model.calc_loss(src=src_sents_trunc[sent_id],\n trg=trg_sents[sent_id]).value()\n single_loss += train_loss\n\n dy.renew_cg()\n\n batched_loss = model.calc_loss(src=mark_as_batch(src_sents_trunc),\n trg=mark_as_batch(trg_sents_padded, trg_masks)).value()\n self.assertAlmostEqual(single_loss, sum(batched_loss), places=4)\n\n def test_loss_model1(self):\n model = DefaultTranslator(\n src_embedder=SimpleWordEmbedder(self.model_context, vocab_size=100),\n encoder=LSTMSeqTransducer(self.model_context),\n attender=MlpAttender(self.model_context),\n trg_embedder=SimpleWordEmbedder(self.model_context, vocab_size=100),\n decoder=MlpSoftmaxDecoder(self.model_context, vocab_size=100),\n )\n model.initialize_training_strategy(TrainingStrategy())\n model.set_train(False)\n self.assert_single_loss_equals_batch_loss(model)\n\n def test_loss_model2(self):\n model = DefaultTranslator(\n src_embedder=SimpleWordEmbedder(self.model_context, vocab_size=100),\n encoder=PyramidalLSTMSeqTransducer(self.model_context, layers=3),\n attender=MlpAttender(self.model_context),\n 
trg_embedder=SimpleWordEmbedder(self.model_context, vocab_size=100),\n decoder=MlpSoftmaxDecoder(self.model_context, vocab_size=100),\n )\n model.initialize_training_strategy(TrainingStrategy())\n model.set_train(False)\n self.assert_single_loss_equals_batch_loss(model)\n\n def test_loss_model3(self):\n model = DefaultTranslator(\n src_embedder=SimpleWordEmbedder(self.model_context, vocab_size=100),\n encoder=LSTMSeqTransducer(self.model_context, layers=3),\n attender=MlpAttender(self.model_context),\n trg_embedder=SimpleWordEmbedder(self.model_context, vocab_size=100),\n decoder=MlpSoftmaxDecoder(self.model_context, vocab_size=100, bridge=CopyBridge(self.model_context, dec_layers=1)),\n )\n model.initialize_training_strategy(TrainingStrategy())\n model.set_train(False)\n self.assert_single_loss_equals_batch_loss(model)\n\n\nclass TestTrainDevLoss(unittest.TestCase):\n \n def setUp(self):\n xnmt.events.clear()\n\n def test_train_dev_loss_equal(self):\n self.model_context = ModelContext()\n self.model_context.dynet_param_collection = PersistentParamCollection(\"some_file\", 1)\n task_options = xnmt.xnmt_train.options\n train_args = dict({opt.name: opt.default_value for opt in task_options if\n opt.default_value is not None or not opt.required})\n train_args['training_corpus'] = BilingualTrainingCorpus(train_src = \"examples/data/head.ja\",\n train_trg = \"examples/data/head.en\",\n dev_src = \"examples/data/head.ja\",\n dev_trg = \"examples/data/head.en\")\n train_args['corpus_parser'] = BilingualCorpusParser(src_reader = PlainTextReader(),\n trg_reader = PlainTextReader())\n train_args['training_strategy'] = TrainingStrategy()\n train_args['model'] = DefaultTranslator(src_embedder=SimpleWordEmbedder(self.model_context, vocab_size=100),\n encoder=LSTMSeqTransducer(self.model_context),\n attender=MlpAttender(self.model_context),\n trg_embedder=SimpleWordEmbedder(self.model_context, vocab_size=100),\n decoder=MlpSoftmaxDecoder(self.model_context, vocab_size=100),\n )\n train_args['model_file'] = None\n train_args['save_num_checkpoints'] = 0\n train_args['batcher'] = SrcBatcher(batch_size=5, break_ties_randomly=False)\n xnmt_trainer = xnmt.xnmt_train.XnmtTrainer(args=Args(**train_args), need_deserialization=False, param_collection=self.model_context.dynet_param_collection)\n xnmt_trainer.model_context = self.model_context\n xnmt_trainer.run_epoch(update_weights=False)\n self.assertAlmostEqual(xnmt_trainer.logger.epoch_loss.loss_values['loss'] / xnmt_trainer.logger.epoch_words,\n xnmt_trainer.logger.dev_score.loss)\n\nclass TestOverfitting(unittest.TestCase):\n\n def setUp(self):\n xnmt.events.clear()\n\n def test_overfitting(self):\n self.model_context = ModelContext()\n self.model_context.dynet_param_collection = PersistentParamCollection(\"some_file\", 1)\n self.model_context.default_layer_dim = 16\n task_options = xnmt.xnmt_train.options\n train_args = dict({opt.name: opt.default_value for opt in task_options if\n opt.default_value is not None or not opt.required})\n train_args['training_corpus'] = BilingualTrainingCorpus(train_src = \"examples/data/head.ja\",\n train_trg = \"examples/data/head.en\",\n dev_src = \"examples/data/head.ja\",\n dev_trg = \"examples/data/head.en\")\n train_args['corpus_parser'] = BilingualCorpusParser(src_reader = PlainTextReader(),\n trg_reader = PlainTextReader())\n train_args['training_strategy'] = TrainingStrategy()\n train_args['model'] = DefaultTranslator(src_embedder=SimpleWordEmbedder(self.model_context, vocab_size=100),\n 
encoder=LSTMSeqTransducer(self.model_context),\n attender=MlpAttender(self.model_context),\n trg_embedder=SimpleWordEmbedder(self.model_context, vocab_size=100),\n decoder=MlpSoftmaxDecoder(self.model_context, vocab_size=100),\n )\n train_args['model_file'] = None\n train_args['save_num_checkpoints'] = 0\n train_args['trainer'] = \"adam\"\n train_args['learning_rate'] = 0.1\n train_args['batcher'] = SrcBatcher(batch_size=10, break_ties_randomly=False)\n xnmt_trainer = xnmt.xnmt_train.XnmtTrainer(args=Args(**train_args), need_deserialization=False, param_collection=self.model_context.dynet_param_collection)\n xnmt_trainer.model_context = self.model_context\n for _ in range(50):\n xnmt_trainer.run_epoch(update_weights=True)\n self.assertAlmostEqual(0.0,\n xnmt_trainer.logger.epoch_loss.loss_values['loss'] / xnmt_trainer.logger.epoch_words,\n places=2)\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "numpy.zeros" ] ]
nnayar7/FEELVOS
[ "fc6387101127d1b94625b5ddd2be0d95b706d038" ]
[ "feelvos/models/Backbone.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass UNet(nn.Module):\n def __init__(self, n_ch, n_classes, bilinear=True):\n super(UNet, self).__init__()\n self.n_ch = n_ch\n self.n_classes = n_classes\n self.bilinear = bilinear\n\n self.inc = Double33Conv(n_ch, 64)\n \n self.down1 = Down(64, 128)\n self.down2 = Down(128, 256)\n self.down3 = Down(256, 512)\n self.down4 = Down(512, 512)\n self.up1 = Up(1024, 256, bilinear)\n self.up2 = Up(512, 128, bilinear)\n self.up3 = Up(256, 64, bilinear)\n self.up4 = Up(128, 64, bilinear)\n self.out = Out(64, n_classes)\n\n def forward(self, x):\n x1 = self.inc(x)\n x2 = self.down1(x1)\n x3 = self.down2(x2)\n x4 = self.down3(x3)\n x5 = self.down4(x4)\n x = self.up1(x5, x4)\n x = self.up2(x, x3)\n x = self.up3(x, x2)\n x = self.up4(x, x1)\n y = self.out(x)\n return y\n \n\nclass Double33Conv(nn.Module):\n def __init__(self, in_ch, out_ch):\n super().__init__()\n self.double33conv = nn.Sequential(\n nn.Conv2d(in_ch, out_ch, kernel_size=3, padding=1),\n nn.BatchNorm2d(out_ch),\n nn.ReLU(inplace=True),\n nn.Conv2d(out_ch, out_ch, kernel_size=3, padding=1),\n nn.BatchNorm2d(out_ch),\n nn.ReLU(inplace=True)\n )\n \n def forward(self, x):\n return self.double33conv(x)\n \n\nclass Down(nn.Module):\n def __init__(self, in_ch, out_ch):\n super().__init__()\n self.down = nn.Sequential(\n nn.MaxPool2d(2),\n Double33Conv(in_ch, out_ch)\n )\n \n def forward(self, x):\n return self.down(x)\n \n\nclass Up(nn.Module):\n def __init__(self, in_ch, out_ch, bilinear=True):\n super().__init__()\n if bilinear:\n self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)\n else: \n self.up = nn.ConvTranspose2d(in_ch//2, out_ch//2, kernel_size=2, stride=2)\n \n self.conv = Double33Conv(in_ch, out_ch)\n \n \n def forward(self, x1, x2):\n x1 = self.up(x1)\n\n dy = x2.size()[2]-x1.size()[2]\n dx = x2.size()[3]-x1.size()[3]\n \"\"\" Caution: Padding dimension\n N, C, H, W, dx=diffence of W-value\n pad=(w_left,w_right,h_top,h_bottom)\n \"\"\"\n x1 = F.pad(input=x1, pad=(dx//2, dx-dx//2, dy//2, dy-dy//2))\n # print('sizes',x1.size(),x2.size(),dx // 2, dx - dx//2, dy // 2, dy - dy//2)\n x = torch.cat([x2, x1], dim=1)\n return self.conv(x)\n \n\nclass Out(nn.Module):\n def __init__(self, in_ch, out_ch):\n super().__init__()\n self.out = nn.Conv2d(in_ch, out_ch, kernel_size=1)\n \n def forward(self, x):\n return self.out(x)" ]
[ [ "torch.nn.ConvTranspose2d", "torch.cat", "torch.nn.Conv2d", "torch.nn.MaxPool2d", "torch.nn.Upsample", "torch.nn.BatchNorm2d", "torch.nn.ReLU", "torch.nn.functional.pad" ] ]
qgking/CKDNet
[ "bc7273e0f3001e7f07241c842d7c49fff077fe5d" ]
[ "module/Critierion.py" ]
[ "import torch.nn as nn\nimport torch.nn.functional as F\nimport torch\nimport numpy as np\n\n\ndef make_one_hot(labels, classes):\n one_hot = torch.cuda.FloatTensor(labels.size()[0], classes, labels.size()[2], labels.size()[3]).zero_()\n target = one_hot.scatter_(1, labels.data, 1)\n return target\n\n\nclass SoftDiceLoss(nn.Module):\n def __init__(self, weight=None, size_average=True):\n super(SoftDiceLoss, self).__init__()\n\n def forward(self, probs, targets):\n num = targets.size(0)\n m1 = probs.view(num, -1)\n m2 = targets.view(num, -1)\n intersection = (m1 * m2)\n\n score = 2. * (intersection.sum(1) + 1) / (m1.sum(1) + m2.sum(1) + 1)\n score = 1 - score.sum() / num\n return max(score, 0)\n\n\n# V1\nclass DC_and_Focal_loss(nn.Module):\n def __init__(self, ):\n super(DC_and_Focal_loss, self).__init__()\n # self.flb = BinaryFocalLoss(alpha=None)\n self.flb = BinaryFocalLoss()\n self.dc = SoftDiceLoss()\n # self.gamma1 = Parameter(torch.ones(1))\n\n def forward(self, net_output, target, lambada=1.):\n dc_loss = self.dc(net_output, target)\n flb_loss = self.flb(net_output, target)\n # flb_loss_value = flb_loss.item()\n # dc_loss_value = dc_loss.item()\n # result = dc_loss_value * flb_loss + dc_loss * flb_loss_value\n result = lambada * flb_loss + dc_loss\n # result = self.gamma1 * flb_loss + dc_loss\n return result\n\n\nclass FocalLoss(nn.Module):\n def __init__(self, gamma=2, alpha=None, ignore_index=255, size_average=True):\n super(FocalLoss, self).__init__()\n self.gamma = gamma\n self.size_average = size_average\n self.CE_loss = nn.CrossEntropyLoss(reduce=False, ignore_index=ignore_index, weight=alpha)\n\n def forward(self, output, target):\n logpt = self.CE_loss(output, target)\n pt = torch.exp(-logpt)\n loss = ((1 - pt) ** self.gamma) * logpt\n if self.size_average:\n return loss.mean()\n return loss.sum()\n\n\nclass BinaryFocalLoss(nn.Module):\n \"\"\"\n This is a implementation of Focal Loss with smooth label cross entropy supported which is proposed in\n 'Focal Loss for Dense Object Detection. 
(https://arxiv.org/abs/1708.02002)'\n Focal_Loss= -1*alpha*(1-pt)*log(pt)\n :param num_class:\n :param alpha: (tensor) 3D or 4D the scalar factor for this criterion\n :param gamma: (float,double) gamma > 0 reduces the relative loss for well-classified examples (p>0.5) putting more\n focus on hard misclassified example\n :param reduction: `none`|`mean`|`sum`\n :param **kwargs\n balance_index: (int) balance class index, should be specific when alpha is float\n \"\"\"\n\n def __init__(self, alpha=[1.0, 1.0], gamma=2, ignore_index=None, reduction='mean'):\n super(BinaryFocalLoss, self).__init__()\n if alpha is None:\n alpha = [0.25, 0.75]\n self.alpha = alpha\n self.gamma = gamma\n self.smooth = 1e-6\n self.ignore_index = ignore_index\n self.reduction = reduction\n\n assert self.reduction in ['none', 'mean', 'sum']\n\n if self.alpha is None:\n self.alpha = torch.ones(2)\n elif isinstance(self.alpha, (list, np.ndarray)):\n self.alpha = np.asarray(self.alpha)\n self.alpha = np.reshape(self.alpha, (2))\n assert self.alpha.shape[0] == 2, \\\n 'the `alpha` shape is not match the number of class'\n elif isinstance(self.alpha, (float, int)):\n self.alpha = np.asarray([self.alpha, 1.0 - self.alpha], dtype=np.float).view(2)\n\n else:\n raise TypeError('{} not supported'.format(type(self.alpha)))\n\n def forward(self, output, target):\n prob = output\n prob = torch.clamp(prob, self.smooth, 1.0 - self.smooth)\n\n pos_mask = (target == 1).float()\n neg_mask = (target == 0).float()\n\n pos_loss = -self.alpha[0] * torch.pow(torch.sub(1.0, prob), self.gamma) * torch.log(prob) * pos_mask\n neg_loss = -self.alpha[1] * torch.pow(prob, self.gamma) * \\\n torch.log(torch.sub(1.0, prob)) * neg_mask\n\n neg_loss = neg_loss.sum()\n pos_loss = pos_loss.sum()\n num_pos = pos_mask.view(pos_mask.size(0), -1).sum()\n num_neg = neg_mask.view(neg_mask.size(0), -1).sum()\n\n if num_pos == 0:\n loss = neg_loss\n else:\n loss = pos_loss / num_pos + neg_loss / num_neg\n return loss\n" ]
[ [ "torch.nn.CrossEntropyLoss", "torch.ones", "numpy.asarray", "numpy.reshape", "torch.sub", "torch.exp", "torch.log", "torch.clamp", "torch.pow" ] ]
Justin-Tan/invariant_reps
[ "a6a647b03193e02af9f6a289a9443266564de15d" ]
[ "network.py" ]
[ "\"\"\" Network wiring \"\"\"\n\nimport tensorflow as tf\nimport numpy as np\nimport glob, time, os\nimport functools\n\nfrom utils import Utils\n\nclass Network(object):\n\n @staticmethod\n def _spectral_norm(w):\n w_shape = w.shape.as_list()\n w = tf.reshape(w, [-1, w_shape[-1]])\n\n with tf.variable_scope(\"u\", reuse=tf.AUTO_REUSE):\n u = tf.get_variable(\n \"u\", [1, w_shape[-1]],\n dtype=w.dtype,\n initializer=tf.truncated_normal_initializer(),\n trainable=False)\n\n v_hat = tf.nn.l2_normalize(tf.matmul(u, tf.transpose(w)))\n u_hat = tf.nn.l2_normalize(tf.matmul(v_hat, w))\n\n with tf.control_dependencies([tf.assign(u, u_hat, name=\"update_u\")]):\n u_hat = tf.identity(u_hat)\n\n u_hat = tf.stop_gradient(u_hat)\n v_hat = tf.stop_gradient(v_hat)\n\n sigma_w = tf.squeeze(tf.matmul(v_hat, tf.matmul(w, tf.transpose(u_hat))))\n\n assign_u = tf.assign(u, u_hat)\n\n w_sn = tf.divide(w, sigma_w)\n w_sn = tf.reshape(w_sn, w_shape)\n\n return w_sn\n\n @staticmethod\n def dense_network(x, config, training, name='fully_connected', actv=tf.nn.relu, **kwargs):\n # Toy dense network for binary classification\n \n init = tf.contrib.layers.xavier_initializer()\n shape = [512,512,512,512,512]\n kwargs = {'center': True, 'scale': True, 'training': training, 'fused': True, 'renorm': True}\n # x = tf.reshape(x, [-1, num_features])\n # x = x[:,:-1]\n print('Input X shape', x.get_shape())\n\n with tf.variable_scope(name, initializer=init, reuse=tf.AUTO_REUSE) as scope:\n h0 = tf.layers.dense(x, units=shape[0], activation=actv)\n h0 = tf.layers.batch_normalization(h0, **kwargs)\n\n h1 = tf.layers.dense(h0, units=shape[1], activation=actv)\n h1 = tf.layers.batch_normalization(h1, **kwargs)\n\n h2 = tf.layers.dense(h1, units=shape[2], activation=actv)\n h2 = tf.layers.batch_normalization(h2, **kwargs)\n\n h3 = tf.layers.dense(h2, units=shape[3], activation=actv)\n h3 = tf.layers.batch_normalization(h3, **kwargs)\n\n h4 = tf.layers.dense(h3, units=shape[3], activation=actv)\n h4 = tf.layers.batch_normalization(h4, **kwargs)\n\n out = tf.layers.dense(h4, units=1, kernel_initializer=init)\n \n return out, h4\n\n @staticmethod\n def dense_network_ext(x, config, training, n_layers, n_classes, name='fcn', actv=tf.nn.relu, **kwargs):\n # Toy dense network for binary classification\n \n init = tf.contrib.layers.xavier_initializer()\n shape = [64 for _ in range(int(n_layers))]\n assert n_layers <= len(shape), 'Number of requested layers too high.'\n kwargs = {'center': True, 'scale': True, 'training': training, 'fused': True, 'renorm': True}\n print('Input X shape', x.get_shape())\n\n with tf.variable_scope(name, initializer=init, reuse=tf.AUTO_REUSE) as scope:\n h0 = tf.layers.dense(x, units=shape[0], activation=None)\n # h0 = tf.layers.batch_normalization(h0, **kwargs)\n h0 = tf.contrib.layers.layer_norm(h0, center=True, scale=True, activation_fn=actv)\n h = h0\n current_layer = 1\n\n while current_layer < n_layers:\n h = tf.layers.dense(h, units=shape[current_layer], activation=None)\n # h = tf.layers.batch_normalization(h, **kwargs)\n h = tf.contrib.layers.layer_norm(h, center=True, scale=True, activation_fn=actv)\n current_layer += 1\n\n out = tf.layers.dense(h, units=n_classes, kernel_initializer=init)\n\n return out, h\n\n @staticmethod\n def MINE(x, y, y_prime, training, batch_size, name='MINE', actv=tf.nn.elu, \n n_layers=2, dimension=2, labels=None, jensen_shannon=True, \n standardize=False, apply_sn=False, **kwargs):\n \"\"\"\n Mutual Information Neural Estimator\n (x,y): Drawn from joint p(x,y)\n 
y_prime: Drawn from marginal p(y)\n\n returns\n MI: Lower bound on mutual information between x,y\n \"\"\"\n\n init = tf.contrib.layers.xavier_initializer()\n drop_rate = 0.0\n shape = [64 for _ in range(int(n_layers))]\n assert n_layers <= len(shape), 'Number of requested layers too high.'\n kwargs = {'center': True, 'scale': True, 'training': training, 'fused': True, 'renorm': False}\n # y_prime = tf.random_shuffle(y)\n\n # Standardize inputs\n x_mu, x_sigma = tf.nn.moments(x, axes=0)\n y_mu, y_sigma = tf.nn.moments(y, axes=0)\n y_prime_mu, y_prime_sigma = tf.nn.moments(y_prime, axes=0)\n\n if standardize:\n x = (x - x_mu) / x_sigma\n y = (y - y_mu) / y_sigma\n y_prime = (y_prime - y_prime_mu) / y_prime_sigma\n\n if dimension == 2:\n y, y_prime = tf.expand_dims(y, axis=1), tf.expand_dims(y_prime, axis=1)\n if len(x.get_shape().as_list()) < 2:\n x = tf.expand_dims(x, axis=1)\n\n z = tf.concat([x,y], axis=1)\n z_prime = tf.concat([x,y_prime], axis=1)\n z.set_shape([None, dimension])\n z_prime.set_shape([None, dimension])\n print('X SHAPE:', x.get_shape().as_list())\n print('Z SHAPE:', z.get_shape().as_list())\n print('Z PRIME SHAPE:', z_prime.get_shape().as_list())\n\n def statistic_network(t, name='MINE', apply_spectral_norm=False, reuse=False):\n\n if apply_spectral_norm is True:\n kernel_constraint = Network._spectral_norm\n print('Applying spectral norm')\n else:\n kernel_constraint = None\n\n with tf.variable_scope(name, initializer=init, reuse=reuse) as scope:\n\n h0 = tf.layers.dense(t, units=shape[0], activation=None, \n kernel_constraint=kernel_constraint)\n # h0 = tf.layers.batch_normalization(h0, **kwargs)\n h0 = tf.contrib.layers.layer_norm(h0, center=True, scale=True, activation_fn=actv)\n\n h = h0\n current_layer = 1\n\n while current_layer < n_layers:\n h = tf.layers.dense(h, units=shape[current_layer], activation=None,\n kernel_constraint=kernel_constraint)\n h = tf.contrib.layers.layer_norm(h, center=True, scale=True, activation_fn=actv)\n current_layer += 1\n\n out = tf.layers.dense(h, units=1, kernel_initializer=init)\n\n return out\n\n def log_sum_exp_trick(x, batch_size, axis=1):\n # Compute along batch dimension\n x = tf.squeeze(x)\n x_max = tf.reduce_max(x)\n # lse = x_max + tf.log(tf.reduce_mean(tf.exp(x-x_max)))\n lse = x_max + tf.log(tf.reduce_sum(tf.exp(x-x_max))) - tf.log(batch_size)\n return lse\n\n joint_f = statistic_network(z, apply_spectral_norm=apply_sn)\n marginal_f = statistic_network(z_prime, reuse=True, apply_spectral_norm=apply_sn)\n print('Joint shape', joint_f.shape)\n print('marginal shape', marginal_f.shape)\n\n # MI_lower_bound = tf.reduce_mean(joint_f) - tf.log(tf.reduce_mean(tf.exp(marginal_f)) + 1e-5)\n MI_lower_bound = tf.squeeze(tf.reduce_mean(joint_f)) - tf.squeeze(log_sum_exp_trick(marginal_f,\n tf.cast(batch_size, tf.float32)))\n\n # H(p,q) = - E_p[log q]\n joint_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=joint_f,\n labels=tf.ones_like(joint_f)))\n marginal_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=marginal_f,\n labels=tf.zeros_like(marginal_f)))\n\n JSD_lower_bound = -(marginal_loss + joint_loss) + tf.log(4.0)\n # JSD_lower_bound = tf.squeeze(tf.reduce_mean(-tf.nn.softplus(-tf.squeeze(joint_f)))) - tf.squeeze(tf.reduce_mean(tf.nn.softplus(tf.squeeze(marginal_f))))\n # GAN_lower_bound = tf.reduce_mean(tf.log(tf.nn.sigmoid(joint_f))) + tf.reduce_mean(tf.log(1.0-tf.nn.sigmoid(marginal_f)))\n\n if jensen_shannon:\n lower_bound = JSD_lower_bound\n else:\n lower_bound = MI_lower_bound\n\n 
return (z, z_prime), (joint_f, marginal_f), lower_bound\n\n\n @staticmethod\n def kernel_MMD(x, y, y_prime, batch_size, name='kernel_MMD', actv=tf.nn.elu, dimension=2, labels=None, bkg_only=True):\n \"\"\"\n Kernel MMD \n (x,y): Drawn from joint\n y_prime: Drawn from marginal\n\n returns\n mmd2: MMD distance between two distributions\n \"\"\"\n\n def gaussian_kernel_mmd2(X, Y, gamma):\n \"\"\"\n Parameters\n ____\n X: Matrix, shape: (n_samples, features)\n Y: Matrix, shape: (m_samples, features)\n\n Returns\n ____\n mmd: MMD under Gaussian kernel\n \"\"\"\n\n XX = tf.matmul(X, X, transpose_b=True)\n XY = tf.matmul(X, Y, transpose_b=True)\n YY = tf.matmul(Y, Y, transpose_b=True)\n\n M, N = tf.cast(XX.get_shape()[0], tf.float32), tf.cast(YY.get_shape()[0], tf.float32)\n\n X_sqnorm = tf.reduce_sum(X**2, axis=-1)\n Y_sqnorm = tf.reduce_sum(Y**2, axis=-1)\n\n row_bc = lambda x: tf.expand_dims(x,0)\n col_bc = lambda x: tf.expand_dims(x,1)\n\n K_XX = tf.exp( -gamma * (col_bc(X_sqnorm) - 2 * XX + row_bc(X_sqnorm)))\n K_XY = tf.exp( -gamma * (col_bc(X_sqnorm) - 2 * XY + row_bc(Y_sqnorm)))\n K_YY = tf.exp( -gamma * (col_bc(Y_sqnorm) - 2 * YY + row_bc(Y_sqnorm)))\n\n mmd2 = tf.reduce_sum(K_XX) / M**2 - 2 * tf.reduce_sum(K_XY) / (M*N) + tf.reduce_sum(K_YY) / N**2\n\n return mmd2\n\n def rbf_mixed_mmd2(X, Y, M, N, sigmas=[1.0, 2.0, 5.0, 10.0, 20.0, 40.0, 80.0]):\n \"\"\"\n Parameters\n ____\n X: Matrix, shape: (n_samples, features)\n Y: Matrix, shape: (m_samples, features)\n sigmas: RBF parameter\n\n Returns\n ____\n mmd2: MMD under Gaussian mixed kernel\n \"\"\"\n\n XX = tf.matmul(X, X, transpose_b=True)\n XY = tf.matmul(X, Y, transpose_b=True)\n YY = tf.matmul(Y, Y, transpose_b=True)\n\n X_sqnorm = tf.reduce_sum(X**2, axis=-1)\n Y_sqnorm = tf.reduce_sum(Y**2, axis=-1)\n\n row_bc = lambda x: tf.expand_dims(x,0)\n col_bc = lambda x: tf.expand_dims(x,1)\n\n K_XX, K_XY, K_YY = 0,0,0\n\n for sigma in sigmas:\n gamma = 1 / (2 * sigma**2)\n K_XX += tf.exp( -gamma * (col_bc(X_sqnorm) - 2 * XX + row_bc(X_sqnorm)))\n K_XY += tf.exp( -gamma * (col_bc(X_sqnorm) - 2 * XY + row_bc(Y_sqnorm)))\n K_YY += tf.exp( -gamma * (col_bc(Y_sqnorm) - 2 * YY + row_bc(Y_sqnorm)))\n\n mmd2 = tf.reduce_sum(K_XX) / M**2 - 2 * tf.reduce_sum(K_XY) / (M*N) + tf.reduce_sum(K_YY) / N**2\n\n return mmd2\n\n init = tf.contrib.layers.xavier_initializer()\n\n if bkg_only:\n batch_size_bkg_only = tf.cast(batch_size - tf.reduce_sum(labels), tf.float32)\n\n if dimension == 2:\n y, y_prime = tf.expand_dims(y, axis=1), tf.expand_dims(y_prime, axis=1)\n if len(x.get_shape().as_list()) < 2:\n x = tf.expand_dims(x, axis=1)\n\n z = tf.concat([x,y], axis=1)\n z_prime = tf.concat([x,y_prime], axis=1)\n z.set_shape([None, dimension])\n z_prime.set_shape([None, dimension])\n print('X SHAPE:', x.get_shape().as_list())\n print('Z SHAPE:', z.get_shape().as_list())\n print('Z PRIME SHAPE:', z_prime.get_shape().as_list())\n\n mmd2 = tf.nn.relu(rbf_mixed_mmd2(z, z_prime, M=batch_size_bkg_only, N=batch_size))\n\n return z, z_prime, tf.sqrt(mmd2)\n" ]
[ [ "tensorflow.concat", "tensorflow.reduce_sum", "tensorflow.cast", "tensorflow.layers.batch_normalization", "tensorflow.nn.moments", "tensorflow.layers.dense", "tensorflow.squeeze", "tensorflow.divide", "tensorflow.stop_gradient", "tensorflow.truncated_normal_initializer", "tensorflow.contrib.layers.xavier_initializer", "tensorflow.matmul", "tensorflow.identity", "tensorflow.exp", "tensorflow.zeros_like", "tensorflow.reduce_max", "tensorflow.transpose", "tensorflow.reduce_mean", "tensorflow.reshape", "tensorflow.assign", "tensorflow.expand_dims", "tensorflow.ones_like", "tensorflow.contrib.layers.layer_norm", "tensorflow.log", "tensorflow.variable_scope", "tensorflow.sqrt" ] ]
rick1270/DS-Unit-3-Sprint-1-Software-Engineering
[ "3e0b0b09ce365452a50980d18d5479deeb19e6bc" ]
[ "acme.py" ]
[ "\n# coding: utf-8\n\n# In[1]:\n\n\nfrom numpy import random\n\nclass Product(object):\n def __init__(self, name, price=10, weight=20, flammability=0.5, identifier=random.randint(1000000,999999999)):\n self.name = name\n self.price = price\n self.weight = weight\n self.flammability = flammability \n\n def stealability(self):\n st = self.price/self.weight\n if st < .5:\n return \"Not so stealable...\"\n elif st >= .5 and st <1:\n return \"Kinda stealable.\"\n else:\n return \"Very stealable!\"\n\n def explode(self):\n ex = self.flammability * self.weight\n if ex < 10:\n return \"fizzle...\"\n elif ex >= 10 and ex <50:\n return \"...boom!\"\n else:\n return \"...BABOOM!!\"\nclass BoxingGlove(Product):\n def __init__(self, name, price=10, weight=10, flammability=0.5, identifier=random.randint(1000000,999999999)):\n self.name = name\n self.price = price\n self.weight = weight\n self.flammability = flammability \n\n def stealability(self):\n st = self.price/self.weight\n if st < .5:\n return \"Not so stealable...\"\n elif st >= .5 and st <1:\n return \"Kinda stealable.\"\n else:\n return \"Very stealable!\"\n\n def explode(self):\n return \"...it's a glove.\"\n \n def punch(self):\n pu = self.weight\n if pu < 5:\n return \"That tickles.\"\n elif pu >= 5 and pu <15:\n return \"Hey that hurt!\"\n else:\n return \"OUCH!\"\n\n" ]
[ [ "numpy.random.randint" ] ]
danielmlow/vfp
[ "f43d170f25fa413c60b4d1930d658cb38cdd3265" ]
[ "analyze_results.py" ]
[ "\n\nimport os\nimport pickle\nimport json\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\n\ndef performance_table(results, permute_order, output_dir, score_i = 0,score_name='roc_auc_score', round = 2):\n\t# score = {N}, choose which metric to compute, if run has 30 splits with 2 score metrics, scores_data.shape = (30,2)\n\t# Performance df\n\n\tdf_null = []\n\tdf_all = []\n\tcolumns = []\n\tscores_data_all = []\n\tscores_data_median_all = [ ]\n\tscores_null_median_all = [ ]\n\tscores_data_ci_all = []\n\n\t# Loop through models\n\tfor i, permute in enumerate(permute_order):\n\t\tif permute:\n\t\t\tmodel_null = results[i]\n\t\t\tscores_null = model_null[1].output.score\n\t\t\tscores_null_median = np.median(scores_null)\n\t\t\tscores_null_median_all.append(scores_null_median)\n\t\telse:\n\t\t\tmodel = results[i]\n\t\t\tmodel_name = list(model[0].values())[0][1]\n\t\t\tcolumns.append(model_name)\n\t\t\tscores_data = np.array(model[1].output.score)[:,score_i] #which metric\n\t\t\tscores_data_all.append(scores_data)\n\t\t\tscores_data_median = np.median(scores_data)\n\t\t\tscores_data_median_all.append(scores_data_median)\n\t\t\tci = [np.percentile(scores_data,2.5),np.percentile(scores_data,97.5)] # 95% CI\n\t\t\tscores_data_ci_all.append(ci)\n\n\n\n\t\t# Save median score with median null score in parenthesis as strings\n\tif False in permute_order:\n\t\tfor data, null, ci in zip(scores_data_median_all, scores_null_median_all, scores_data_ci_all ):\n\t\t\tdata = format(np.round(data,round),'.2f')\n\t\t\tnull = format(np.round(null,round),'.2f')\n\t\t\tci_lower = format(np.round(ci[0],round),'.2f')\n\t\t\tci_upper = format(np.round(ci[1], round), '.2f')\n\t\t\tdf_null.append(f'{data} ({ci_lower}–{ci_upper}; {null})')\n\t\tdf_null = pd.DataFrame(df_null).T\n\t\tdf_null.columns = columns\n\t\tcolumns.sort() # we put cols in alphabetical order to match the test and stats plot\n\t\tdf_null = df_null[columns]\n\t\tdf_null.to_csv(os.path.join(output_dir, f'test_performance_with_null_{score_name}.csv')) # Todo add timestep\n\t\tprint(df_null.values)\n\t\tprint('=====')\n\n\t# Save all results\n\tfor all_score in scores_data_all:\n\t\tdf_all.append(all_score)\n\n\tdf_all = pd.DataFrame(df_all).T\n\tdf_all.columns = columns\n\tcolumns.sort() # we put cols in alphabetical order to match the test and stats plot\n\tdf_all = df_all[columns]\n\tdf_all.to_csv(os.path.join(output_dir, f'test_performance_{score_name}.csv'))# Todo add timestep\n\tdf_median = df_all.median()\n\tdf_median.to_csv(os.path.join(output_dir, f'test_performance_median_{score_name}.csv')) # Todo add timestep\n\n\treturn df_median, df_all\n\n\ndef feature_importance_to_summary(results, permute_order, feature_names, output_dir):\n\t# output from sklearn pipeline.coef_, .coefs_ or .feature_importances_\n\tnew_output_dir = output_dir + 'feature_importance/'\n\ttry:\n\t\tos.mkdir(new_output_dir)\n\texcept:\n\t\tpass\n\tfor i, permute in enumerate(permute_order):\n\t\tif permute:\n\t\t\t# if this is the null distribution with permuted labels, it won't output feature importance\n\t\t\tcontinue\n\t\telse:\n\t\t\tmodel = results[i]\n\t\t\tmodel_name = list(model[0].values())[0][1]\n\t\t\tif 'MLP' in model_name:\n\t\t\t\t# it is computed but it will have N weights for each input feature. 
One could take the sum, but it is harder to interpret.\n\t\t\t\tcontinue\n\n\t\t\tfeature_importance = model[1].output.feature_importance\n\t\t\tcolumns = ['split_'+str(n) for n in range(len(feature_importance))]\n\t\t\tdf = pd.DataFrame(feature_importance, index= columns, columns = feature_names).T\n\t\t\tdf[\"mean\"] = df.mean(axis=1)\n\t\t\tdf[\"std\"] = df.std(axis=1)\n\t\t\tdf[\"min\"] = df.min(axis=1)\n\t\t\tdf[\"max\"] = df.max(axis=1)\n\t\t\tdf_sorted = df.sort_values(\"mean\")[::-1]\n\t\t\tdf_sorted.to_csv(f\"{new_output_dir}feature_importance_{model_name}.csv\")\n\treturn\n\n\ndef permutation_importance_to_summary(results, permute_order, feature_names,output_dir):\n\t# output from sklearn.inspection.permutation_importance()\n\tnew_output_dir = output_dir + 'permutation_importance/'\n\ttry: os.mkdir(new_output_dir )\n\texcept: pass\n\tfor i, permute in enumerate(permute_order):\n\t\tif permute:\n\t\t\t# if this is the null distribution with permuted labels, it won't output feature importance\n\t\t\tcontinue\n\t\telse:\n\t\t\tmodel = results[i]\n\t\t\tmodel_name = list(model[0].values())[0][1]\n\t\t\tpermutation_importance = model[1].output.permutation_importance\n\t\t\tcolumns = ['split_'+str(n) for n in range(len(permutation_importance))]\n\t\t\tdf = pd.DataFrame(permutation_importance, index= columns, columns = feature_names).T\n\t\t\tdf[\"mean\"] = df.mean(axis=1)\n\t\t\tdf[\"std\"] = df.std(axis=1)\n\t\t\tdf[\"min\"] = df.min(axis=1)\n\t\t\tdf[\"max\"] = df.max(axis=1)\n\t\t\tdf_sorted = df.sort_values(\"mean\")[::-1]\n\t\t\tdf_sorted.to_csv(f\"{new_output_dir}permutation_importance_{model_name}.csv\")\n\treturn\n\n\n\n\ndef plot_summary(summary, output_dir=None, filename=\"shap_plot\", plot_top_n_shap=16):\n\tplt.clf()\n\tplt.figure(figsize=(8, 12))\n\t# plot without all bootstrapping values\n\tsummary = summary[[\"mean\", \"std\", \"min\", \"max\"]]\n\tnum_features = len(list(summary.index))\n\tif (plot_top_n_shap != 1 and type(plot_top_n_shap) == float) or type(\n\t\tplot_top_n_shap) == int:\n\t\t# if plot_top_n_shap != 1.0 but includes 1 (int)\n\t\tif plot_top_n_shap <= 0:\n\t\t\traise ValueError(\n\t\t\t\t\"plot_top_n_shap should be a float between 0 and 1.0 or an integer >= 1. 
You set to zero or negative.\"\n\t\t\t)\n\t\telif plot_top_n_shap < 1:\n\t\t\tplot_top_n_shap = int(np.round(plot_top_n_shap * num_features))\n\t\tsummary = summary.iloc[:plot_top_n_shap, :]\n\t\tfilename += f\"_top_{plot_top_n_shap}\"\n\t\t# todo remove\n\t\tfilename = filename.replace('_values', '')\n\n\n\thm = sns.heatmap(\n\t\tsummary.round(3), annot=True, xticklabels=True, yticklabels=True, cbar=False, square=True, annot_kws={\"size\": 10}\n\t)\n\thm.set_xticklabels(summary.columns, rotation=45)\n\thm.set_yticklabels(summary.index, rotation=0)\n\tplt.ylabel(\"Features\")\n\t# plt.savefig(output_dir + f\"summary_{filename}.png\", dpi=100, bbox_inches='tight')\n\tplt.savefig(output_dir + f\"{filename.replace('.csv', '')}.png\", dpi=100, bbox_inches='tight')\n\tplt.show(block=False)\n\n\n# # Redo plots\n# # =============================================\n# # Obtain results pkl\n# input_dir = './../vfp_v6_collinearity/'\n# results_dir = 'out-voto_spec.json-20200822T104154.938604/'\n# json_file = 'northwestern_spec_text_liwc_extremes.json' #'northwestern_spec_text_liwc.json'\n#\n# # Redo plots\n#\n# input_dir = input_dir+dirs[0]+'/shap-20200822T104159.176859/'\n# output_dir = input_dir\n#\n# for file in os.listdir(input_dir):#todo pasar nuevo report a pydra cluster\n# \tif file.endswith('.csv'):\n# \t\tsummary = pd.read_csv(input_dir+file, index_col=0)\n# \t\tplot_summary(summary, output_dir=output_dir, filename=file, plot_top_n_shap=16)\n# =============================================\n\n\n\n\nif __name__ == \"__main__\":\n\n\n\tinput_dir = './data/output/vfp_v8_wo_top5/'\n\n\tmodels = 4\n\tpermute_order = [False, True]\n\tpermute_order = permute_order * models\n\n\n\tdirs = [n for n in os.listdir(input_dir+'outputs/') if 'out-vfp' in n]\n\tdirs.sort()\n\n\n\t'''\n\timport glob\n\tdirs = glob.glob(input_dir + '*/out-vfp*', recursive=True)\n\tdirs.sort()\n\tdirs = [n.replace(input_dir+'outputs/','') for n in dirs]\n\t'''\n\n\tfor results_dir in dirs:\n\t\tjson_file = f\"specs/{results_dir.split('json')[0]+'json'}\".replace('out-vfp', 'vfp') #'northwestern_spec_text_liwc_extremes.json' #'northwestern_spec_text_liwc.json'\n\t\tresults_dir = f'outputs/{results_dir}/' # results_dir = 'outputs/out-vfp_spec_4models_both_if_3-19_explanations.json-20200910T072101.085324/'\n\t\twith open(input_dir+json_file, 'r') as f:\n\t\t\tspec_file = json.load(f)\n\n\t\tfeature_names = spec_file['x_indices']\n\t\tscore_names = [\"roc_auc_score\"] #[\"f1_score\", \"roc_auc_score\"] #todo obtain from json\n\n\t\t# for results_dir in dirs:\n\t\tfiles = os.listdir(input_dir+results_dir)\n\t\tresults_pkl = [n for n in files if 'results' in n][0]\n\t\twith open(os.path.join(input_dir,results_dir, results_pkl), 'rb') as f:\n\t\t\tresults = pickle.load(f)\n\n\t\toutput_dir = input_dir + results_dir\n\t\tfor score_i, score_name in enumerate(score_names):\n\t\t\tprint(results_dir)\n\t\t\tperformance_table(results, permute_order, output_dir, score_i=score_i, score_name = score_names[score_i], round = 2)\n\t\t\t# feature_importance_to_summary(results, permute_order, feature_names, output_dir)\n\t\t\t# permutation_importance_to_summary(results, permute_order, feature_names, output_dir)\n\n\n\t# ====================================================\n\t# Obtain results pkl\n\tmodels = 4\n\n\t# other\n\tpermute_order = [False, True]\n\tscore_names = [\"roc_auc_score\"] #todo obtain from json\n\tpermute_order = permute_order * models\n\n\tinput_dir = './../vfp_v7_indfact/outputs/'\n\tspec_dir = 
'./../vfp_v7_indfact/specs/'\n\tdata_types = ['both', 'speech', 'vowel']\n\tcollinearity_methods = ['if']\n\tthresholds_n = 9\n\tdirs = os.listdir(input_dir)\n\tdirs = [n for n in dirs if not n.startswith('.')]\n\tdirs.sort()\n\t# dirs = ['out-northwestern_spec_text_liwc_extremes.json-20200819T100712.772595']\n\t# \t\t\t\t# 'out-vfp_spec_4models_vowel.json-20200814T100938.800433',\n\t# \t\t\t\t# 'out-vfp_spec_4models_both.json-20200814T085556.295861']\n\n\n\n\n\t# this would have been done by pydraml\n\n\tfor collinearity_method in collinearity_methods :\n\t\tfor data_type in data_types:\n\t\t\tperformance_all = []\n\t\t\tperformance_median = []\n\n\t\t\tvars_count_by_thesh_id = []\n\t\t\tvars_by_thesh_id = []\n\t\t\t\t# if not 'out-' in results_dir or not data_type in results_dir or not '_'+collinearity_method in results_dir:\n\t\t\t\t# \tcontinue\n\t\t\t\t# data_type_file = results_dir.split('.json')[0].split('_')[-3]\n\t\t\t\t# collinearity_method_file = results_dir.split('.json')[0].split('_')[-2]\n\t\t\t\t#\n\t\t\t\t# # assert data_type ==data_type_file and collinearity_method == collinearity_method_file\n\t\t\t\t# job_array_id = results_dir.split('.json')[0].split('_')[-1]\n\n\n\t\t\tfor job_array_id in range(1,thresholds_n+1):\n\t\t\t\t# Load from spec file\n\n\t\t\t\twith open(spec_dir + f'vfp_spec_4models_{data_type}_{collinearity_method}_{job_array_id}.json', 'r') as f:\n\t\t\t\t\tspec_file = json.load(f)\n\n\t\t\t\tfeature_names = spec_file['x_indices']\n\n\n\t\t\t\tvars_count_by_thesh_id.append([job_array_id, len(feature_names)])\n\t\t\t\tvars_by_thesh_id.append([job_array_id, feature_names])\n\t\t\t\tmodels = len(spec_file['clf_info'])\n\t\t\t\tpermute_order = spec_file['permute'] # [False, True]\n\t\t\t\tpermute_order = permute_order * models\n\t\t\t\tscore_names = spec_file['metrics'] # [\"f1_score\", \"roc_auc_score\"]\n\n\t\t\t\t# Load results\n\t\t\t\tresults_dir = [n for n in os.listdir(input_dir) if (data_type in n and '_'+collinearity_method in n and '_'+str(job_array_id)+'.' 
in n)]\n\t\t\t\tif len(results_dir) == 1:\n\t\t\t\t\t# make sure it only found 1\n\t\t\t\t\tresults_dir = results_dir[0]\n\t\t\t\telse:\n\t\t\t\t\tprint('multiple')\n\t\t\t\t\tbreak\n\t\t\t\tfiles = os.listdir(input_dir+results_dir)\n\t\t\t\tresults_pkl = [n for n in files if 'results' in n][0]\n\t\t\t\twith open(os.path.join(input_dir,results_dir, results_pkl), 'rb') as f:\n\t\t\t\t\tresults = pickle.load(f)\n\n\t\t\t\tfeature_names_results = results[0][1].output.feature_names\n\t\t\t\tassert len(feature_names) == len(feature_names_results)\n\t\t\t\tprint(job_array_id, len(feature_names), len(feature_names_results))\n\n\t\t\t\t# create table of performance\n\t\t\t\toutput_dir = input_dir + results_dir + '/'\n\n\t\t\t\t# Obtain median\n\t\t\t\tfor score_i, score_name in enumerate(score_names):\n\t\t\t\t\tdf_median, df_all = performance_table(results, permute_order, output_dir, score_i=score_i, score_name = score_names[score_i], round = 2)\n\n\t\t\t\t\tdf_median['run_name'] = f'{data_type}_{collinearity_method}_{job_array_id}'\n\t\t\t\t\tdf_median['job_id'] = f'{job_array_id}'\n\n\t\t\t\t\tdf_all['run_name'] = [f'{data_type}_{collinearity_method}_{job_array_id}'] * df_all.shape[0]\n\t\t\t\t\tdf_all['job_id'] = [f'{job_array_id}'] * df_all.shape[0]\n\n\t\t\t\t\tperformance_median.append(df_median)\n\t\t\t\t\tperformance_all.append(df_all)\n\n\n\t\t\t\t# feature_importance_to_summary(results, permute_order, feature_names, output_dir)\n\t\t\t\t# permutation_importance_to_summary(results, permute_order, feature_names, output_dir)\n\n\n\n\t\t\tperformance_median_df = pd.concat(performance_median, axis=1).T\n\t\t\tperformance_median_df = performance_median_df.sort_index().round(2)\n\t\t\tperformance_median_df['vars_count'] = [n[1] for n in vars_count_by_thesh_id]\n\t\t\tperformance_median_df = performance_median_df.reset_index(drop=True)\n\t\t\tperformance_median_df.to_csv(input_dir+f'test_performance_median_{data_type}_{collinearity_method}.csv')\n\n\t\t\tperformance_all_df = pd.concat(performance_all, axis=0, ignore_index=True)\n\t\t\tperformance_all_df = performance_all_df.sort_index().round(2)\n\t\t\tperformance_all_df['vars_count'] = [n[1] for n in vars_count_by_thesh_id] * performance_all[0].shape[0]\n\t\t\tperformance_all_df = performance_all_df.reset_index(drop=True)\n\t\t\tperformance_all_df.to_csv(input_dir+f'test_performance_all_{data_type}_{collinearity_method}.csv')\n\n\n\n\n\n\n\t# Plot\n\t# =====\n\tboxpoints = 'outliers'\n\n\timport plotly.graph_objects as go\n\tfor collinearity_method in collinearity_methods:\n\t\tfor data_type in data_types:\n\t\t# for data_type in ['speech', 'vowel', 'both']:\n\t\t\tperformance_all_df = pd.read_csv(input_dir + f'test_performance_all_{data_type}_{collinearity_method}.csv', index_col = 0)\n\t\t\tx = performance_all_df.job_id.values\n\n\t\t\tfig = go.Figure()\n\n\t\t\tfig.add_trace(go.Box(\n\t\t\t y=performance_all_df.LogisticRegressionCV.values,\n\t\t\t x=x,\n\t\t\t name='Logistic Regression',\n\t\t\t\tboxpoints=boxpoints,\n\t\t\t # marker_color='#3D9970'\n\t\t\t))\n\t\t\tfig.add_trace(go.Box(\n\t\t\t y=performance_all_df.SGDClassifier.values,\n\t\t\t x=x,\n\t\t\t name='SGD',\n\t\t\t\tboxpoints=boxpoints,\n\t\t\t # marker_color='#FF4136'\n\t\t\t))\n\t\t\tfig.add_trace(go.Box(\n\t\t\t y=performance_all_df.MLPClassifier.values,\n\t\t\t x=x,\n\t\t\t name='MLP',\n\t\t\t\tboxpoints=boxpoints,\n\n\t\t\t # marker_color='blue'\n\t\t\t))\n\n\t\t\tfig.add_trace(go.Box(\n\t\t\t y=performance_all_df.RandomForestClassifier.values,\n\t\t\t x=x,\n\t\t\t 
name='Random Forest',\n\t\t\t\tboxpoints=boxpoints,\n\t\t\t\t# marker_color = '#FECB52'\n\t\t\t))\n\n\n\n\t\t\tfig.update_layout(\n\t\t\t\ttemplate='ggplot2',\n\t\t\t yaxis_title='ROC AUC score',\n\t\t\t\txaxis_title='Feature set size',\n\t\t\t\ttitle = f'{data_type} {collinearity_method}',\n\t\t\t\tboxmode='group', # group together boxes of the different traces for each value of x\n\t\t\t\tyaxis = dict(\n\n\t\t\t\t\trange=[0.3, 1],autorange=False\n\n\t\t\t\t),\n\n\t\t\t\txaxis = dict(\n\t\t\t\t\ttickvals = list(range(1,thresholds_n+1)),\n\t\t\t\t\tticktext = performance_all_df.vars_count[:thresholds_n].values),\n\n\t\t\t)\n\n\n\n\t\t\t# fig.show()\n\t\t\tfig.to_image(format=\"png\", engine=\"orca\")\n\t\t\tfig.write_image(input_dir+f'{data_type}_{collinearity_method}.png', scale=6)\n\n\n\t# Performance with all features\n\tinput_dir = './../vfp_v7_indfact/outputs/'\n\n\t# add performance to fig 5.\n\tinput_dir = './../../datum/vfp/vfp/data/output/vfp_v8_top1outof5/outputs/'\n\n\tdirs = os.listdir(input_dir)\n\tdirs.remove('.DS_Store')\n\tdirs.sort()\n\tfor d in dirs:\n\t\tdf = pd.read_csv(input_dir+d+'/test_performance_with_null_roc_auc_score.csv', index_col = 0).values[0][0]\n\t\tprint('=====')\n\t\tprint(d)\n\t\tprint(df.split('(')[0])\n\t\tprint('('+df.split('(')[1].split(';')[0]+')')\n\n" ]
[ [ "pandas.concat", "pandas.read_csv", "matplotlib.pyplot.figure", "numpy.median", "pandas.DataFrame", "numpy.percentile", "numpy.round", "matplotlib.pyplot.clf", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
photoszzt/onnxruntime
[ "523db6ef443b0a41de1401004ae4b394dad3314c" ]
[ "orttraining/orttraining/python/training/ortmodule/_io.py" ]
[ "# -------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n# --------------------------------------------------------------------------\n\nfrom collections import abc\nimport copy\nimport inspect\nimport torch\nimport warnings\nimport gc\n\nclass _OutputIdentityOp(torch.autograd.Function):\n '''Internal class used to prepend Identity ops in model's outputs\n\n This class is required to support ONNX models which passthrough [some of] the models's inputs\n directly to the graph output. This is an issue because ONNX Runtime cannot build proper\n gradient graph based on this pattern.\n\n Adding a direct Identity Op to the user model doesn't work as the ONNX exporter would optimize it away,\n resulting in the same issue.\n\n Therefore a custom Autograd function was introduced to add an Identity right before the output\n in a way the ONNX exporter will not optimize it away.\n\n Given the model below\n\n .. code-block:: python\n\n class PassthroughNet(torch.nn.Module):\n def __init__(self, input_size, hidden_size, num_classes):\n super(PassthroughNet, self).__init__()\n self.fc1_1 = torch.nn.Linear(input_size, hidden_size)\n self.relu1 = torch.nn.ReLU()\n self.fc1_2 = torch.nn.Linear(hidden_size, num_classes)\n def forward(self, input1, passthrough_input):\n out1 = self.fc1_2(self.relu1(self.fc1_1(input1)))\n # use shape from passthrough_input\n out1 = out1.view(passthrough_input.size()[0], -1)\n return out1, passthrough_input\n\n We can see `passthrough_input` is part of both model input and output and the resulting\n ONNX subgraph would contain something like `output2 -> output2`.\n\n By prepending each model output to an :class:`_OutputIdentityOp` op, the resulting\n onnx subgraph for this example would be `passthrough_input -> Identity -> output2`.\n\n TODO: Remove once PyTorch 1.8.2 or newer is released\n '''\n @staticmethod\n def forward(ctx, input):\n return torch.nn.Identity()(input)\n @staticmethod\n def backward(ctx, grad_output):\n return grad_output\n @staticmethod\n def symbolic(g, self):\n return g.op(\"Identity\", self)\n\nclass _PrimitiveType(object):\n _primitive_types = {int, bool, float}\n @staticmethod\n def is_primitive_type(value):\n return type(value) in _PrimitiveType._primitive_types\n\n @staticmethod\n def get_tensor(value, device):\n return torch.tensor(value, device=device)\n\n @staticmethod\n def get_primitive_dtype(value):\n # If `value` is a boolean, save the value of the boolean in dtype.\n # This way, if the value changes from one forward call to the next, the schema will mismatch,\n # and the model will be re-exported.\n return f\"{str(type(value))}_{value}\" if isinstance(value, bool) else str(type(value))\n\nclass _InputInfo(object):\n def __init__(self,\n names,\n shape,\n require_grad_names=None,\n dynamic_axes=None,\n schema=None,\n num_positionals=0,\n num_positionals_non_none=0,\n keyword_names=None):\n self.names = names\n self.shape = shape\n self.require_grad_names = require_grad_names if require_grad_names else []\n self.dynamic_axes = dynamic_axes if dynamic_axes else {}\n self.schema = schema if schema else []\n self.num_positionals = num_positionals\n self.num_positionals_non_none = num_positionals_non_none\n self.keyword_names = keyword_names\n\n def __repr__(self) -> str:\n return f'''_InputInfo class:\n \\tNames: {self.names}\n \\tShape: {self.shape}\n \\tRequire gradient: {self.require_grad_names}\n \\tDynamic axes: 
{self.dynamic_axes}\n \\tSchema: {self.schema}\n \\t#Positionals (total): {self.num_positionals}\n \\t#Positionals (non-None): {self.num_positionals_non_none}\n \\tKeyword names: {self.keyword_names}'''\n\n def flatten(self, args, kwargs, device):\n '''Flatten args and kwargs in a single tuple of tensors with strict ordering'''\n\n ret = [_PrimitiveType.get_tensor(arg, device) if _PrimitiveType.is_primitive_type(arg) else arg for arg in args]\n ret += [_PrimitiveType.get_tensor(kwargs[name], device) if _PrimitiveType.is_primitive_type(kwargs[name])\n else kwargs[name] for name in self.names if name in kwargs]\n\n return ret\n\n def unflatten(self, flat_args):\n '''Unflatten tuple of tensors into args and kwargs'''\n\n args = tuple(flat_args[:self.num_positionals])\n kwargs = {name: arg for name, arg in zip(self.names[self.num_positionals_non_none:], flat_args[self.num_positionals:]) \\\n if name in self.keyword_names}\n return args, kwargs\n\ndef _combine_input_buffers_initializers(params, onnx_input_names, input_info, buffer_names, inputs, kwargs, device):\n '''Creates forward `*inputs` list from user input and PyTorch initializers\n\n ONNX Runtime forward requires an ordered list of:\n * User input: computed from forward InferenceSession\n * Initializers: computed from original PyTorch model parameters\n '''\n\n # User inputs\n non_none_inputs = [inp for inp in inputs if inp is not None]\n buffer_names_dict = {buffer_name: inp for buffer_name, inp in buffer_names}\n result = []\n\n for input_idx, name in enumerate(onnx_input_names):\n inp = None\n if name in kwargs and kwargs[name] is not None:\n # Only use keywords coming from user that are expected by ONNX model\n inp = kwargs[name]\n\n if inp is None:\n try:\n # Only use positionals coming from user that are expected by ONNX model\n # if input_idx >= len(input_info.names), IndexError will be thrown\n if name != input_info.names[input_idx]:\n # When ONNX drops unused inputs, get correct index from user input\n # if name is not in input_info.names, ValueError will be thrown\n input_idx = input_info.names.index(name)\n inp = non_none_inputs[input_idx]\n except (IndexError, ValueError):\n # ONNX input name is not present in input_info.names.\n pass\n\n if inp is None:\n # Registered buffers are translated to user_input+initializer in ONNX\n try:\n inp = buffer_names_dict[name]\n except KeyError:\n # ONNX input name is not present in the registered buffer dict.\n pass\n\n if inp is not None:\n if _PrimitiveType.is_primitive_type(inp):\n inp = _PrimitiveType.get_tensor(inp, device)\n result.append(inp)\n else:\n raise RuntimeError(f'Input is present in ONNX graph but not provided: {name}.')\n\n # params is a list of all initializers known to the onnx graph\n result.extend(params)\n\n return result\n\n\ndef deepcopy_model_input(*inputs, **kwargs):\n def extract_tensor(value):\n if isinstance(value, torch.Tensor):\n if value.requires_grad:\n return value.data.requires_grad_()\n else:\n return value.data\n else:\n return value\n sample_inputs_copy = [extract_tensor(value) for value in inputs]\n sample_inputs_copy = copy.deepcopy(tuple(sample_inputs_copy))\n\n sample_kwargs_copy = {}\n for name, value in kwargs.items():\n sample_kwargs_copy[name] = extract_tensor(value)\n sample_kwargs_copy = copy.deepcopy(sample_kwargs_copy)\n\n return sample_inputs_copy, sample_kwargs_copy\n\n\nclass _TensorStub(object):\n '''Tensor stub class used to represent model's input or output'''\n\n def __init__(self, name=None, dtype=None, shape=None, 
shape_dims=None):\n self.name = name\n self.dtype = dtype\n self.shape = shape\n self.shape_dims = shape_dims\n\n def __repr__(self) -> str:\n result = '_TensorStub('\n if self.name is not None:\n result += f'name={self.name}'\n if self.dtype is not None:\n if result[-1] != '(':\n result += ', '\n result += f'dtype={self.dtype}'\n if self.shape is not None:\n if result[-1] != '(':\n result += ', '\n result += f'shape={self.shape}'\n if self.shape_dims is not None:\n if result[-1] != '(':\n result += ', '\n result += f'shape_dims={self.shape_dims}'\n result += ')'\n return result\n\n def __eq__(self, other):\n if not other:\n return False\n elif not isinstance(other, _TensorStub):\n raise NotImplemented('_TensorStub must only be compared to another _TensorStub instance!')\n elif self.name != other.name:\n return False\n elif self.dtype != other.dtype:\n return False\n elif self.shape != other.shape:\n return False\n elif self.shape_dims != other.shape_dims:\n return False\n return True\n\n\ndef unflatten_user_output(output_schema, outputs):\n \"\"\"Follows the schema to generate an output that is expected by the user\"\"\"\n\n def _replace_stub_with_tensor_value(user_output, outputs, output_idx):\n # Recursively traverse across user_output and replace all _TensorStub\n # with torch.Tensor values from outputs following output_idx\n\n if user_output is None:\n return None\n elif isinstance(user_output, _TensorStub):\n output_idx[0] += 1\n return outputs[output_idx[0]-1]\n\n if isinstance(user_output, abc.Sequence):\n sequence_type = type(user_output)\n user_output = list(user_output)\n for idx in range(len(user_output)):\n user_output[idx] = _replace_stub_with_tensor_value(user_output[idx], outputs, output_idx)\n try:\n # namedtuple can be created by passing the list sequence to method _make\n user_output = sequence_type._make(user_output)\n except AttributeError:\n # If attribute error encountered, create the sequence directly\n user_output = sequence_type(user_output)\n elif isinstance(user_output, abc.Mapping):\n for key in sorted(user_output):\n user_output[key] = _replace_stub_with_tensor_value(user_output[key], outputs, output_idx)\n else:\n raise TypeError(f'ORTModule does not support the following model output type {type(user_output)}.')\n\n return user_output\n\n # Replace every _TensorStub value in the schema with the torch.Tensor outputs calculated\n output_schema_copy = copy.deepcopy(output_schema)\n\n # It is expected that the outputs are ordered in the way defined in the exported onnx model\n # which is the order in which the output schema was saved.\n output_idx = [0]\n user_output = _replace_stub_with_tensor_value(output_schema_copy, outputs, output_idx)\n return user_output\n\n\ndef _extract_schema(data):\n \"\"\"Extract the data schema by replacing every torch.Tensor value with _TensorStub\"\"\"\n\n if data is None:\n return None\n elif _PrimitiveType.is_primitive_type(data):\n return _TensorStub(dtype=_PrimitiveType.get_primitive_dtype(data), shape_dims=0)\n # Depth first traversal to iterate over the data to replace every tensor with a stub\n elif isinstance(data, torch.Tensor):\n return _TensorStub(dtype=str(data.dtype), shape_dims=len(data.size()))\n\n if isinstance(data, abc.Sequence) and not isinstance(data, str):\n sequence_type = type(data)\n data = list(data)\n for idx in range(len(data)):\n data[idx] = _extract_schema(data[idx])\n try:\n # namedtuple can be created by passing the list sequence to method _make\n data = sequence_type._make(data)\n except 
AttributeError:\n # If attribute error encountered, create the sequence directly\n data = sequence_type(data)\n elif isinstance(data, abc.Mapping):\n for key in sorted(data):\n data[key] = _extract_schema(data[key])\n else:\n raise TypeError(f'ORTModule does not support the following model data type {type(data)}')\n return data\n\n\ndef _parse_outputs_and_extract_names_and_dynamic_axes(module_output):\n \"\"\"Parses through the module output and returns output names and dynamic axes\"\"\"\n\n def _populate_output_names_and_dynamic_axes(output, output_names, output_dynamic_axes, output_idx):\n # Depth first traversal to traverse through the entire output collecting output names and dynamic axes\n\n if output is None:\n return\n elif isinstance(output, torch.Tensor):\n # Naming the outputs with a hyphen ensures that there can be no input with the same\n # name, preventing collisions with other NodeArgs (for example an input to forward called output0)\n output_name = f'output-{output_idx[0]}'\n output_idx[0] += 1\n output_names.append(output_name)\n output_dynamic_axes[output_name] = {}\n for dim_idx in range(len(output.shape)):\n output_dynamic_axes[output_name].update({dim_idx: f'{output_name}_dim{dim_idx}'})\n return\n\n if isinstance(output, abc.Sequence):\n for value in output:\n _populate_output_names_and_dynamic_axes(value, output_names, output_dynamic_axes, output_idx)\n elif isinstance(output, abc.Mapping):\n for _, value in sorted(output.items()):\n _populate_output_names_and_dynamic_axes(value, output_names, output_dynamic_axes, output_idx)\n else:\n raise TypeError(f'ORTModule does not support the following model output type {type(output)}')\n\n output_names = []\n output_dynamic_axes = {}\n output_idx = [0]\n _populate_output_names_and_dynamic_axes(module_output, output_names, output_dynamic_axes, output_idx)\n\n return output_names, output_dynamic_axes\n\n\ndef _transform_output_to_flat_tuple(data):\n \"\"\"Converts the data to a flat tuple by iterating over the entire data structure\"\"\"\n\n def _flatten_data(data, flat_data):\n # Recursively traverse over the data and populate the flat_data with torch.Tensors\n\n if data is None:\n return\n elif isinstance(data, torch.Tensor):\n identity = _OutputIdentityOp.apply\n flat_data.append(identity(data))\n elif isinstance(data, abc.Sequence):\n for value in data:\n _flatten_data(value, flat_data)\n elif isinstance(data, abc.Mapping):\n for _, value in sorted(data.items()):\n _flatten_data(value, flat_data)\n else:\n raise TypeError(f'ORTModule does not support the following data type {type(data)}.')\n\n flat_data = []\n _flatten_data(data, flat_data)\n return tuple(flat_data)\n\n\nclass _FlattenedModule(torch.nn.Module):\n def __init__(self, original_module):\n super(_FlattenedModule, self).__init__()\n self._original_module = original_module\n\n # Before `forward` is called, _ort_module must be assigned\n # Updated input info is needed to expand args into *args, **kwargs\n self._input_info = None\n\n def forward(self, *args):\n new_args, new_kwargs = self._input_info.unflatten(args)\n return _transform_output_to_flat_tuple(self._original_module(*new_args, **new_kwargs))\n\n\ndef parse_inputs_for_onnx_export(all_input_parameters, onnx_graph, inputs, kwargs):\n\n def _add_dynamic_shape(name, input):\n dynamic_axes[name] = {}\n for dim_idx in range(len(input.shape)):\n dynamic_axes[name].update({dim_idx: f'{name}_dim{dim_idx}'})\n return dynamic_axes\n\n def _add_input(name, input, onnx_graph, onnx_graph_input_names):\n if input is 
None:\n # Drop all None inputs.\n return\n\n # InputInfo should contain all the names irrespective of whether they are\n # a part of the onnx graph or not.\n input_names.append(name)\n\n if (onnx_graph is None or name in onnx_graph_input_names) and isinstance(input, torch.Tensor):\n if input.requires_grad:\n input_names_require_grad.append(name)\n dynamic_axes.update(_add_dynamic_shape(name, input))\n input_shape.append(list(input.size()))\n\n # Ignore optional inputs explicitly specified as None\n # ONNX exporter may remove unused inputs\n onnx_graph_input_names = []\n if onnx_graph is not None:\n onnx_graph_input_names = {inp.name for inp in onnx_graph.graph.input}\n\n input_names = []\n dynamic_axes = {}\n input_names_require_grad = []\n input_shape = []\n var_positional_idx = 0\n\n for input_idx, input_parameter in enumerate(all_input_parameters):\n if input_parameter.kind == inspect.Parameter.VAR_POSITIONAL:\n # VAR_POSITIONAL parameter carries all *args parameters from original forward method\n\n for args_i in range(input_idx, len(inputs)):\n name = f'{input_parameter.name}_{var_positional_idx}'\n var_positional_idx += 1\n inp = inputs[args_i]\n _add_input(name, inp, onnx_graph, onnx_graph_input_names)\n elif input_parameter.kind == inspect.Parameter.POSITIONAL_ONLY or\\\n input_parameter.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD or\\\n input_parameter.kind == inspect.Parameter.KEYWORD_ONLY:\n # All positional non-*args and non-**kwargs are processed here\n name = input_parameter.name\n inp = None\n input_idx += var_positional_idx\n if input_idx < len(inputs) and inputs[input_idx] is not None:\n inp = inputs[input_idx]\n elif name in kwargs and kwargs[name] is not None:\n inp = kwargs[name]\n _add_input(name, inp, onnx_graph, onnx_graph_input_names)\n elif input_parameter.kind == inspect.Parameter.VAR_KEYWORD:\n # **kwargs is always the last argument of forward()\n for name,inp in kwargs.items():\n if name not in input_names:\n _add_input(name, inp, onnx_graph, onnx_graph_input_names)\n\n # Shallow copy is ok as we need the data structure, not the content\n schema = _extract_schema({'args': copy.copy(inputs), 'kwargs': copy.copy(kwargs)})\n\n return _InputInfo(names=input_names,\n shape=input_shape,\n require_grad_names=input_names_require_grad,\n dynamic_axes=dynamic_axes,\n schema=schema,\n num_positionals=len(inputs),\n num_positionals_non_none=len([i for i in inputs if i is not None]),\n keyword_names=kwargs.keys())\n\n\ndef parse_outputs_for_onnx_export_and_extract_schema(module, inputs, kwargs):\n\n # Do an inference to grab outputs\n is_train_mode = module.training\n module.eval()\n output_names = None\n output_dynamic_axes = None\n is_deepcopy = False\n with torch.no_grad():\n # Deepcopy inputs, since input values may change after model run.\n sample_inputs_copy, sample_kwargs_copy = deepcopy_model_input(*inputs, **kwargs)\n try:\n # Deepcopy model, in case model is stateful and changes after model run.\n model_copy = copy.deepcopy(module)\n is_deepcopy = True\n except Exception:\n model_copy = module\n warnings.warn(\"This model cannot be deep copied (or pickled), \"\n \"which is a required step for stateful models to be properly exported to ONNX.\"\n \" Compute will continue, but unexpected results may occur!\")\n\n sample_outputs = model_copy(*sample_inputs_copy, **sample_kwargs_copy)\n\n # Parse the output and extract the output_names and output_dynamic_axes to be used for onnx export\n output_names, output_dynamic_axes = 
_parse_outputs_and_extract_names_and_dynamic_axes(sample_outputs)\n if is_train_mode:\n module.train()\n output_schema = _extract_schema(sample_outputs)\n if is_deepcopy:\n del model_copy\n gc.collect()\n # Return output names, output dynamic axes and output schema\n return output_names, output_dynamic_axes, output_schema\n" ]
[ [ "torch.nn.Identity", "torch.no_grad", "torch.tensor" ] ]
PhiladelphiaController/covid19-forecaster
[ "e2b4718b51c4b250db74269a73451f616db43bb5" ]
[ "covid19_forecaster/forecasters.py" ]
[ "import pandas as pd\n\n\ndef check_date_bounds(date, start_date, stop_date):\n \"\"\"Check the bounds of a specific date.\"\"\"\n\n # Check min date\n if date < pd.to_datetime(start_date):\n date_str = date.strftime(\"%Y-%m-%d\")\n raise ValueError(\n f\"Date {date_str} before min forecast date ('{start_date}')\"\n )\n\n # Check max date\n if date > pd.to_datetime(stop_date):\n date_str = date.strftime(\"%Y-%m-%d\")\n raise ValueError(\n f\"Date {date_str} after max forecast date ('{stop_date}')\"\n )\n\n\nclass DefaultForecaster:\n \"\"\"\n Default forecaster to make a prediction based on a\n decline from a baseline forecast.\n \"\"\"\n\n ASSUMPTIONS = None\n\n def get_forecast_value(self, date, baseline, scenario):\n \"\"\"\n For a given scenario (and optionally sector), return the revenue\n decline from the baseline forecast for the specific date.\n\n Parameters\n ----------\n date : pandas.Timestamp\n the date object for the month to forecast\n \"\"\"\n # Check inputs\n assert self.ASSUMPTIONS is not None\n if isinstance(self.ASSUMPTIONS, dict):\n assert scenario is not None\n\n # Check bounds of the date\n check_date_bounds(date, self.forecast_start, self.forecast_stop)\n\n # Get the scenario assumptions\n declines = self.ASSUMPTIONS[scenario]\n\n # Check length\n if len(self.forecast_dates) != len(declines):\n raise ValueError(\n f\"Size mismatch between forecast dates (length={len(self.forecast_dates)}) \"\n f\"and forecast declines (length={len(declines)})\"\n )\n\n # Get the matching index\n # Default behavior: find the PREVIOUS index value if no exact match.\n i = self.forecast_dates.get_loc(date, method=\"ffill\")\n\n # Retune 1 - decline\n return baseline * (1 - declines[i])\n\n\nclass NoBaselineForecasterBySector:\n \"\"\"\n Default forecaster to make a prediction based on a\n decline from a baseline forecast.\n \"\"\"\n\n ASSUMPTIONS = None\n\n def get_forecast_value(self, date, baseline, scenario):\n \"\"\"\n For a given scenario (and optionally sector), return the revenue\n decline from the baseline forecast for the specific date.\n\n Parameters\n ----------\n date : pandas.Timestamp\n the date object for the month to forecast\n \"\"\"\n # Check inputs\n assert self.ASSUMPTIONS is not None\n if isinstance(self.ASSUMPTIONS, dict):\n assert scenario is not None\n\n # Check bounds of the date\n check_date_bounds(date, self.forecast_start, self.forecast_stop)\n\n # Get the scenario assumptions\n values = self.ASSUMPTIONS[scenario]\n\n # Get the matching index\n # Default behavior: find the PREVIOUS index value if no exact match.\n i = self.forecast_dates.get_loc(date, method=\"ffill\")\n\n out = baseline.copy()\n for sector in out.index:\n\n sector_values = values[sector]\n if len(self.forecast_dates) != len(sector_values):\n raise ValueError(\n f\"Size mismatch between forecast dates (length={len(self.forecast_dates)}) \"\n f\"and forecast declines (length={len(sector_values)})\"\n )\n\n out.loc[sector] = sector_values[i]\n\n return out\n\n\nclass SectorForecaster:\n \"\"\"\n Default sector-based forecaster to make a prediction\n based on a decline from a baseline forecast.\n \"\"\"\n\n GROUPS = None\n ASSUMPTIONS = None\n\n def get_forecast_value(self, date, baseline, scenario):\n \"\"\"\n For a given scenario (and optionally sector), return the revenue\n decline from the baseline forecast for the specific date.\n\n Parameters\n ----------\n date : pandas.Timestamp\n the date object for the month to forecast\n \"\"\"\n # Check inputs\n assert self.ASSUMPTIONS is not None\n 
assert self.GROUPS is not None\n\n # Check bounds of the date\n check_date_bounds(date, self.forecast_start, self.forecast_stop)\n\n # Get the scenario assumptions\n declines = self.ASSUMPTIONS[scenario]\n\n # Get the matching index\n # Default behavior: find the PREVIOUS index value if no exact match.\n i = self.forecast_dates.get_loc(date, method=\"ffill\")\n\n out = baseline.copy()\n for sector in out.index:\n\n # Get the group label for this sector\n group = \"default\"\n for label in self.GROUPS:\n if sector in self.GROUPS[label]:\n group = label\n break\n\n sector_declines = declines[group]\n if len(self.forecast_dates) != len(sector_declines):\n raise ValueError(\n f\"Size mismatch between forecast dates (length={len(self.forecast_dates)}) \"\n f\"and forecast declines (length={len(sector_declines)})\"\n )\n\n # Multiply by 1 - decline\n out.loc[sector] *= 1 - sector_declines[i]\n\n return out\n" ]
[ [ "pandas.to_datetime" ] ]
Sillte/fairyimage
[ "1b84136984d392b187712333064da707e42bbb6e" ]
[ "src/fairyimage/captioner.py" ]
[ "from typing import Dict, List\nimport numpy as np\nfrom PIL import Image \n\nimport fairyimage as fi\nfrom fairyimage import AlignMode\n\nclass Captioner:\n \"\"\"Present images with words.\"\"\"\n\n def __init__(\n self,\n fontsize=None,\n backcolor=None,\n frame_width=None,\n frame_color=None,\n align=AlignMode(\"center\"),\n ):\n self.fontsize = fontsize # The fontsize of logo.\n self.backcolor = backcolor\n self.frame_width = None\n self.frame_color = None\n self.align = align\n\n def __call__(self, word_to_image: Dict[str, Image.Image]):\n # Firstly, I'd like to realize the most crude idea.\n\n # parameters which may require modification based on `word_to_image`.\n fontsize = self.to_fontsize(self.fontsize, word_to_image)\n\n word_to_logo = self.make_logos(word_to_image, fontsize)\n if self.frame_width:\n word_to_image = {word: fi.frame(image,\n color=self.frame_color,\n width=self.frame_width,\n inner=False) for word, image in word_to_image.items()}\n images = [\n fi.vstack((word_to_logo[word], word_to_image[word]), align=\"center\")\n for word in word_to_logo\n ]\n return fi.hstack(images, align=\"start\")\n\n def make_logos(self, word_to_image, fontsize) -> Dict[str, Image.Image]:\n word_to_logo = dict()\n for word, _ in word_to_image.items():\n logo = fi.make_logo(word, fontsize=fontsize, backcolor=self.backcolor)\n word_to_logo[word] = logo\n return word_to_logo\n\n def to_fontsize(self, fontsize, word_to_image):\n if isinstance(fontsize, (int, float)):\n return int(fontsize)\n if fontsize is None:\n sizes = [image.size for image in word_to_image.values()]\n heights = [size[1] for size in sizes]\n widths = [size[0] for size in sizes]\n return round(np.mean(heights + widths) * 0.15)\n raise ValueError(\"Specification of `fontsize` is invalid.\", fontsize)\n\n\ndef captionize(word_to_image: Dict[str, Image.Image], align=AlignMode(\"center\")):\n captioner = Captioner(align=align)\n return captioner(word_to_image)\n\n" ]
[ [ "numpy.mean" ] ]
tsbiosky/Imbalanced-Dataset-Project
[ "26037dc1edc51228c22372638a1187f5f0ae15e4" ]
[ "predictor.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\nimport cv2\nimport torch\nfrom torchvision import transforms as T\nfrom torchvision.transforms import functional as F\nfrom maskrcnn_benchmark.modeling.detector import build_detection_model\nfrom maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer\nfrom maskrcnn_benchmark.structures.image_list import to_image_list\nfrom maskrcnn_benchmark.modeling.roi_heads.mask_head.inference import Masker\nfrom maskrcnn_benchmark import layers as L\nfrom maskrcnn_benchmark.utils import cv2_util\n\nclass Resize(object):\n def __init__(self, min_size, max_size):\n self.min_size = min_size\n self.max_size = max_size\n\n # modified from torchvision to add support for max size\n def get_size(self, image_size):\n w, h = image_size\n size = self.min_size\n max_size = self.max_size\n if max_size is not None:\n min_original_size = float(min((w, h)))\n max_original_size = float(max((w, h)))\n if max_original_size / min_original_size * size > max_size:\n size = int(round(max_size * min_original_size / max_original_size))\n\n if (w <= h and w == size) or (h <= w and h == size):\n return (h, w)\n\n if w < h:\n ow = size\n oh = int(size * h / w)\n else:\n oh = size\n ow = int(size * w / h)\n\n return (oh, ow)\n\n def __call__(self, image):\n size = self.get_size(image.size)\n image = F.resize(image, size)\n return image\nclass COCODemo(object):\n # COCO categories for pretty print\n CATEGORIES = [\n \"__background\",\n \"person\",\n \"bicycle\",\n \"car\",\n \"motorcycle\",\n \"airplane\",\n \"bus\",\n \"train\",\n \"truck\",\n \"boat\",\n \"traffic light\",\n \"fire hydrant\",\n \"stop sign\",\n \"parking meter\",\n \"bench\",\n \"bird\",\n \"cat\",\n \"dog\",\n \"horse\",\n \"sheep\",\n \"cow\",\n \"elephant\",\n \"bear\",\n \"zebra\",\n \"giraffe\",\n \"backpack\",\n \"umbrella\",\n \"handbag\",\n \"tie\",\n \"suitcase\",\n \"frisbee\",\n \"skis\",\n \"snowboard\",\n \"sports ball\",\n \"kite\",\n \"baseball bat\",\n \"baseball glove\",\n \"skateboard\",\n \"surfboard\",\n \"tennis racket\",\n \"bottle\",\n \"wine glass\",\n \"cup\",\n \"fork\",\n \"knife\",\n \"spoon\",\n \"bowl\",\n \"banana\",\n \"apple\",\n \"sandwich\",\n \"orange\",\n \"broccoli\",\n \"carrot\",\n \"hot dog\",\n \"pizza\",\n \"donut\",\n \"cake\",\n \"chair\",\n \"couch\",\n \"potted plant\",\n \"bed\",\n \"dining table\",\n \"toilet\",\n \"tv\",\n \"laptop\",\n \"mouse\",\n \"remote\",\n \"keyboard\",\n \"cell phone\",\n \"microwave\",\n \"oven\",\n \"toaster\",\n \"sink\",\n \"refrigerator\",\n \"book\",\n \"clock\",\n \"vase\",\n \"scissors\",\n \"teddy bear\",\n \"hair drier\",\n \"toothbrush\",\n ]\n\n def __init__(\n self,\n cfg,\n confidence_threshold=0.7,\n show_mask_heatmaps=False,\n masks_per_dim=2,\n min_image_size=224,\n weight_loading = None\n ):\n self.cfg = cfg.clone()\n self.model = build_detection_model(cfg)\n self.model.eval()\n self.device = torch.device(cfg.MODEL.DEVICE)\n self.model.to(self.device)\n self.min_image_size = min_image_size\n\n save_dir = cfg.OUTPUT_DIR\n checkpointer = DetectronCheckpointer(cfg, self.model, save_dir=save_dir)\n _ = checkpointer.load(cfg.MODEL.WEIGHT)\n \n if weight_loading:\n print('Loading weight from {}.'.format(weight_loading))\n _ = checkpointer._load_model(torch.load(weight_loading))\n \n self.transforms = self.build_transform()\n\n mask_threshold = -1 if show_mask_heatmaps else 0.5\n self.masker = Masker(threshold=mask_threshold, padding=1)\n\n # used to make colors for each class\n self.palette = 
torch.tensor([2 ** 25 - 1, 2 ** 15 - 1, 2 ** 21 - 1])\n\n self.cpu_device = torch.device(\"cpu\")\n self.confidence_threshold = confidence_threshold\n self.show_mask_heatmaps = show_mask_heatmaps\n self.masks_per_dim = masks_per_dim\n\n def build_transform(self):\n \"\"\"\n Creates a basic transformation that was used to train the models\n \"\"\"\n cfg = self.cfg\n\n # we are loading images with OpenCV, so we don't need to convert them\n # to BGR, they are already! So all we need to do is to normalize\n # by 255 if we want to convert to BGR255 format, or flip the channels\n # if we want it to be in RGB in [0-1] range.\n if cfg.INPUT.TO_BGR255:\n to_bgr_transform = T.Lambda(lambda x: x * 255)\n else:\n to_bgr_transform = T.Lambda(lambda x: x[[2, 1, 0]])\n\n normalize_transform = T.Normalize(\n mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD\n )\n min_size = cfg.INPUT.MIN_SIZE_TEST\n max_size = cfg.INPUT.MAX_SIZE_TEST\n transform = T.Compose(\n [\n T.ToPILImage(),\n Resize(min_size, max_size),\n T.ToTensor(),\n to_bgr_transform,\n normalize_transform,\n ]\n )\n return transform\n\n def run_on_opencv_image(self, image):\n \"\"\"\n Arguments:\n image (np.ndarray): an image as returned by OpenCV\n\n Returns:\n prediction (BoxList): the detected objects. Additional information\n of the detection properties can be found in the fields of\n the BoxList via `prediction.fields()`\n \"\"\"\n predictions = self.compute_prediction(image)\n top_predictions = self.select_top_predictions(predictions)\n\n result = image.copy()\n if self.show_mask_heatmaps:\n return self.create_mask_montage(result, top_predictions)\n result = self.overlay_boxes(result, top_predictions)\n if self.cfg.MODEL.MASK_ON:\n result = self.overlay_mask(result, top_predictions)\n if self.cfg.MODEL.KEYPOINT_ON:\n result = self.overlay_keypoints(result, top_predictions)\n result = self.overlay_class_names(result, top_predictions)\n\n return result,top_predictions\n\n def compute_prediction(self, original_image):\n \"\"\"\n Arguments:\n original_image (np.ndarray): an image as returned by OpenCV\n\n Returns:\n prediction (BoxList): the detected objects. 
Additional information\n of the detection properties can be found in the fields of\n the BoxList via `prediction.fields()`\n \"\"\"\n # apply pre-processing to image\n image = self.transforms(original_image)\n # convert to an ImageList, padded so that it is divisible by\n # cfg.DATALOADER.SIZE_DIVISIBILITY\n image_list = to_image_list(image, self.cfg.DATALOADER.SIZE_DIVISIBILITY)\n image_list = image_list.to(self.device)\n # compute predictions\n with torch.no_grad():\n predictions = self.model(image_list)\n predictions = [o.to(self.cpu_device) for o in predictions]\n\n # always single image is passed at a time\n prediction = predictions[0]\n\n # reshape prediction (a BoxList) into the original image size\n height, width = original_image.shape[:-1]\n prediction = prediction.resize((width, height))\n\n if prediction.has_field(\"mask\"):\n # if we have masks, paste the masks in the right position\n # in the image, as defined by the bounding boxes\n masks = prediction.get_field(\"mask\")\n # always single image is passed at a time\n masks = self.masker([masks], [prediction])[0]\n prediction.add_field(\"mask\", masks)\n return prediction\n\n def select_top_predictions(self, predictions):\n \"\"\"\n Select only predictions which have a `score` > self.confidence_threshold,\n and returns the predictions in descending order of score\n\n Arguments:\n predictions (BoxList): the result of the computation by the model.\n It should contain the field `scores`.\n\n Returns:\n prediction (BoxList): the detected objects. Additional information\n of the detection properties can be found in the fields of\n the BoxList via `prediction.fields()`\n \"\"\"\n scores = predictions.get_field(\"scores\")\n keep = torch.nonzero(scores > self.confidence_threshold).squeeze(1)\n predictions = predictions[keep]\n scores = predictions.get_field(\"scores\")\n _, idx = scores.sort(0, descending=True)\n return predictions[idx]\n\n def compute_colors_for_labels(self, labels):\n \"\"\"\n Simple function that adds fixed colors depending on the class\n \"\"\"\n colors = labels[:, None] * self.palette\n colors = (colors % 255).numpy().astype(\"uint8\")\n return colors\n\n def overlay_boxes(self, image, predictions):\n \"\"\"\n Adds the predicted boxes on top of the image\n\n Arguments:\n image (np.ndarray): an image as returned by OpenCV\n predictions (BoxList): the result of the computation by the model.\n It should contain the field `labels`.\n \"\"\"\n labels = predictions.get_field(\"labels\")\n boxes = predictions.bbox\n\n colors = self.compute_colors_for_labels(labels).tolist()\n\n for box, color in zip(boxes, colors):\n box = box.to(torch.int64)\n top_left, bottom_right = box[:2].tolist(), box[2:].tolist()\n image = cv2.rectangle(\n image, tuple(top_left), tuple(bottom_right), tuple(color), 1\n )\n\n return image\n\n def overlay_mask(self, image, predictions):\n \"\"\"\n Adds the instances contours for each predicted object.\n Each label has a different color.\n\n Arguments:\n image (np.ndarray): an image as returned by OpenCV\n predictions (BoxList): the result of the computation by the model.\n It should contain the field `mask` and `labels`.\n \"\"\"\n masks = predictions.get_field(\"mask\").numpy()\n labels = predictions.get_field(\"labels\")\n \n colors = self.compute_colors_for_labels(labels).tolist()\n\n for mask, color in zip(masks, colors):\n thresh = mask[0, :, :, None].astype(np.uint8)\n contours, hierarchy = cv2_util.findContours(\n thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE\n )\n image = 
cv2.drawContours(image, contours, -1, color, 3)\n\n composite = image\n\n return composite\n\n def overlay_keypoints(self, image, predictions):\n keypoints = predictions.get_field(\"keypoints\")\n kps = keypoints.keypoints\n scores = keypoints.get_field(\"logits\")\n kps = torch.cat((kps[:, :, 0:2], scores[:, :, None]), dim=2).numpy()\n for region in kps:\n image = vis_keypoints(image, region.transpose((1, 0)))\n return image\n\n def create_mask_montage(self, image, predictions):\n \"\"\"\n Create a montage showing the probability heatmaps for each one one of the\n detected objects\n\n Arguments:\n image (np.ndarray): an image as returned by OpenCV\n predictions (BoxList): the result of the computation by the model.\n It should contain the field `mask`.\n \"\"\"\n masks = predictions.get_field(\"mask\")\n masks_per_dim = self.masks_per_dim\n masks = L.interpolate(\n masks.float(), scale_factor=1 / masks_per_dim\n ).byte()\n height, width = masks.shape[-2:]\n max_masks = masks_per_dim ** 2\n masks = masks[:max_masks]\n # handle case where we have less detections than max_masks\n if len(masks) < max_masks:\n masks_padded = torch.zeros(max_masks, 1, height, width, dtype=torch.uint8)\n masks_padded[: len(masks)] = masks\n masks = masks_padded\n masks = masks.reshape(masks_per_dim, masks_per_dim, height, width)\n result = torch.zeros(\n (masks_per_dim * height, masks_per_dim * width), dtype=torch.uint8\n )\n for y in range(masks_per_dim):\n start_y = y * height\n end_y = (y + 1) * height\n for x in range(masks_per_dim):\n start_x = x * width\n end_x = (x + 1) * width\n result[start_y:end_y, start_x:end_x] = masks[y, x]\n return cv2.applyColorMap(result.numpy(), cv2.COLORMAP_JET)\n\n def overlay_class_names(self, image, predictions):\n \"\"\"\n Adds detected class names and scores in the positions defined by the\n top-left corner of the predicted bounding box\n\n Arguments:\n image (np.ndarray): an image as returned by OpenCV\n predictions (BoxList): the result of the computation by the model.\n It should contain the field `scores` and `labels`.\n \"\"\"\n scores = predictions.get_field(\"scores\").tolist()\n labels = predictions.get_field(\"labels\").tolist()\n labels = [self.CATEGORIES[i] for i in labels]\n boxes = predictions.bbox\n\n template = \"{}: {:.2f}\"\n for box, score, label in zip(boxes, scores, labels):\n x, y = box[:2]\n s = template.format(label, score)\n cv2.putText(\n image, s, (x, y), cv2.FONT_HERSHEY_SIMPLEX, .5, (255, 255, 255), 1\n )\n\n return image\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom maskrcnn_benchmark.structures.keypoint import PersonKeypoints\n\ndef vis_keypoints(img, kps, kp_thresh=2, alpha=0.7):\n \"\"\"Visualizes keypoints (adapted from vis_one_image).\n kps has shape (4, #keypoints) where 4 rows are (x, y, logit, prob).\n \"\"\"\n dataset_keypoints = PersonKeypoints.NAMES\n kp_lines = PersonKeypoints.CONNECTIONS\n\n # Convert from plt 0-1 RGBA colors to 0-255 BGR colors for opencv.\n cmap = plt.get_cmap('rainbow')\n colors = [cmap(i) for i in np.linspace(0, 1, len(kp_lines) + 2)]\n colors = [(c[2] * 255, c[1] * 255, c[0] * 255) for c in colors]\n\n # Perform the drawing on a copy of the image, to allow for blending.\n kp_mask = np.copy(img)\n\n # Draw mid shoulder / mid hip first for better visualization.\n mid_shoulder = (\n kps[:2, dataset_keypoints.index('right_shoulder')] +\n kps[:2, dataset_keypoints.index('left_shoulder')]) / 2.0\n sc_mid_shoulder = np.minimum(\n kps[2, dataset_keypoints.index('right_shoulder')],\n kps[2, 
dataset_keypoints.index('left_shoulder')])\n mid_hip = (\n kps[:2, dataset_keypoints.index('right_hip')] +\n kps[:2, dataset_keypoints.index('left_hip')]) / 2.0\n sc_mid_hip = np.minimum(\n kps[2, dataset_keypoints.index('right_hip')],\n kps[2, dataset_keypoints.index('left_hip')])\n nose_idx = dataset_keypoints.index('nose')\n if sc_mid_shoulder > kp_thresh and kps[2, nose_idx] > kp_thresh:\n cv2.line(\n kp_mask, tuple(mid_shoulder), tuple(kps[:2, nose_idx]),\n color=colors[len(kp_lines)], thickness=2, lineType=cv2.LINE_AA)\n if sc_mid_shoulder > kp_thresh and sc_mid_hip > kp_thresh:\n cv2.line(\n kp_mask, tuple(mid_shoulder), tuple(mid_hip),\n color=colors[len(kp_lines) + 1], thickness=2, lineType=cv2.LINE_AA)\n\n # Draw the keypoints.\n for l in range(len(kp_lines)):\n i1 = kp_lines[l][0]\n i2 = kp_lines[l][1]\n p1 = kps[0, i1], kps[1, i1]\n p2 = kps[0, i2], kps[1, i2]\n if kps[2, i1] > kp_thresh and kps[2, i2] > kp_thresh:\n cv2.line(\n kp_mask, p1, p2,\n color=colors[l], thickness=2, lineType=cv2.LINE_AA)\n if kps[2, i1] > kp_thresh:\n cv2.circle(\n kp_mask, p1,\n radius=3, color=colors[l], thickness=-1, lineType=cv2.LINE_AA)\n if kps[2, i2] > kp_thresh:\n cv2.circle(\n kp_mask, p2,\n radius=3, color=colors[l], thickness=-1, lineType=cv2.LINE_AA)\n\n # Blend the keypoints.\n return cv2.addWeighted(img, 1.0 - alpha, kp_mask, alpha, 0)\n" ]
[ [ "torch.zeros", "torch.load", "torch.cat", "matplotlib.pyplot.get_cmap", "torch.tensor", "numpy.copy", "torch.no_grad", "torch.nonzero", "torch.device" ] ]
Tarekbouamer/Azure_Kinect_ROS_Driver
[ "17442bbc0a1ed18e60dfad2a1f6040900aa30d70" ]
[ "scripts/readDepthPFM.py" ]
[ "import glob, os\nimport re\nimport numpy as np\nfrom PIL import Image\nimport matplotlib.pyplot as plt\n\nimport sys\n\nimport cv2\n\nimport argparse\n\n\nparser = argparse.ArgumentParser(description=\"Data Annotation \")\nparser.add_argument(\"data\", metavar=\"DIR\", type=str, help=\"Path to dataset\")\n\ndef readPFM(file):\n \"\"\" Read a pfm file \"\"\"\n file = open(file, 'rb')\n\n color = None\n width = None\n height = None\n scale = None\n endian = None\n\n header = file.readline().rstrip()\n header = str(bytes.decode(header, encoding='utf-8'))\n if header == 'PF':\n color = True\n elif header == 'Pf':\n color = False\n else:\n raise Exception('Not a PFM file.')\n\n pattern = r'^(\\d+)\\s(\\d+)\\s$'\n temp_str = str(bytes.decode(file.readline(), encoding='utf-8'))\n dim_match = re.match(pattern, temp_str)\n if dim_match:\n width, height = map(int, dim_match.groups())\n else:\n temp_str += str(bytes.decode(file.readline(), encoding='utf-8'))\n dim_match = re.match(pattern, temp_str)\n if dim_match:\n width, height = map(int, dim_match.groups())\n else:\n raise Exception('Malformed PFM header: width, height cannot be found')\n\n scale = float(file.readline().rstrip())\n if scale < 0: # little-endian\n endian = '<'\n scale = -scale\n else:\n endian = '>' # big-endian\n\n data = np.fromfile(file, endian + 'f')\n shape = (height, width, 3) if color else (height, width)\n\n data = np.reshape(data, shape)\n # DEY: I don't know why this was there.\n file.close()\n\n return data, scale\n\n\ndef main(args):\n print(\"python main function\")\n depth_pfm_list = glob.glob(args.data + \"*_Depth.pfm\")\n depth_registed_pfm_list = glob.glob(args.data + \"*_Depth_registed.pfm\")\n for file in depth_registed_pfm_list:\n print(file)\n img, scale = readPFM(file)\n\n print(img.shape)\n img = Image.fromarray(img, 'L')\n print('Scale', scale)\n\n cv2.imshow(\"Image\", img)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n #input(\"Press Enter to continue...\")\n\n print('Done')\n\n\nif __name__ == '__main__':\n main(parser.parse_args())" ]
[ [ "numpy.reshape", "numpy.fromfile" ] ]
jingnanshi/torque_limited_simple_pendulum
[ "82a9379dffe063f863aeeae4fd2e2ef112b1c1fb", "82a9379dffe063f863aeeae4fd2e2ef112b1c1fb" ]
[ "software/python/simple_pendulum/controllers/ddpg/ddpg_controller.py", "software/python/simple_pendulum/reinforcement_learning/ddpg/replay_buffer.py" ]
[ "import numpy as np\nimport tensorflow as tf\nfrom tensorflow.keras.models import load_model\n\nfrom simple_pendulum.controllers.abstract_controller import AbstractController\n\ntf.compat.v1.disable_eager_execution()\n\n\nclass ddpg_controller(AbstractController):\n def __init__(self, model_path, torque_limit, state_representation=3):\n self.model = load_model(model_path)\n self.torque_limit = torque_limit\n self.state_representation = state_representation\n\n if state_representation == 2:\n # state is [th, th, vel]\n self.low = np.array([-6*2*np.pi, -20])\n self.high = np.array([6*2*np.pi, 20])\n elif state_representation == 3:\n # state is [cos(th), sin(th), vel]\n self.low = np.array([-1., -1., -8.])\n self.high = np.array([1., 1., 8.])\n\n def get_control_output(self, meas_pos, meas_vel, meas_tau=0, meas_time=0):\n\n pos = float(np.squeeze(meas_pos))\n vel = float(np.squeeze(meas_vel))\n\n state = np.array([pos, vel])\n observation = self.get_observation(state)\n control_output = self.model.predict(np.atleast_2d(observation))\n\n control_output *= self.torque_limit\n\n control_output = np.clip(control_output,\n -self.torque_limit,\n self.torque_limit)\n\n return None, None, control_output\n\n def get_observation(self, state):\n st = np.copy(state)\n st[1] = np.clip(st[1], self.low[-1], self.high[-1])\n if self.state_representation == 2:\n observation = np.array([obs for obs in st], dtype=np.float32)\n elif self.state_representation == 3:\n observation = np.array([np.cos(st[0]),\n np.sin(st[0]),\n st[1]],\n dtype=np.float32)\n\n return observation\n", "import numpy as np\nimport tensorflow as tf\n\n\nclass ReplayBuffer:\n \"\"\"\n Replay buffer class to store experiences for a\n reinforcement learning agent.\n \"\"\"\n def __init__(self, max_size, num_states, num_actions):\n \"\"\"\n Replay buffer class to store experiences for a\n reinforcement learning agent.\n\n Parameters\n ----------\n max_size: int\n maximum number of experiences to store in the repleay buffer.\n When adding experiences beyond this limit, the first entry\n is deleted.\n num_state: int\n the dimension of the state space\n num_actions: int\n the dimension of the action space\n \"\"\"\n self.buffer_capacity = max_size\n self.num_states = num_states\n self.num_actions = num_actions\n\n self.clear()\n\n def append(self, obs_tuple):\n \"\"\"\n Add an experience to the replay buffer.\n When adding experiences beyond the max_size limit,\n the first entry is deleted.\n An observation consists of (state, action, next_state, reward, done)\n\n Parameters\n ----------\n obs_tuple: array-like\n an observation (s,a,s',r,d) to store in the buffer\n \"\"\"\n index = self.size % self.buffer_capacity\n\n self.state_buffer[index] = obs_tuple[0]\n self.action_buffer[index] = obs_tuple[1]\n self.next_state_buffer[index] = obs_tuple[2]\n self.reward_buffer[index] = obs_tuple[3]\n self.done_buffer[index] = obs_tuple[4]\n\n self.size += 1\n\n def sample_batch(self, batch_size):\n \"\"\"\n Sample a batch from the replay buffer.\n\n Parameters\n ----------\n batch_size: int\n number of samples in the returned batch\n\n Returns\n -------\n tuple\n (s_batch,a_batch,s'_batch,r_batch,d_batch)\n a tuple of batches of state, action, reward, next_state, done\n \"\"\"\n record_range = min(self.size, self.buffer_capacity)\n batch_indices = np.random.choice(record_range, batch_size)\n\n # Convert to tensors\n state_batch = tf.convert_to_tensor(self.state_buffer[batch_indices])\n action_batch = 
tf.convert_to_tensor(self.action_buffer[batch_indices])\n reward_batch = tf.convert_to_tensor(self.reward_buffer[batch_indices])\n reward_batch = tf.cast(reward_batch, dtype=tf.float32)\n next_state_batch = tf.convert_to_tensor(\n self.next_state_buffer[batch_indices])\n done_batch = self.done_buffer[batch_indices]\n return (state_batch, action_batch, next_state_batch,\n reward_batch, done_batch)\n\n def clear(self):\n \"\"\"\n Clear the Replay Buffer.\n \"\"\"\n self.state_buffer = np.zeros((self.buffer_capacity, self.num_states))\n self.action_buffer = np.zeros((self.buffer_capacity, self.num_actions))\n self.next_state_buffer = np.zeros((self.buffer_capacity,\n self.num_states))\n self.reward_buffer = np.zeros((self.buffer_capacity, 1))\n self.done_buffer = np.zeros((self.buffer_capacity, 1))\n self.size = 0\n" ]
[ [ "tensorflow.keras.models.load_model", "numpy.clip", "numpy.squeeze", "numpy.cos", "numpy.sin", "numpy.atleast_2d", "numpy.copy", "tensorflow.compat.v1.disable_eager_execution", "numpy.array" ], [ "tensorflow.convert_to_tensor", "tensorflow.cast", "numpy.zeros", "numpy.random.choice" ] ]
ZhengPeng7/OCR_TIANCHI_ICPR
[ "8d3865dd83628995f3c81fa4e7adb219fb875fad" ]
[ "single_ctpn/checkpoints_convert.py" ]
[ "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nr\"\"\"Convert checkpoints using RNNCells to new name convention.\n\nUsage:\n\n python checkpoint_convert.py [--write_v1_checkpoint] \\\n '/path/to/checkpoint' '/path/to/new_checkpoint'\n\nFor example, if there is a V2 checkpoint to be converted and the files include:\n /tmp/my_checkpoint/model.ckpt.data-00000-of-00001\n /tmp/my_checkpoint/model.ckpt.index\n /tmp/my_checkpoint/model.ckpt.meta\n\nuse the following command:\n mkdir /tmp/my_converted_checkpoint &&\n python checkpoint_convert.py \\\n /tmp/my_checkpoint/model.ckpt /tmp/my_converted_checkpoint/model.ckpt\n\nThis will generate three converted checkpoint files corresponding to the three\nold ones in the new directory:\n /tmp/my_converted_checkpoint/model.ckpt.data-00000-of-00001\n /tmp/my_converted_checkpoint/model.ckpt.index\n /tmp/my_converted_checkpoint/model.ckpt.meta\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport collections\nimport re\nimport sys\n\nfrom tensorflow.core.protobuf import saver_pb2\nfrom tensorflow.python import pywrap_tensorflow\nfrom tensorflow.python.client import session\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import app\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.training import saver as saver_lib\n\n# Mapping between old <=> new names. 
Externalized so that user scripts that\n# may need to consume multiple checkpoint formats can use this metadata.\nRNN_NAME_REPLACEMENTS = collections.OrderedDict([\n ############################################################################\n # contrib/rnn/python/ops/core_rnn_cell_impl.py\n # BasicRNNCell\n ('basic_rnn_cell/weights', 'basic_rnn_cell/kernel'),\n ('basic_rnn_cell/biases', 'basic_rnn_cell/bias'),\n # GRUCell\n ('gru_cell/weights', 'gru_cell/kernel'),\n ('gru_cell/biases', 'gru_cell/bias'),\n ('gru_cell/gates/weights', 'gru_cell/gates/kernel'),\n ('gru_cell/gates/biases', 'gru_cell/gates/bias'),\n ('gru_cell/candidate/weights', 'gru_cell/candidate/kernel'),\n ('gru_cell/candidate/biases', 'gru_cell/candidate/bias'),\n # BasicLSTMCell\n ('basic_lstm_cell/weights', 'basic_lstm_cell/kernel'),\n ('basic_lstm_cell/biases', 'basic_lstm_cell/bias'),\n # LSTMCell\n ('lstm_cell/weights', 'lstm_cell/kernel'),\n ('lstm_cell/biases', 'lstm_cell/bias'),\n ('lstm_cell/projection/weights', 'lstm_cell/projection/kernel'),\n ('lstm_cell/projection/biases', 'lstm_cell/projection/bias'),\n # OutputProjectionWrapper\n ('output_projection_wrapper/weights', 'output_projection_wrapper/kernel'),\n ('output_projection_wrapper/biases', 'output_projection_wrapper/bias'),\n # InputProjectionWrapper\n ('input_projection_wrapper/weights', 'input_projection_wrapper/kernel'),\n ('input_projection_wrapper/biases', 'input_projection_wrapper/bias'),\n ############################################################################\n # contrib/rnn/python/ops/lstm_ops.py\n # LSTMBlockFusedCell ??\n ('lstm_block_wrapper/weights', 'lstm_block_wrapper/kernel'),\n ('lstm_block_wrapper/biases', 'lstm_block_wrapper/bias'),\n ############################################################################\n # contrib/rnn/python/ops/rnn_cell.py\n # LayerNormBasicLSTMCell\n ('layer_norm_basic_lstm_cell/weights', 'layer_norm_basic_lstm_cell/kernel'),\n ('layer_norm_basic_lstm_cell/biases', 'layer_norm_basic_lstm_cell/bias'),\n # UGRNNCell, not found in g3, but still need it?\n ('ugrnn_cell/weights', 'ugrnn_cell/kernel'),\n ('ugrnn_cell/biases', 'ugrnn_cell/bias'),\n # NASCell\n ('nas_rnn/weights', 'nas_rnn/kernel'),\n ('nas_rnn/recurrent_weights', 'nas_rnn/recurrent_kernel'),\n # IntersectionRNNCell\n ('intersection_rnn_cell/weights', 'intersection_rnn_cell/kernel'),\n ('intersection_rnn_cell/biases', 'intersection_rnn_cell/bias'),\n ('intersection_rnn_cell/in_projection/weights',\n 'intersection_rnn_cell/in_projection/kernel'),\n ('intersection_rnn_cell/in_projection/biases',\n 'intersection_rnn_cell/in_projection/bias'),\n # PhasedLSTMCell\n ('phased_lstm_cell/mask_gates/weights',\n 'phased_lstm_cell/mask_gates/kernel'),\n ('phased_lstm_cell/mask_gates/biases', 'phased_lstm_cell/mask_gates/bias'),\n ('phased_lstm_cell/new_input/weights', 'phased_lstm_cell/new_input/kernel'),\n ('phased_lstm_cell/new_input/biases', 'phased_lstm_cell/new_input/bias'),\n ('phased_lstm_cell/output_gate/weights',\n 'phased_lstm_cell/output_gate/kernel'),\n ('phased_lstm_cell/output_gate/biases',\n 'phased_lstm_cell/output_gate/bias'),\n # AttentionCellWrapper\n ('attention_cell_wrapper/weights', 'attention_cell_wrapper/kernel'),\n ('attention_cell_wrapper/biases', 'attention_cell_wrapper/bias'),\n ('attention_cell_wrapper/attn_output_projection/weights',\n 'attention_cell_wrapper/attn_output_projection/kernel'),\n ('attention_cell_wrapper/attn_output_projection/biases',\n 'attention_cell_wrapper/attn_output_projection/bias'),\n 
('attention_cell_wrapper/attention/weights',\n 'attention_cell_wrapper/attention/kernel'),\n ('attention_cell_wrapper/attention/biases',\n 'attention_cell_wrapper/attention/bias'),\n ############################################################################\n # contrib/legacy_seq2seq/python/ops/seq2seq.py\n ('attention_decoder/weights', 'attention_decoder/kernel'),\n ('attention_decoder/biases', 'attention_decoder/bias'),\n ('attention_decoder/Attention_0/weights',\n 'attention_decoder/Attention_0/kernel'),\n ('attention_decoder/Attention_0/biases',\n 'attention_decoder/Attention_0/bias'),\n ('attention_decoder/AttnOutputProjection/weights',\n 'attention_decoder/AttnOutputProjection/kernel'),\n ('attention_decoder/AttnOutputProjection/biases',\n 'attention_decoder/AttnOutputProjection/bias'),\n # contrib/legacy_seq2seq/python/ops/seq2seq.py before cl/140060366\n ('attention_decoder/Attention_0/Linear/Bias',\n 'attention_decoder/Attention_0/bias'),\n ('attention_decoder/Attention_0/Linear/Matrix',\n 'attention_decoder/Attention_0/kernel'),\n ('attention_decoder/AttnOutputProjection/Linear/Bias',\n 'attention_decoder/AttnOutputProjection/bias'),\n ('attention_decoder/AttnOutputProjection/Linear/Matrix',\n 'attention_decoder/AttnOutputProjection/kernel'),\n ('attention_decoder/LSTMCell/B', 'attention_decoder/lstm_cell/bias'),\n ('attention_decoder/LSTMCell/W_0', 'attention_decoder/lstm_cell/kernel'),\n ('attention_decoder/Linear/Bias', 'attention_decoder/bias'),\n ('attention_decoder/Linear/Matrix', 'attention_decoder/kernel')\n])\n\n_RNN_SHARDED_NAME_REPLACEMENTS = collections.OrderedDict([\n ('LSTMCell/W_', 'lstm_cell/weights/part_'),\n ('BasicLSTMCell/Linear/Matrix_', 'basic_lstm_cell/weights/part_'),\n ('GRUCell/W_', 'gru_cell/weights/part_'),\n ('MultiRNNCell/Cell', 'multi_rnn_cell/cell_'),\n])\n\n\ndef _rnn_name_replacement(var_name):\n for pattern in RNN_NAME_REPLACEMENTS:\n if pattern in var_name:\n old_var_name = var_name\n var_name = var_name.replace(pattern, RNN_NAME_REPLACEMENTS[pattern])\n logging.info('Converted: %s --> %s' % (old_var_name, var_name))\n break\n return var_name\n\n\ndef _rnn_name_replacement_sharded(var_name):\n for pattern in _RNN_SHARDED_NAME_REPLACEMENTS:\n if pattern in var_name:\n old_var_name = var_name\n var_name = var_name.replace(pattern,\n _RNN_SHARDED_NAME_REPLACEMENTS[pattern])\n logging.info('Converted: %s --> %s' % (old_var_name, var_name))\n return var_name\n\n\ndef _split_sharded_vars(name_shape_map):\n \"\"\"Split shareded variables.\n\n Args:\n name_shape_map: A dict from variable name to variable shape.\n\n Returns:\n not_sharded: Names of the non-sharded variables.\n sharded: Names of the sharded variables.\n \"\"\"\n sharded = []\n not_sharded = []\n for name in name_shape_map:\n if re.match(name, '_[0-9]+$'):\n if re.sub('_[0-9]+$', '_1', name) in name_shape_map:\n sharded.append(name)\n else:\n not_sharded.append(name)\n else:\n not_sharded.append(name)\n return not_sharded, sharded\n\n\ndef convert_names(checkpoint_from_path,\n checkpoint_to_path,\n write_v1_checkpoint=False):\n \"\"\"Migrates the names of variables within a checkpoint.\n\n Args:\n checkpoint_from_path: Path to source checkpoint to be read in.\n checkpoint_to_path: Path to checkpoint to be written out.\n write_v1_checkpoint: Whether the output checkpoint will be in V1 format.\n\n Returns:\n A dictionary that maps the new variable names to the Variable objects.\n A dictionary that maps the old variable names to the new variable names.\n \"\"\"\n with 
ops.Graph().as_default():\n logging.info('Reading checkpoint_from_path %s' % checkpoint_from_path)\n reader = pywrap_tensorflow.NewCheckpointReader(checkpoint_from_path)\n name_shape_map = reader.get_variable_to_shape_map()\n not_sharded, sharded = _split_sharded_vars(name_shape_map)\n new_variable_map = {}\n conversion_map = {}\n for var_name in not_sharded:\n new_var_name = _rnn_name_replacement(var_name)\n tensor = reader.get_tensor(var_name)\n var = variables.Variable(tensor, name=var_name)\n new_variable_map[new_var_name] = var\n if new_var_name != var_name:\n conversion_map[var_name] = new_var_name\n for var_name in sharded:\n new_var_name = _rnn_name_replacement_sharded(var_name)\n var = variables.Variable(tensor, name=var_name)\n new_variable_map[new_var_name] = var\n if new_var_name != var_name:\n conversion_map[var_name] = new_var_name\n\n write_version = (saver_pb2.SaverDef.V1\n if write_v1_checkpoint else saver_pb2.SaverDef.V2)\n saver = saver_lib.Saver(new_variable_map, write_version=write_version)\n\n with session.Session() as sess:\n sess.run(variables.global_variables_initializer())\n logging.info('Writing checkpoint_to_path %s' % checkpoint_to_path)\n saver.save(sess, checkpoint_to_path)\n\n logging.info('Summary:')\n logging.info(' Converted %d variable name(s).' % len(new_variable_map))\n return new_variable_map, conversion_map\n\n\ndef main(_):\n convert_names(\n FLAGS.checkpoint_from_path,\n FLAGS.checkpoint_to_path,\n write_v1_checkpoint=FLAGS.write_v1_checkpoint)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.register('type', 'bool', lambda v: v.lower() == 'true')\n parser.add_argument('checkpoint_from_path', type=str,\n help='Path to source checkpoint to be read in.')\n parser.add_argument('checkpoint_to_path', type=str,\n help='Path to checkpoint to be written out.')\n parser.add_argument('--write_v1_checkpoint', action='store_true',\n help='Write v1 checkpoint')\n FLAGS, unparsed = parser.parse_known_args()\n\n app.run(main=main, argv=[sys.argv[0]] + unparsed)\n" ]
[ [ "tensorflow.python.platform.app.run", "tensorflow.python.framework.ops.Graph", "tensorflow.python.platform.tf_logging.info", "tensorflow.python.pywrap_tensorflow.NewCheckpointReader", "tensorflow.python.ops.variables.Variable", "tensorflow.python.client.session.Session", "tensorflow.python.ops.variables.global_variables_initializer", "tensorflow.python.training.saver.Saver" ] ]
dassh-dev/dassh
[ "5cd53fc7883897e59ec1126ee955e6a664995e25" ]
[ "dassh/__init__.py" ]
[ "########################################################################\n# Copyright 2021, UChicago Argonne, LLC\n#\n# Licensed under the BSD-3 License (the \"License\"); you may not use\n# this file except in compliance with the License. You may obtain a\n# copy of the License at\n#\n# https://opensource.org/licenses/BSD-3-Clause\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied. See the License for the specific language governing\n# permissions and limitations under the License.\n########################################################################\n\"\"\"\ndate: 2022-01-05\nauthor: Milos Atz, Micheal Smith\n\"\"\"\n########################################################################\nimport sys\nimport numpy as np\nfrom dassh.read_input import *\nfrom dassh.pin import *\nfrom dassh.subchannel import *\nfrom dassh.logged_class import *\nfrom dassh.region import *\nfrom dassh.region_rodded import *\nfrom dassh.region_unrodded import *\nfrom dassh.assembly import *\nfrom dassh.core import *\nfrom dassh.material import *\nfrom dassh.power import *\nfrom dassh.reactor import *\nfrom dassh.utils import *\nfrom dassh.table import *\nfrom dassh.pin_model import *\nfrom dassh._ascii import *\nfrom dassh.plot import *\nfrom dassh import mesh_functions\nfrom dassh.orificing import *\nimport dassh.py4c as py4c\n\n\nnp.set_printoptions(threshold=sys.maxsize, linewidth=500)\n\n\n__version__ = '0.9.4'\n" ]
[ [ "numpy.set_printoptions" ] ]
thccheung/cellpose
[ "873641ed5b78c3fd8eff7eeb08bf30c0d751e272" ]
[ "cellpose/transforms.py" ]
[ "import numpy as np\nimport warnings\nimport cv2\n\nimport logging\ntransforms_logger = logging.getLogger(__name__)\n\nfrom . import dynamics, utils\n\ndef _taper_mask(ly=224, lx=224, sig=7.5):\n bsize = max(224, max(ly, lx))\n xm = np.arange(bsize)\n xm = np.abs(xm - xm.mean())\n mask = 1/(1 + np.exp((xm - (bsize/2-20)) / sig))\n mask = mask * mask[:, np.newaxis]\n mask = mask[bsize//2-ly//2 : bsize//2+ly//2+ly%2, \n bsize//2-lx//2 : bsize//2+lx//2+lx%2]\n return mask\n\ndef unaugment_tiles(y, unet=False):\n \"\"\" reverse test-time augmentations for averaging\n\n Parameters\n ----------\n\n y: float32\n array that's ntiles_y x ntiles_x x chan x Ly x Lx where chan = (dY, dX, cell prob)\n\n unet: bool (optional, False)\n whether or not unet output or cellpose output\n \n Returns\n -------\n\n y: float32\n\n \"\"\"\n for j in range(y.shape[0]):\n for i in range(y.shape[1]):\n if j%2==0 and i%2==1:\n y[j,i] = y[j,i, :,::-1, :]\n if not unet:\n y[j,i,0] *= -1\n elif j%2==1 and i%2==0:\n y[j,i] = y[j,i, :,:, ::-1]\n if not unet:\n y[j,i,1] *= -1\n elif j%2==1 and i%2==1:\n y[j,i] = y[j,i, :,::-1, ::-1]\n if not unet:\n y[j,i,0] *= -1\n y[j,i,1] *= -1\n return y\n\ndef average_tiles(y, ysub, xsub, Ly, Lx):\n \"\"\" average results of network over tiles\n\n Parameters\n -------------\n\n y: float, [ntiles x nclasses x bsize x bsize]\n output of cellpose network for each tile\n\n ysub : list\n list of arrays with start and end of tiles in Y of length ntiles\n\n xsub : list\n list of arrays with start and end of tiles in X of length ntiles\n\n Ly : int\n size of pre-tiled image in Y (may be larger than original image if\n image size is less than bsize)\n\n Lx : int\n size of pre-tiled image in X (may be larger than original image if\n image size is less than bsize)\n\n Returns\n -------------\n\n yf: float32, [nclasses x Ly x Lx]\n network output averaged over tiles\n\n \"\"\"\n Navg = np.zeros((Ly,Lx))\n yf = np.zeros((y.shape[1], Ly, Lx), np.float32)\n # taper edges of tiles\n mask = _taper_mask(ly=y.shape[-2], lx=y.shape[-1])\n for j in range(len(ysub)):\n yf[:, ysub[j][0]:ysub[j][1], xsub[j][0]:xsub[j][1]] += y[j] * mask\n Navg[ysub[j][0]:ysub[j][1], xsub[j][0]:xsub[j][1]] += mask\n yf /= Navg\n return yf\n\ndef make_tiles(imgi, bsize=224, augment=False, tile_overlap=0.1):\n \"\"\" make tiles of image to run at test-time\n\n if augmented, tiles are flipped and tile_overlap=2.\n * original\n * flipped vertically\n * flipped horizontally\n * flipped vertically and horizontally\n\n Parameters\n ----------\n imgi : float32\n array that's nchan x Ly x Lx\n\n bsize : float (optional, default 224)\n size of tiles\n\n augment : bool (optional, default False)\n flip tiles and set tile_overlap=2.\n\n tile_overlap: float (optional, default 0.1)\n fraction of overlap of tiles\n\n Returns\n -------\n IMG : float32\n array that's ntiles x nchan x bsize x bsize\n\n ysub : list\n list of arrays with start and end of tiles in Y of length ntiles\n\n xsub : list\n list of arrays with start and end of tiles in X of length ntiles\n\n \n \"\"\"\n\n nchan, Ly, Lx = imgi.shape\n if augment:\n bsize = np.int32(bsize)\n # pad if image smaller than bsize\n if Ly<bsize:\n imgi = np.concatenate((imgi, np.zeros((nchan, bsize-Ly, Lx))), axis=1)\n Ly = bsize\n if Lx<bsize:\n imgi = np.concatenate((imgi, np.zeros((nchan, Ly, bsize-Lx))), axis=2)\n Ly, Lx = imgi.shape[-2:]\n # tiles overlap by half of tile size\n ny = max(2, int(np.ceil(2. * Ly / bsize)))\n nx = max(2, int(np.ceil(2. 
* Lx / bsize)))\n ystart = np.linspace(0, Ly-bsize, ny).astype(int)\n xstart = np.linspace(0, Lx-bsize, nx).astype(int)\n\n ysub = []\n xsub = []\n\n # flip tiles so that overlapping segments are processed in rotation\n IMG = np.zeros((len(ystart), len(xstart), nchan, bsize, bsize), np.float32)\n for j in range(len(ystart)):\n for i in range(len(xstart)):\n ysub.append([ystart[j], ystart[j]+bsize])\n xsub.append([xstart[i], xstart[i]+bsize])\n IMG[j, i] = imgi[:, ysub[-1][0]:ysub[-1][1], xsub[-1][0]:xsub[-1][1]]\n # flip tiles to allow for augmentation of overlapping segments\n if j%2==0 and i%2==1:\n IMG[j,i] = IMG[j,i, :,::-1, :]\n elif j%2==1 and i%2==0:\n IMG[j,i] = IMG[j,i, :,:, ::-1]\n elif j%2==1 and i%2==1:\n IMG[j,i] = IMG[j,i,:, ::-1, ::-1]\n else:\n tile_overlap = min(0.5, max(0.05, tile_overlap))\n bsizeY, bsizeX = min(bsize, Ly), min(bsize, Lx)\n bsizeY = np.int32(bsizeY)\n bsizeX = np.int32(bsizeX)\n # tiles overlap by 10% tile size\n ny = 1 if Ly<=bsize else int(np.ceil((1.+2*tile_overlap) * Ly / bsize))\n nx = 1 if Lx<=bsize else int(np.ceil((1.+2*tile_overlap) * Lx / bsize))\n ystart = np.linspace(0, Ly-bsizeY, ny).astype(int)\n xstart = np.linspace(0, Lx-bsizeX, nx).astype(int)\n\n ysub = []\n xsub = []\n IMG = np.zeros((len(ystart), len(xstart), nchan, bsizeY, bsizeX), np.float32)\n for j in range(len(ystart)):\n for i in range(len(xstart)):\n ysub.append([ystart[j], ystart[j]+bsizeY])\n xsub.append([xstart[i], xstart[i]+bsizeX])\n IMG[j, i] = imgi[:, ysub[-1][0]:ysub[-1][1], xsub[-1][0]:xsub[-1][1]]\n \n return IMG, ysub, xsub, Ly, Lx\n\ndef normalize99(Y, lower=1,upper=99):\n \"\"\" normalize image so 0.0 is 1st percentile and 1.0 is 99th percentile \"\"\"\n X = Y.copy()\n x01 = np.percentile(X, lower)\n x99 = np.percentile(X, upper)\n X = (X - x01) / (x99 - x01)\n return X\n\ndef move_axis(img, m_axis=-1, first=True):\n \"\"\" move axis m_axis to first or last position \"\"\"\n if m_axis==-1:\n m_axis = img.ndim-1\n m_axis = min(img.ndim-1, m_axis)\n axes = np.arange(0, img.ndim)\n if first:\n axes[1:m_axis+1] = axes[:m_axis]\n axes[0] = m_axis\n else:\n axes[m_axis:-1] = axes[m_axis+1:]\n axes[-1] = m_axis\n img = img.transpose(tuple(axes))\n return img\n\n# This was edited to fix a bug where single-channel images of shape (y,x) would be \n# transposed to (x,y) if x<y, making the labels no longer correspond to the data. 
\ndef move_min_dim(img, force=False):\n \"\"\" move minimum dimension last as channels if < 10, or force==True \"\"\"\n if len(img.shape) > 2: #only makese sense to do this if channel axis is already present \n min_dim = min(img.shape)\n if min_dim < 10 or force:\n if img.shape[-1]==min_dim:\n channel_axis = -1\n else:\n channel_axis = (img.shape).index(min_dim)\n img = move_axis(img, m_axis=channel_axis, first=False)\n return img\n\ndef update_axis(m_axis, to_squeeze, ndim):\n if m_axis==-1:\n m_axis = ndim-1\n if (to_squeeze==m_axis).sum() == 1:\n m_axis = None\n else:\n inds = np.ones(ndim, bool)\n inds[to_squeeze] = False\n m_axis = np.nonzero(np.arange(0, ndim)[inds]==m_axis)[0]\n if len(m_axis) > 0:\n m_axis = m_axis[0]\n else:\n m_axis = None\n return m_axis\n\ndef convert_image(x, channels, channel_axis=None, z_axis=None,\n do_3D=False, normalize=True, invert=False,\n nchan=2):\n \"\"\" return image with z first, channels last and normalized intensities \"\"\"\n \n # squeeze image, and if channel_axis or z_axis given, transpose image\n if x.ndim > 3:\n to_squeeze = np.array([int(isq) for isq,s in enumerate(x.shape) if s==1])\n # remove channel axis if number of channels is 1\n if len(to_squeeze) > 0: \n channel_axis = update_axis(channel_axis, to_squeeze, x.ndim) if channel_axis is not None else channel_axis\n z_axis = update_axis(z_axis, to_squeeze, x.ndim) if z_axis is not None else z_axis\n x = x.squeeze()\n\n # put z axis first\n if z_axis is not None and x.ndim > 2:\n x = move_axis(x, m_axis=z_axis, first=True)\n if channel_axis is not None:\n channel_axis += 1\n if x.ndim==3:\n x = x[...,np.newaxis]\n \n # put channel axis last\n if channel_axis is not None and x.ndim > 2:\n x = move_axis(x, m_axis=channel_axis, first=False)\n elif x.ndim == 2:\n x = x[:,:,np.newaxis]\n\n if do_3D :\n if x.ndim < 3:\n transforms_logger.critical('ERROR: cannot process 2D images in 3D mode')\n raise ValueError('ERROR: cannot process 2D images in 3D mode') \n elif x.ndim<4:\n x = x[...,np.newaxis]\n\n if channel_axis is None:\n x = move_min_dim(x)\n \n if x.ndim > 3:\n transforms_logger.info('multi-stack tiff read in as having %d planes %d channels'%\n (x.shape[0], x.shape[-1]))\n\n if channels is not None:\n channels = channels[0] if len(channels)==1 else channels\n if len(channels) < 2:\n transforms_logger.critical('ERROR: two channels not specified')\n raise ValueError('ERROR: two channels not specified') \n x = reshape(x, channels=channels)\n \n else:\n # code above put channels last\n if x.shape[-1] > nchan:\n transforms_logger.warning('WARNING: more than %d channels given, use \"channels\" input for specifying channels - just using first %d channels to run processing'%(nchan,nchan))\n x = x[...,:nchan]\n\n if not do_3D and x.ndim>3:\n transforms_logger.critical('ERROR: cannot process 4D images in 2D mode')\n raise ValueError('ERROR: cannot process 4D images in 2D mode')\n \n if x.shape[-1] < nchan:\n x = np.concatenate((x, \n np.tile(np.zeros_like(x), (1,1,nchan-1))), \n axis=-1)\n \n if normalize or invert:\n x = normalize_img(x, invert=invert)\n \n return x\n\ndef reshape(data, channels=[0,0], chan_first=False):\n \"\"\" reshape data using channels\n\n Parameters\n ----------\n\n data : numpy array that's (Z x ) Ly x Lx x nchan\n if data.ndim==8 and data.shape[0]<8, assumed to be nchan x Ly x Lx\n\n channels : list of int of length 2 (optional, default [0,0])\n First element of list is the channel to segment (0=grayscale, 1=red, 2=green, 3=blue).\n Second element of list is the optional 
nuclear channel (0=none, 1=red, 2=green, 3=blue).\n For instance, to train on grayscale images, input [0,0]. To train on images with cells\n in green and nuclei in blue, input [2,3].\n\n invert : bool\n invert intensities\n\n Returns\n -------\n data : numpy array that's (Z x ) Ly x Lx x nchan (if chan_first==False)\n\n \"\"\"\n data = data.astype(np.float32)\n if data.ndim < 3:\n data = data[:,:,np.newaxis]\n elif data.shape[0]<8 and data.ndim==3:\n data = np.transpose(data, (1,2,0))\n\n # use grayscale image\n if data.shape[-1]==1:\n data = np.concatenate((data, np.zeros_like(data)), axis=-1)\n else:\n if channels[0]==0:\n data = data.mean(axis=-1, keepdims=True)\n data = np.concatenate((data, np.zeros_like(data)), axis=-1)\n else:\n chanid = [channels[0]-1]\n if channels[1] > 0:\n chanid.append(channels[1]-1)\n data = data[...,chanid]\n for i in range(data.shape[-1]):\n if np.ptp(data[...,i]) == 0.0:\n if i==0:\n warnings.warn(\"chan to seg' has value range of ZERO\")\n else:\n warnings.warn(\"'chan2 (opt)' has value range of ZERO, can instead set chan2 to 0\")\n if data.shape[-1]==1:\n data = np.concatenate((data, np.zeros_like(data)), axis=-1)\n if chan_first:\n if data.ndim==4:\n data = np.transpose(data, (3,0,1,2))\n else:\n data = np.transpose(data, (2,0,1))\n return data\n\ndef normalize_img(img, axis=-1, invert=False):\n \"\"\" normalize each channel of the image so that so that 0.0=1st percentile\n and 1.0=99th percentile of image intensities\n\n optional inversion\n\n Parameters\n ------------\n\n img: ND-array (at least 3 dimensions)\n\n axis: channel axis to loop over for normalization\n\n invert: invert image (useful if cells are dark instead of bright)\n\n Returns\n ---------------\n\n img: ND-array, float32\n normalized image of same size\n\n \"\"\"\n if img.ndim<3:\n error_message = 'Image needs to have at least 3 dimensions'\n transforms_logger.critical(error_message)\n raise ValueError(error_message)\n\n img = img.astype(np.float32)\n img = np.moveaxis(img, axis, 0)\n for k in range(img.shape[0]):\n # ptp can still give nan's with weird images\n i99 = np.percentile(img[k],99)\n i1 = np.percentile(img[k],1)\n if i99 - i1 > +1e-3: #np.ptp(img[k]) > 1e-3:\n img[k] = normalize99(img[k])\n if invert:\n img[k] = -1*img[k] + 1 \n else:\n img[k] = 0\n img = np.moveaxis(img, 0, axis)\n return img\n\ndef reshape_train_test(train_data, train_labels, test_data, test_labels, channels, normalize=True):\n \"\"\" check sizes and reshape train and test data for training \"\"\"\n nimg = len(train_data)\n # check that arrays are correct size\n if nimg != len(train_labels):\n error_message = 'train data and labels not same length'\n transforms_logger.critical(error_message)\n raise ValueError(error_message)\n return\n if train_labels[0].ndim < 2 or train_data[0].ndim < 2:\n error_message = 'training data or labels are not at least two-dimensional'\n transforms_logger.critical(error_message)\n raise ValueError(error_message)\n return\n\n if train_data[0].ndim > 3:\n error_message = 'training data is more than three-dimensional (should be 2D or 3D array)'\n transforms_logger.critical(error_message)\n raise ValueError(error_message)\n return\n\n # check if test_data correct length\n if not (test_data is not None and test_labels is not None and\n len(test_data) > 0 and len(test_data)==len(test_labels)):\n test_data = None\n\n # make data correct shape and normalize it so that 0 and 1 are 1st and 99th percentile of data\n train_data, test_data, run_test = reshape_and_normalize_data(train_data, 
test_data=test_data, \n channels=channels, normalize=normalize)\n\n if train_data is None:\n error_message = 'training data do not all have the same number of channels'\n transforms_logger.critical(error_message)\n raise ValueError(error_message)\n return\n\n if not run_test:\n test_data, test_labels = None, None\n\n return train_data, train_labels, test_data, test_labels, run_test\n\ndef reshape_and_normalize_data(train_data, test_data=None, channels=None, normalize=True):\n \"\"\" inputs converted to correct shapes for *training* and rescaled so that 0.0=1st percentile\n and 1.0=99th percentile of image intensities in each channel\n\n Parameters\n --------------\n\n train_data: list of ND-arrays, float\n list of training images of size [Ly x Lx], [nchan x Ly x Lx], or [Ly x Lx x nchan]\n\n test_data: list of ND-arrays, float (optional, default None)\n list of testing images of size [Ly x Lx], [nchan x Ly x Lx], or [Ly x Lx x nchan]\n\n channels: list of int of length 2 (optional, default None)\n First element of list is the channel to segment (0=grayscale, 1=red, 2=green, 3=blue).\n Second element of list is the optional nuclear channel (0=none, 1=red, 2=green, 3=blue).\n For instance, to train on grayscale images, input [0,0]. To train on images with cells\n in green and nuclei in blue, input [2,3].\n\n normalize: bool (optional, True)\n normalize data so 0.0=1st percentile and 1.0=99th percentile of image intensities in each channel\n\n Returns\n -------------\n\n train_data: list of ND-arrays, float\n list of training images of size [2 x Ly x Lx]\n\n test_data: list of ND-arrays, float (optional, default None)\n list of testing images of size [2 x Ly x Lx]\n\n run_test: bool\n whether or not test_data was correct size and is useable during training\n\n \"\"\"\n\n # if training data is less than 2D\n run_test = False\n for test, data in enumerate([train_data, test_data]):\n if data is None:\n return train_data, test_data, run_test\n nimg = len(data)\n for i in range(nimg):\n if channels is not None:\n data[i] = move_min_dim(data[i], force=True)\n data[i] = reshape(data[i], channels=channels, chan_first=True)\n if data[i].ndim < 3:\n data[i] = data[i][np.newaxis,:,:]\n if normalize:\n data[i] = normalize_img(data[i], axis=0)\n \n nchan = [data[i].shape[0] for i in range(nimg)]\n run_test = True\n return train_data, test_data, run_test\n\ndef resize_image(img0, Ly=None, Lx=None, rsz=None, interpolation=cv2.INTER_LINEAR, no_channels=False):\n \"\"\" resize image for computing flows / unresize for computing dynamics\n\n Parameters\n -------------\n\n img0: ND-array\n image of size [Y x X x nchan] or [Lz x Y x X x nchan] or [Lz x Y x X]\n\n Ly: int, optional\n\n Lx: int, optional\n\n rsz: float, optional\n resize coefficient(s) for image; if Ly is None then rsz is used\n\n interpolation: cv2 interp method (optional, default cv2.INTER_LINEAR)\n\n Returns\n --------------\n\n imgs: ND-array \n image of size [Ly x Lx x nchan] or [Lz x Ly x Lx x nchan]\n\n \"\"\"\n if Ly is None and rsz is None:\n error_message = 'must give size to resize to or factor to use for resizing'\n transforms_logger.critical(error_message)\n raise ValueError(error_message)\n\n if Ly is None:\n # determine Ly and Lx using rsz\n if not isinstance(rsz, list) and not isinstance(rsz, np.ndarray):\n rsz = [rsz, rsz]\n if no_channels:\n Ly = int(img0.shape[-2] * rsz[-2])\n Lx = int(img0.shape[-1] * rsz[-1])\n else:\n Ly = int(img0.shape[-3] * rsz[-2])\n Lx = int(img0.shape[-2] * rsz[-1])\n \n # no_channels useful for 
z-stacks, sot he third dimension is not treated as a channel\n # but if this is called for grayscale images, they first become [Ly,Lx,2] so ndim=3 but \n if (img0.ndim>2 and no_channels) or (img0.ndim==4 and not no_channels):\n if no_channels:\n imgs = np.zeros((img0.shape[0], Ly, Lx), np.float32)\n else:\n imgs = np.zeros((img0.shape[0], Ly, Lx, img0.shape[-1]), np.float32)\n for i,img in enumerate(img0):\n imgs[i] = cv2.resize(img, (Lx, Ly), interpolation=interpolation)\n else:\n imgs = cv2.resize(img0, (Lx, Ly), interpolation=interpolation)\n return imgs\n\ndef pad_image_ND(img0, div=16, extra = 1):\n \"\"\" pad image for test-time so that its dimensions are a multiple of 16 (2D or 3D)\n\n Parameters\n -------------\n\n img0: ND-array\n image of size [nchan (x Lz) x Ly x Lx]\n\n div: int (optional, default 16)\n\n Returns\n --------------\n\n I: ND-array\n padded image\n\n ysub: array, int\n yrange of pixels in I corresponding to img0\n\n xsub: array, int\n xrange of pixels in I corresponding to img0\n\n \"\"\"\n Lpad = int(div * np.ceil(img0.shape[-2]/div) - img0.shape[-2])\n xpad1 = extra*div//2 + Lpad//2\n xpad2 = extra*div//2 + Lpad - Lpad//2\n Lpad = int(div * np.ceil(img0.shape[-1]/div) - img0.shape[-1])\n ypad1 = extra*div//2 + Lpad//2\n ypad2 = extra*div//2+Lpad - Lpad//2\n\n if img0.ndim>3:\n pads = np.array([[0,0], [0,0], [xpad1,xpad2], [ypad1, ypad2]])\n else:\n pads = np.array([[0,0], [xpad1,xpad2], [ypad1, ypad2]])\n\n I = np.pad(img0,pads, mode='constant')\n\n Ly, Lx = img0.shape[-2:]\n ysub = np.arange(xpad1, xpad1+Ly)\n xsub = np.arange(ypad1, ypad1+Lx)\n return I, ysub, xsub\n\ndef normalize_field(mu):\n mu /= (1e-20 + (mu**2).sum(axis=0)**0.5)\n return mu\n\ndef _X2zoom(img, X2=1):\n \"\"\" zoom in image\n\n Parameters\n ----------\n img : numpy array that's Ly x Lx\n\n Returns\n -------\n img : numpy array that's Ly x Lx\n\n \"\"\"\n ny,nx = img.shape[:2]\n img = cv2.resize(img, (int(nx * (2**X2)), int(ny * (2**X2))))\n return img\n\ndef _image_resizer(img, resize=512, to_uint8=False):\n \"\"\" resize image\n\n Parameters\n ----------\n img : numpy array that's Ly x Lx\n\n resize : int\n max size of image returned\n\n to_uint8 : bool\n convert image to uint8\n\n Returns\n -------\n img : numpy array that's Ly x Lx, Ly,Lx<resize\n\n \"\"\"\n ny,nx = img.shape[:2]\n if to_uint8:\n if img.max()<=255 and img.min()>=0 and img.max()>1:\n img = img.astype(np.uint8)\n else:\n img = img.astype(np.float32)\n img -= img.min()\n img /= img.max()\n img *= 255\n img = img.astype(np.uint8)\n if np.array(img.shape).max() > resize:\n if ny>nx:\n nx = int(nx/ny * resize)\n ny = resize\n else:\n ny = int(ny/nx * resize)\n nx = resize\n shape = (nx,ny)\n img = cv2.resize(img, shape)\n img = img.astype(np.uint8)\n return img\n\n\ndef random_rotate_and_resize(X, Y=None, scale_range=1., xy = (224,224), \n do_flip=True, rescale=None, unet=False, random_per_image=True):\n \"\"\" augmentation by random rotation and resizing\n X and Y are lists or arrays of length nimg, with dims channels x Ly x Lx (channels optional)\n Parameters\n ----------\n X: LIST of ND-arrays, float\n list of image arrays of size [nchan x Ly x Lx] or [Ly x Lx]\n Y: LIST of ND-arrays, float (optional, default None)\n list of image labels of size [nlabels x Ly x Lx] or [Ly x Lx]. The 1st channel\n of Y is always nearest-neighbor interpolated (assumed to be masks or 0-1 representation).\n If Y.shape[0]==3 and not unet, then the labels are assumed to be [cell probability, Y flow, X flow]. 
\n If unet, second channel is dist_to_bound.\n scale_range: float (optional, default 1.0)\n Range of resizing of images for augmentation. Images are resized by\n (1-scale_range/2) + scale_range * np.random.rand()\n xy: tuple, int (optional, default (224,224))\n size of transformed images to return\n do_flip: bool (optional, default True)\n whether or not to flip images horizontally\n rescale: array, float (optional, default None)\n how much to resize images by before performing augmentations\n unet: bool (optional, default False)\n random_per_image: bool (optional, default True)\n different random rotate and resize per image\n Returns\n -------\n imgi: ND-array, float\n transformed images in array [nimg x nchan x xy[0] x xy[1]]\n lbl: ND-array, float\n transformed labels in array [nimg x nchan x xy[0] x xy[1]]\n scale: array, float\n amount each image was resized by\n \"\"\"\n scale_range = max(0, min(2, float(scale_range)))\n nimg = len(X)\n if X[0].ndim>2:\n nchan = X[0].shape[0]\n else:\n nchan = 1\n imgi = np.zeros((nimg, nchan, xy[0], xy[1]), np.float32)\n\n lbl = []\n if Y is not None:\n if Y[0].ndim>2:\n nt = Y[0].shape[0]\n else:\n nt = 1\n lbl = np.zeros((nimg, nt, xy[0], xy[1]), np.float32)\n\n scale = np.ones(nimg, np.float32)\n \n for n in range(nimg):\n Ly, Lx = X[n].shape[-2:]\n\n if random_per_image or n==0:\n # generate random augmentation parameters\n flip = np.random.rand()>.5\n theta = np.random.rand() * np.pi * 2\n scale[n] = (1-scale_range/2) + scale_range * np.random.rand()\n if rescale is not None:\n scale[n] *= 1. / rescale[n]\n dxy = np.maximum(0, np.array([Lx*scale[n]-xy[1],Ly*scale[n]-xy[0]]))\n dxy = (np.random.rand(2,) - .5) * dxy\n\n # create affine transform\n cc = np.array([Lx/2, Ly/2])\n cc1 = cc - np.array([Lx-xy[1], Ly-xy[0]])/2 + dxy\n pts1 = np.float32([cc,cc + np.array([1,0]), cc + np.array([0,1])])\n pts2 = np.float32([cc1,\n cc1 + scale[n]*np.array([np.cos(theta), np.sin(theta)]),\n cc1 + scale[n]*np.array([np.cos(np.pi/2+theta), np.sin(np.pi/2+theta)])])\n M = cv2.getAffineTransform(pts1,pts2)\n\n img = X[n].copy()\n if Y is not None:\n labels = Y[n].copy()\n if labels.ndim<3:\n labels = labels[np.newaxis,:,:]\n\n if flip and do_flip:\n img = img[..., ::-1]\n if Y is not None:\n labels = labels[..., ::-1]\n if nt > 1 and not unet:\n labels[2] = -labels[2]\n\n for k in range(nchan):\n I = cv2.warpAffine(img[k], M, (xy[1],xy[0]), flags=cv2.INTER_LINEAR)\n imgi[n,k] = I\n\n if Y is not None:\n for k in range(nt):\n if k==0:\n lbl[n,k] = cv2.warpAffine(labels[k], M, (xy[1],xy[0]), flags=cv2.INTER_NEAREST)\n else:\n lbl[n,k] = cv2.warpAffine(labels[k], M, (xy[1],xy[0]), flags=cv2.INTER_LINEAR)\n\n if nt > 1 and not unet:\n v1 = lbl[n,2].copy()\n v2 = lbl[n,1].copy()\n lbl[n,1] = (-v1 * np.sin(-theta) + v2*np.cos(-theta))\n lbl[n,2] = (v1 * np.cos(-theta) + v2*np.sin(-theta))\n\n return imgi, lbl, scale" ]
[ [ "numpy.pad", "numpy.linspace", "numpy.arange", "numpy.int32", "numpy.ptp", "numpy.percentile", "numpy.ones", "numpy.transpose", "numpy.ceil", "numpy.sin", "numpy.cos", "numpy.zeros_like", "numpy.random.rand", "numpy.moveaxis", "numpy.exp", "numpy.array", "numpy.zeros" ] ]
ntropy-network/ML-tools
[ "279cc6c8918ab4f1407a891f9f80480b9d8cc178" ]
[ "SNGP/test_gp.py" ]
[ "import torch\nimport numpy as np\nimport itertools as it\nimport gaussian_process\nfrom tqdm import tqdm\nimport pytest\nfrom gaussian_process import RandomFeatureGaussianProcess, LaplaceRandomFeatureCovariance, mean_field_logits\nfrom random_fourier_features import RandomFourierFeatures\n\n\nclass Dataset(torch.utils.data.Dataset):\n def __init__(self, data):\n self.data= data.astype(np.float32)\n def __len__(self):\n return len(self.data)\n def __getitem__(self, idx):\n return self.data[idx]\n\ndef exact_gaussian_kernel(x1, x2):\n \"\"\"Computes exact Gaussian kernel value(s) for tensors x1 and x2.\"\"\"\n x1 = torch.tensor(x1, dtype=torch.float32)\n x2 = torch.tensor(x2, dtype=torch.float32)\n x1_squared = torch.sum(x1 ** 2, list(range(1, len(x1.shape))))\n x2_squared = torch.sum(x2 ** 2, list(range(1, len(x2.shape))))\n square = x1_squared[:, None] + x2_squared[None, :] - 2 * x1 @ x2.T\n return torch.exp(-square / 2.)\n\n\ndef _generate_normal_data(num_sample, num_dim, loc):\n \"\"\"Generates random data sampled from i.i.d. normal distribution.\"\"\"\n return np.random.normal(\n size=(num_sample, num_dim),\n loc=loc,\n scale=1. / np.sqrt(num_dim)\n ).astype(np.float32)\n\ndef _generate_rbf_data(x_data, orthogonal=True):\n \"\"\"Generates high-dim data that are the eigencomponents of an RBF kernel.\"\"\"\n k_rbf = exact_gaussian_kernel(x_data, x_data)\n x_orth, x_diag, _ = np.linalg.svd(k_rbf)\n if orthogonal:\n return x_orth\n return np.diag(np.sqrt(x_diag)) @ x_orth.T\n\ndef _make_minibatch_iterator(data_numpy, batch_size, num_epoch):\n \"\"\"Makes a tf.data.Dataset for given batch size and num epoches.\"\"\"\n return it.chain.from_iterable([\n torch.utils.data.DataLoader(Dataset(data_numpy), batch_size=batch_size)\n for _ in range(num_epoch)\n ])\n\ndef _compute_posterior_kernel(x_train, x_test, kernel_func, ridge_penalty):\n \"\"\"Computes the posterior covariance matrix of a Gaussian process.\"\"\"\n num_sample = x_train.shape[0]\n k_tt_inv = np.linalg.inv(kernel_func(x_train, x_train) + ridge_penalty * np.eye(num_sample, dtype=np.float32))\n k_ts = kernel_func(x_train, x_test)\n k_ss = kernel_func(x_test, x_test)\n\n return k_ss - k_ts.T @ k_tt_inv @ k_ts\n\nnum_data_dim = 10\nnum_inducing = 2048\nnum_train_sample = 1600\nnum_test_sample = 256\nprec_tolerance = {'atol': 1e-3, 'rtol': 5e-2}\ncov_tolerance = {'atol': 5e-2, 'rtol': 2.}\n\nrbf_kern_func = exact_gaussian_kernel\n\nx_tr = _generate_normal_data(num_train_sample, num_data_dim, loc=0.)\nx_ts = _generate_normal_data(num_test_sample, num_data_dim, loc=1.)\n\ndef test_save_load_random_fourier_features(tmp_path):\n name = tmp_path / 'model.bin'\n model = RandomFourierFeatures(1, 10)\n inp = torch.randn(20, 1)\n prev = model(inp)\n torch.save(model.state_dict(), name)\n model = RandomFourierFeatures(1, 10)\n model.load_state_dict(torch.load(name))\n curr = model(inp)\n np.testing.assert_allclose(prev.detach().numpy(), curr.detach().numpy())\n\n# @pytest.mark.skip(reason='takes too long, and it works')\n@pytest.mark.parametrize('generate_orthogonal_data', [(False,), (True,)], ids=['rbf_kernel', 'orthogonal'])\ndef test_laplace_covariance_minibatch(generate_orthogonal_data):\n \"\"\"Tests if model correctly learns population-level precision matrix.\"\"\"\n batch_size = 64\n epochs = 1000\n x_data = _generate_rbf_data(x_ts, generate_orthogonal_data)\n data_iterator = _make_minibatch_iterator(x_data, batch_size, epochs)\n\n # Estimates precision matrix using minibatch.\n cov_estimator = 
gaussian_process.LaplaceRandomFeatureCovariance(\n in_features=x_data.shape[-1],\n momentum=0.999,\n ridge_penalty=0,\n ).train()\n\n for minibatch_data in tqdm(data_iterator, total=epochs * num_test_sample // batch_size):\n cov_estimator(minibatch_data)\n\n # Evaluation\n prec_mat_expected = x_data.T @ x_data\n prec_mat_computed = (cov_estimator.precision.numpy() * num_test_sample)\n\n np.testing.assert_allclose(prec_mat_computed, prec_mat_expected, **prec_tolerance)\n\ndef test_random_feature_prior_approximation():\n \"\"\"Tests random feature GP's ability to approximate the exact GP prior.\"\"\"\n random_features = 10240\n rfgp_model = gaussian_process.RandomFeatureGaussianProcess(\n in_features=x_tr.shape[-1],\n out_features=1,\n random_features=random_features,\n normalize_input=False,\n kernel_type='gaussian',\n return_random_features=True,\n use_custom_random_features=False,\n )\n\n # Extract random features\n rfgp_model.train()\n gp_feature = rfgp_model(torch.tensor(x_tr).float())['random_features']\n rfgp_model.eval()\n gp_feature_np = gp_feature.detach().numpy()\n\n prior_kernel_computed = gp_feature_np @ gp_feature_np.T\n prior_kernel_expected = rbf_kern_func(x_tr, x_tr)\n np.testing.assert_allclose(prior_kernel_computed, prior_kernel_expected, **cov_tolerance)\n\ndef test_random_feature_posterior_approximation():\n \"\"\"Tests random feature GP's ability in approximating exact GP posterior.\"\"\"\n # Set momentum = 0.5 so posterior precision matrix is 0.5 * (I + K).\n gp_cov_momentum = 0.5\n gp_cov_ridge_penalty = 1.\n random_features = 1024\n\n rfgp_model = gaussian_process.RandomFeatureGaussianProcess(\n in_features=x_tr.shape[-1],\n out_features=1,\n random_features=random_features,\n normalize_input=False,\n kernel_type='gaussian',\n covariance_momentum=gp_cov_momentum,\n covariance_ridge_penalty=gp_cov_ridge_penalty)\n\n # Computes posterior covariance on test data.\n rfgp_model.train()\n rfgp_model(torch.tensor(x_tr))\n rfgp_model.eval()\n gp_cov_ts = rfgp_model(torch.tensor(x_ts))['covariance']\n\n # Scale up covariance estimate since prec matrix is down-scaled by momentum.\n post_kernel_computed = gp_cov_ts * gp_cov_momentum\n post_kernel_expected = _compute_posterior_kernel(\n x_tr.astype(np.float32), x_ts.astype(np.float32), rbf_kern_func, gp_cov_ridge_penalty\n )\n np.testing.assert_allclose(post_kernel_computed, post_kernel_expected, **cov_tolerance)\n\ndef test_random_feature_linear_kernel():\n \"\"\"Tests if linear kernel indeed leads to an identity mapping.\"\"\"\n # Specify linear kernel\n gp_kernel_type = 'linear'\n normalize_input = False\n scale_random_features = False\n use_custom_random_features = True\n\n rfgp_model = gaussian_process.RandomFeatureGaussianProcess(\n in_features=x_tr.shape[-1],\n out_features=1,\n normalize_input=normalize_input,\n kernel_type=gp_kernel_type,\n scale_random_features=scale_random_features,\n use_custom_random_features=use_custom_random_features,\n return_random_features=True)\n\n gp_feature = rfgp_model.train()(torch.tensor(x_tr))['random_features']\n\n # Check if linear kernel leads to identity mapping.\n np.testing.assert_allclose(gp_feature, x_tr, **prec_tolerance)\n\ndef test_no_matrix_update_during_test():\n \"\"\"Tests that the precision matrix is not updated during testing.\"\"\"\n rfgp_model = gaussian_process.RandomFeatureGaussianProcess(x_tr.shape[-1], 1)\n\n # Training.\n gp_covmat_null = rfgp_model.train()(torch.tensor(x_tr))['covariance']\n precision_mat_before_test = rfgp_model.covariance_layer.precision\n\n # 
Testing.\n    rfgp_model.eval()(torch.tensor(x_ts))\n    precision_mat_after_test = rfgp_model.covariance_layer.precision\n\n    np.testing.assert_allclose(\n        gp_covmat_null, torch.eye(num_train_sample), atol=1e-4)\n    np.testing.assert_allclose(\n        precision_mat_before_test, precision_mat_after_test, atol=1e-4)\n\ndef test_save_load_gaussian_process(tmp_path):\n    name = tmp_path / 'model.bin'\n\n    model = gaussian_process.RandomFeatureGaussianProcess(x_tr.shape[-1], 1)\n\n    gp_covmat_null = model.train()(torch.tensor(x_tr))['covariance']\n    covariance_before = model.eval()(torch.tensor(x_ts))['covariance']\n\n    torch.save(model.state_dict(), name)\n    model = gaussian_process.RandomFeatureGaussianProcess(x_tr.shape[-1], 1)\n    model.load_state_dict(torch.load(name))\n    covariance_after = model.eval()(torch.tensor(x_ts))['covariance']\n\n    np.testing.assert_allclose(covariance_before.detach().numpy(), covariance_after.detach().numpy())\n\n\ndef test_mean_field_logits_likelihood():\n    \"\"\"Tests if scaling is correct under a non-trivial predictive covariance.\"\"\"\n    batch_size = 10\n    num_classes = 12\n    variance = 1.5\n    mean_field_factor = 2.\n\n    rng = np.random.RandomState(0)\n    logits = torch.randn(batch_size, num_classes)\n    covmat = torch.diag(torch.tensor([variance] * batch_size))\n\n    logits_logistic = mean_field_logits(\n        logits, covmat, mean_field_factor=mean_field_factor)\n\n    np.testing.assert_allclose(logits_logistic, logits / 2., atol=1e-4)\n\ndef test_mean_field_logits_temperature_scaling():\n    \"\"\"Tests using mean_field_logits as temperature scaling method.\"\"\"\n    batch_size = 10\n    num_classes = 12\n\n    rng = np.random.RandomState(0)\n    logits = torch.tensor(np.random.randn(batch_size, num_classes))\n\n    # Test if there's no change to logits when mean_field_factor < 0.\n    logits_no_change = mean_field_logits(\n        logits, covariance_matrix=None, mean_field_factor=-1)\n\n    # Test if mean_field_logits functions as a temperature scaling method when\n    # mean_field_factor > 0, with temperature = sqrt(1. + mean_field_factor).\n    logits_scale_by_two = mean_field_logits(\n        logits, covariance_matrix=None, mean_field_factor=3.)\n\n    np.testing.assert_allclose(logits_no_change, logits, atol=1e-4)\n    np.testing.assert_allclose(logits_scale_by_two, logits / 2., atol=1e-4)\n" ]
[ [ "numpy.linalg.svd", "numpy.sqrt", "torch.load", "torch.randn", "numpy.eye", "torch.eye", "torch.tensor", "torch.exp", "numpy.random.randn", "numpy.testing.assert_allclose", "torch.diag", "numpy.random.RandomState" ] ]