repo_name (string, lengths 6–130) | hexsha (list) | file_path (list) | code (list) | apis (list) |
---|---|---|---|---|
thetomcraig/HPI | ["5eecd8721dc0cbfc68040106bb7b540b1567dff3"] | ["my/jawbone/__init__.py"] |
[
"#!/usr/bin/env python3\nfrom typing import Dict, Any, List\nimport json\nfrom functools import lru_cache\nfrom datetime import datetime, date, time, timedelta\nfrom pathlib import Path\nimport logging\nimport pytz\n\nfrom my.config import jawbone as config\n\n\nBDIR = config.export_dir\nPHASES_FILE = BDIR / 'phases.json'\nSLEEPS_FILE = BDIR / 'sleeps.json'\nGRAPHS_DIR = BDIR / 'graphs'\n\n\ndef get_logger():\n return logging.getLogger('jawbone-provider')\n\n\nXID = str # TODO how to shared with backup thing?\n\nPhases = Dict[XID, Any]\n@lru_cache(1)\ndef get_phases() -> Phases:\n return json.loads(PHASES_FILE.read_text())\n\n# TODO use awakenings and quality\nclass SleepEntry:\n def __init__(self, js) -> None:\n self.js = js\n\n # TODO @memoize decorator?\n @property\n def date_(self) -> date:\n return self.sleep_end.date()\n\n def _fromts(self, ts: int) -> datetime:\n return pytz.utc.localize(datetime.utcfromtimestamp(ts)).astimezone(self._tz).astimezone(self._tz)\n @property\n def _tz(self):\n return pytz.timezone(self._details['tz'])\n\n @property\n def title(self) -> str:\n return self.js['title']\n\n @property\n def xid(self) -> XID:\n return self.js['xid']\n\n @property\n def _details(self):\n return self.js['details']\n\n # TODO figure out timezones..\n # not sure how.. I guess by the american ones\n @property\n def created(self) -> datetime:\n return self._fromts(self.js['time_created'])\n\n @property\n def completed(self) -> datetime:\n return self._fromts(self.js['time_completed'])\n\n @property\n def asleep(self) -> datetime:\n return self._fromts(self._details['asleep_time'])\n\n @property\n def sleep_start(self) -> datetime:\n return self.asleep # TODO careful, maybe use same logic as emfit\n\n @property\n def bed_time(self) -> int:\n return int((self.sleep_end - self.sleep_start).total_seconds()) // 60\n\n @property\n def sleep_end(self) -> datetime:\n return self._fromts(self._details['awake_time'])\n\n @property\n def graph(self) -> Path:\n return GRAPHS_DIR / (self.xid + \".png\")\n\n # TODO might be useful to cache these??\n @property\n def phases(self) -> List[datetime]:\n # TODO make sure they are consistent with emfit?\n return [self._fromts(i['time']) for i in get_phases()[self.xid]]\n\n def __str__(self) -> str:\n return f\"{self.date_.strftime('%a %d %b')} {self.title}\"\n\n def __repr__(self) -> str:\n return str(self)\n\n\ndef load_sleeps() -> List[SleepEntry]:\n sleeps = json.loads(SLEEPS_FILE.read_text())\n return [SleepEntry(js) for js in sleeps]\n\n\nimport numpy as np # type: ignore\nimport matplotlib.pyplot as plt # type: ignore\nfrom matplotlib.figure import Figure # type: ignore\nfrom matplotlib.axes import Axes # type: ignore\n\n# pip install imageio\nfrom imageio import imread # type: ignore\n\n\ndef hhmm(time: datetime):\n return time.strftime(\"%H:%M\")\n\n\n# def xpos(time: datetime) -> float:\n# tick = span / width\n# fromstart = time - sleep.created\n# return fromstart / tick\n\nimport matplotlib.dates as mdates # type: ignore\nfrom matplotlib.ticker import MultipleLocator, FixedLocator # type: ignore\n\ndef plot_one(sleep: SleepEntry, fig: Figure, axes: Axes, xlims=None, showtext=True):\n span = sleep.completed - sleep.created\n print(f\"{sleep.xid} span: {span}\")\n\n img = imread(sleep.graph)\n # all of them are 300x300 images apparently\n # span for image\n xspan = [sleep.created, sleep.completed]\n xspan = [mdates.date2num(i) for i in xspan]\n if xlims is None:\n tt = sleep.created\n hour = tt.hour\n # TODO maybe assert that hour is somewhere 
between 20 and 8 or something\n start: datetime\n starttime = time(23, 00)\n if hour >= 20:\n # went to bed before midnight\n start = datetime.combine(tt.date(), starttime)\n elif hour <= 8:\n # went to bed after midnight\n start = datetime.combine(tt.date() - timedelta(days=1), starttime)\n else:\n print(\"wtf??? weird time for sleep...\")\n # choosing at random\n start = datetime.combine(tt.date(), starttime)\n end = start + timedelta(hours=10)\n xlims = [start, end]\n\n # axes.figure(figsize=(10, 5))\n axes.set_xlim(xlims)\n hhmm_fmt = mdates.DateFormatter('%H:%M')\n axes.xaxis.set_major_formatter(hhmm_fmt)\n ticks = sleep.phases if showtext else []\n axes.xaxis.set_ticks(ticks)\n axes.yaxis.set_ticks([])\n axes.tick_params(\n axis='both',\n which='major',\n length=0,\n labelsize=7,\n rotation=30,\n pad=-14, # err... hacky\n )\n\n ylims = [0, 50]\n axes.set_ylim(ylims)\n\n axes.imshow(\n img,\n zorder=0,\n extent=[\n xspan[0], xspan[1],\n ylims[0], ylims[1],\n ],\n aspect='auto',\n )\n # axes.set_title(str(sleep))\n # axes.title.set_size(10)\n\n if showtext:\n axes.text(xlims[1] - timedelta(hours=1.5), 20, str(sleep),)\n # plt.text(sleep.asleep(), 0, hhmm(sleep.asleep()))\n\nfrom ..common import group_by_key\n\ndef sleeps_by_date() -> Dict[date, SleepEntry]:\n logger = get_logger()\n\n sleeps = load_sleeps()\n sleeps = [s for s in sleeps if s.graph.exists()] # TODO careful..\n res = {}\n for dd, group in group_by_key(sleeps, key=lambda s: s.date_).items():\n if len(group) == 1:\n res[dd] = group[0]\n else:\n # TODO short ones I can ignore I guess. but won't bother now\n logger.error('multiple sleeps on %s: %s', dd, group)\n return res\n\n# sleeps_count = 35 # len(sleeps) # apparently MPL fails at 298 with outofmemory or something\n# start = 40\n# 65 is arount 1 july\n# sleeps = sleeps[start: start + sleeps_count]\n# sleeps = sleeps[:sleeps_count]\n# dt = {k: v for k, v in dt.items() if v is not None}\n\n# TODO not really sure it belongs here...\n# import melatonin\n# dt = melatonin.get_data()\n\ndef predicate(sleep: SleepEntry):\n \"\"\"\n Filter for comparing similar sleep sesssions\n \"\"\"\n start = sleep.created.time()\n end = sleep.completed.time()\n if (time(23, 0) <= start <= time(23, 30)) and (time(5, 30) <= end <= time(6, 30)):\n return True\n return False\n\n\ndef plot():\n # TODO FIXME melatonin data\n melatonin_data = {} # type: ignore[var-annotated]\n\n # TODO ??\n sleeps = list(filter(predicate, load_sleeps()))\n sleeps_count = len(sleeps)\n print(sleeps_count)\n\n fig: Figure = plt.figure(figsize=(15, sleeps_count * 1))\n\n axarr = fig.subplots(nrows=len(sleeps))\n for i, (sleep, axes) in enumerate(zip(sleeps, axarr)):\n plot_one(sleep, fig, axes, showtext=True)\n used = melatonin_data.get(sleep.date_, None)\n sused: str\n color: str\n # used = True if used is None else False # TODO?\n if used is True:\n sused = \"YES\"\n color = 'green'\n elif used is False:\n sused = \"NO\"\n color = 'red'\n else:\n sused = \"??\"\n color = 'white'\n axes.text(axes.get_xlim()[0], 20, sused)\n axes.patch.set_alpha(0.5)\n axes.set_facecolor(color)\n\n\n plt.tight_layout()\n plt.subplots_adjust(hspace=0.0)\n # er... this saves with a different aspect ratio for some reason.\n # tap 'ctrl-s' on mpl plot window to save..\n # plt.savefig('res.png', asp)\n plt.show()\n\nimport pandas as pd # type: ignore\ndef get_dataframe():\n sleeps = sleeps_by_date()\n items = []\n for dd, s in sleeps.items():\n items.append({\n 'date' : dd, # TODO not sure... 
# TODO would also be great to sync column names...\n 'sleep_start': s.sleep_start,\n 'sleep_end' : s.sleep_end,\n 'bed_time' : s.bed_time,\n })\n # TODO tz is in sleeps json\n res = pd.DataFrame(items)\n return res\n\n\ndef test_tz():\n sleeps = sleeps_by_date()\n for s in sleeps.values():\n assert s.sleep_start.tzinfo is not None\n assert s.sleep_end.tzinfo is not None\n\n for dd, exp in [\n (date(year=2015, month=8 , day=28), time(hour=7, minute=20)),\n (date(year=2015, month=9 , day=15), time(hour=6, minute=10)),\n ]:\n sleep = sleeps[dd]\n end = sleep.sleep_end\n\n assert end.time() == exp\n\n # TODO fuck. on 0909 I woke up at around 6 according to google timeline\n # but according to jawbone, it was on 0910?? eh. I guess it's jus shitty tracking.\n\n\ndef main():\n # TODO eh. vendorize klogging already?\n from kython.klogging import setup_logzero\n setup_logzero(get_logger())\n test_tz()\n # print(get_dataframe())\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"matplotlib.dates.DateFormatter",
"matplotlib.pyplot.tight_layout",
"pandas.DataFrame",
"matplotlib.dates.date2num",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
ericchen321/ros_x_habitat | ["f256b62fe8dda059baaf9bad87cf53f7d769f2f9"] | ["src/test/test_habitat_ros/test_habitat_ros_env_node_discrete.py"] |
[
"import os\nimport shlex\nimport unittest\nfrom subprocess import Popen\n\nimport numpy as np\nimport rostest\n\nfrom mock_habitat_ros_evaluator import MockHabitatROSEvaluator\nfrom src.evaluators.habitat_sim_evaluator import HabitatSimEvaluator\nfrom src.constants.constants import NumericalMetrics, PACKAGE_NAME\nfrom src.test.data.data import TestHabitatROSData\n\n\nclass HabitatROSEnvNodeDiscreteCase(unittest.TestCase):\n r\"\"\"\n Test cases for Habitat agent + Habitat sim through ROS.\n \"\"\"\n\n def setUp(self):\n # define env node pub rate\n self.env_pub_rate = 5.0\n\n # define the env node's name\n self.env_node_under_test_name = \"env_node_under_test\"\n\n def tearDown(self):\n pass\n\n def test_env_node_discrete(self):\n # start the env node\n env_node_args = shlex.split(\n f\"python src/nodes/habitat_env_node.py --node-name {self.env_node_under_test_name} --task-config {TestHabitatROSData.test_acts_and_obs_discrete_task_config} --sensor-pub-rate {self.env_pub_rate}\"\n )\n Popen(env_node_args)\n\n # start the mock agent node\n agent_node_args = shlex.split(\n f\"python src/test/test_habitat_ros/mock_agent_node.py\"\n )\n Popen(agent_node_args)\n\n # init the mock evaluator node\n mock_evaluator = MockHabitatROSEvaluator(\n node_name=\"mock_habitat_ros_evaluator_node\",\n env_node_name=self.env_node_under_test_name,\n agent_node_name=\"mock_agent_node\",\n )\n\n # mock-eval one episode\n dict_of_metrics = mock_evaluator.evaluate(\n str(int(TestHabitatROSData.test_acts_and_obs_discrete_episode_id) - 1),\n TestHabitatROSData.test_acts_and_obs_discrete_scene_id,\n )\n metrics = HabitatSimEvaluator.compute_avg_metrics(dict_of_metrics)\n print(f\"success: {metrics[NumericalMetrics.SUCCESS]}\")\n print(f\"spl: {metrics[NumericalMetrics.SPL]}\")\n assert (\n np.linalg.norm(metrics[NumericalMetrics.SUCCESS] - 1.0) < 1e-5\n and np.linalg.norm(metrics[NumericalMetrics.SPL] - 0.68244) < 1e-5\n )\n\n # shut down nodes\n mock_evaluator.shutdown_agent_node()\n mock_evaluator.shutdown_env_node()\n\n\ndef main():\n rostest.rosrun(\n PACKAGE_NAME,\n \"tests_habitat_ros_env_node_discrete\",\n HabitatROSEnvNodeDiscreteCase,\n )\n\n\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"numpy.linalg.norm"
]
] |
liuweilin17/cleverhans | ["13b248e88c1955ef585e235db8e7ca65bae8b2d5"] | ["examples/imagenet_featadvs/model.py"] |
[
"import functools\nimport tensorflow as tf\nfrom cleverhans.model import Model\nfrom cleverhans_tutorials.tutorial_models import HeReLuNormalInitializer\n\n\nclass ModelImageNetCNN(Model):\n def __init__(self, scope, nb_classes=1000, **kwargs):\n del kwargs\n Model.__init__(self, scope, nb_classes, locals())\n\n def fprop(self, x, **kwargs):\n del kwargs\n my_conv = functools.partial(tf.layers.conv2d,\n kernel_size=3,\n strides=2,\n padding='valid',\n activation=tf.nn.relu,\n kernel_initializer=HeReLuNormalInitializer)\n my_dense = functools.partial(\n tf.layers.dense, kernel_initializer=HeReLuNormalInitializer)\n\n with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE):\n for depth in [96, 256, 384, 384, 256]:\n x = my_conv(x, depth)\n y = tf.layers.flatten(x)\n y = my_dense(y, 4096, tf.nn.relu)\n y = fc7 = my_dense(y, 4096, tf.nn.relu)\n y = my_dense(y, 1000)\n return {'fc7': fc7,\n self.O_LOGITS: y,\n self.O_PROBS: tf.nn.softmax(logits=y)}\n\n\ndef make_imagenet_cnn(input_shape=(None, 224, 224, 3)):\n return ModelImageNetCNN('imagenet')\n"
] |
[
[
"tensorflow.variable_scope",
"tensorflow.layers.flatten",
"tensorflow.nn.softmax"
]
] |
XiaojingGeorgeZhang/PythonRobotics | ["2723a0a2eb3c8ccddf5810aa9380f1f9276caabd"] | ["PathTracking/lqr/lqr_tracking.py"] |
[
"#! /usr/bin/python\n\"\"\"\n\nPath tracking simulation with LQR steering control and PID speed control.\n\nauthor: Atsushi Sakai\n\n\"\"\"\nimport numpy as np\nimport math\nimport matplotlib.pyplot as plt\nimport unicycle_model\nfrom pycubicspline import pycubicspline\nfrom matplotrecorder import matplotrecorder\nimport scipy.linalg as la\n\nKp = 1.0 # speed proportional gain\n\n# LQR parameter\nQ = np.eye(4)\nR = np.eye(1)\n\nanimation = True\n# animation = False\n\n#matplotrecorder.donothing = True\n\n\ndef PIDControl(target, current):\n a = Kp * (target - current)\n\n return a\n\n\ndef pi_2_pi(angle):\n while (angle > math.pi):\n angle = angle - 2.0 * math.pi\n\n while (angle < -math.pi):\n angle = angle + 2.0 * math.pi\n\n return angle\n\n\ndef solve_DARE(A, B, Q, R):\n \"\"\"\n solve a discrete time_Algebraic Riccati equation (DARE)\n \"\"\"\n X = Q\n maxiter = 150\n eps = 0.01\n\n for i in range(maxiter):\n Xn = A.T * X * A - A.T * X * B * \\\n la.inv(R + B.T * X * B) * B.T * X * A + Q\n if (abs(Xn - X)).max() < eps:\n X = Xn\n break\n X = Xn\n\n return Xn\n\n\ndef dlqr(A, B, Q, R):\n \"\"\"Solve the discrete time lqr controller.\n x[k+1] = A x[k] + B u[k]\n cost = sum x[k].T*Q*x[k] + u[k].T*R*u[k]\n # ref Bertsekas, p.151\n \"\"\"\n\n # first, try to solve the ricatti equation\n X = solve_DARE(A, B, Q, R)\n\n # compute the LQR gain\n K = np.matrix(la.inv(B.T * X * B + R) * (B.T * X * A))\n\n eigVals, eigVecs = la.eig(A - B * K)\n\n return K, X, eigVals\n\n\ndef lqr_steering_control(state, cx, cy, cyaw, ck, pe, pth_e):\n ind, e = calc_nearest_index(state, cx, cy, cyaw)\n\n k = ck[ind]\n v = state.v\n th_e = pi_2_pi(state.yaw - cyaw[ind])\n\n A = np.matrix(np.zeros((4, 4)))\n A[0, 0] = 1.0\n A[0, 1] = unicycle_model.dt\n A[1, 2] = v\n A[2, 2] = 1.0\n A[2, 3] = unicycle_model.dt\n # print(A)\n\n B = np.matrix(np.zeros((4, 1)))\n B[3, 0] = v / unicycle_model.L\n\n K, _, _ = dlqr(A, B, Q, R)\n\n x = np.matrix(np.zeros((4, 1)))\n\n x[0, 0] = e\n x[1, 0] = (e - pe)/unicycle_model.dt\n x[2, 0] = th_e\n x[3, 0] = (th_e - pth_e)/unicycle_model.dt\n\n ff = math.atan2(unicycle_model.L * k, 1)\n fb = pi_2_pi((-K * x)[0, 0])\n\n delta = ff + fb\n\n return delta, ind, e, th_e\n\n\ndef calc_nearest_index(state, cx, cy, cyaw):\n dx = [state.x - icx for icx in cx]\n dy = [state.y - icy for icy in cy]\n\n d = [abs(math.sqrt(idx ** 2 + idy ** 2)) for (idx, idy) in zip(dx, dy)]\n\n mind = min(d)\n\n ind = d.index(mind)\n\n dxl = cx[ind] - state.x\n dyl = cy[ind] - state.y\n\n angle = pi_2_pi(cyaw[ind] - math.atan2(dyl, dxl))\n if angle < 0:\n mind *= -1\n\n return ind, mind\n\n\ndef closed_loop_prediction(cx, cy, cyaw, ck, speed_profile, goal):\n T = 500.0 # max simulation time\n goal_dis = 0.3\n stop_speed = 0.05\n\n state = unicycle_model.State(x=-0.0, y=-0.0, yaw=0.0, v=0.0)\n\n time = 0.0\n x = [state.x]\n y = [state.y]\n yaw = [state.yaw]\n v = [state.v]\n t = [0.0]\n target_ind = calc_nearest_index(state, cx, cy, cyaw)\n\n e, e_th = 0.0, 0.0\n\n while T >= time:\n dl, target_ind, e, e_th = lqr_steering_control(state, cx, cy, cyaw, ck, e, e_th)\n\n ai = PIDControl(speed_profile[target_ind], state.v)\n # state = unicycle_model.update(state, ai, di)\n state = unicycle_model.update(state, ai, dl)\n\n if abs(state.v) <= stop_speed:\n target_ind += 1\n\n time = time + unicycle_model.dt\n\n # check goal\n dx = state.x - goal[0]\n dy = state.y - goal[1]\n if math.sqrt(dx ** 2 + dy ** 2) <= goal_dis:\n print(\"Goal\")\n break\n\n x.append(state.x)\n y.append(state.y)\n yaw.append(state.yaw)\n 
v.append(state.v)\n t.append(time)\n\n if target_ind % 1 == 0 and animation:\n plt.cla()\n plt.plot(cx, cy, \"-r\", label=\"course\")\n plt.plot(x, y, \"ob\", label=\"trajectory\")\n plt.plot(cx[target_ind], cy[target_ind], \"xg\", label=\"target\")\n plt.axis(\"equal\")\n plt.grid(True)\n plt.title(\"speed[km/h]:\" + str(round(state.v * 3.6, 2)) +\n \",target index:\" + str(target_ind))\n plt.pause(0.0001)\n matplotrecorder.save_frame() # save each frame\n\n plt.close()\n return t, x, y, yaw, v\n\n\ndef calc_speed_profile(cx, cy, cyaw, target_speed):\n speed_profile = [target_speed] * len(cx)\n\n direction = 1.0\n\n # Set stop point\n for i in range(len(cx) - 1):\n dyaw = cyaw[i + 1] - cyaw[i]\n switch = math.pi / 4.0 <= dyaw < math.pi / 2.0\n\n if switch:\n direction *= -1\n\n if direction != 1.0:\n speed_profile[i] = - target_speed\n else:\n speed_profile[i] = target_speed\n\n if switch:\n speed_profile[i] = 0.0\n\n speed_profile[-1] = 0.0\n\n # flg, ax = plt.subplots(1)\n # plt.plot(speed_profile, \"-r\")\n # plt.show()\n\n return speed_profile\n\n\ndef main():\n print(\"LQR steering control tracking start!!\")\n ax = [0.0, 6.0, 12.5, 10.0, 7.5, 3.0, -1.0]\n ay = [0.0, -3.0, -5.0, 6.5, 3.0, 5.0, -2.0]\n goal = [ax[-1], ay[-1]]\n\n cx, cy, cyaw, ck, s = pycubicspline.calc_spline_course(ax, ay, ds=0.1)\n target_speed = 10.0 / 3.6\n\n sp = calc_speed_profile(cx, cy, cyaw, target_speed)\n\n t, x, y, yaw, v = closed_loop_prediction(cx, cy, cyaw, ck, sp, goal)\n\n if animation:\n matplotrecorder.save_movie(\"animation.gif\", 0.1) # gif is ok.\n\n flg, _ = plt.subplots(1)\n plt.plot(ax, ay, \"xb\", label=\"input\")\n plt.plot(cx, cy, \"-r\", label=\"spline\")\n plt.plot(x, y, \"-g\", label=\"tracking\")\n plt.grid(True)\n plt.axis(\"equal\")\n plt.xlabel(\"x[m]\")\n plt.ylabel(\"y[m]\")\n plt.legend()\n\n flg, ax = plt.subplots(1)\n plt.plot(s, [math.degrees(iyaw) for iyaw in cyaw], \"-r\", label=\"yaw\")\n plt.grid(True)\n plt.legend()\n plt.xlabel(\"line length[m]\")\n plt.ylabel(\"yaw angle[deg]\")\n\n flg, ax = plt.subplots(1)\n plt.plot(s, ck, \"-r\", label=\"curvature\")\n plt.grid(True)\n plt.legend()\n plt.xlabel(\"line length[m]\")\n plt.ylabel(\"curvature [1/m]\")\n\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"matplotlib.pyplot.legend",
"numpy.eye",
"matplotlib.pyplot.cla",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.close",
"matplotlib.pyplot.grid",
"scipy.linalg.inv",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"numpy.zeros",
"matplotlib.pyplot.pause",
"scipy.linalg.eig",
"matplotlib.pyplot.ylabel"
]
] |
zhangganlin/Improved-PSMNet-for-Deep-Stereo-Disparity-Estimation | ["47fa95a52360ce1e2b507e385fc21ad2567b86ee"] | ["models/dilatedcostfiltering.py"] |
[
"from __future__ import print_function\nimport torch\nimport torch.nn as nn\nimport torch.utils.data\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nimport math\nfrom .submodule import *\n\nclass dilated_sub(nn.Module):\n def __init__(self, inplanes):\n super(dilated_sub, self).__init__()\n\n self.conv0 = nn.Sequential(convbn_3d(inplanes, inplanes, 3, 2, 1), \n nn.ReLU(inplace=True)) #39\n\n self.conv1 = nn.Sequential(convbn_3d(inplanes, inplanes, 3, 1, 1),\n nn.ReLU(inplace=True)) #40\n\n self.conv2_1 = nn.Sequential(convbn_3d(inplanes, inplanes, kernel_size=3, stride=1, pad=1),\n nn.ReLU(inplace=True))\n\n self.conv2_2 = nn.Sequential(convbn_3d(inplanes, inplanes, kernel_size=3, stride=1, pad=2, dil = 2),\n nn.ReLU(inplace=True))\n\n self.conv2_3 = nn.Sequential(convbn_3d(inplanes, inplanes, kernel_size=3, stride=1, pad=4, dil = 4),\n nn.ReLU(inplace=True))\n\n self.conv3_cat = nn.Sequential(convbn_3d(inplanes*3, inplanes, kernel_size=3, stride=1, pad=1),\n nn.ReLU(inplace=True))\n \n self.deconv = nn.Sequential(nn.ConvTranspose3d(inplanes, inplanes, kernel_size=3, padding=1, output_padding=1, stride=2,bias=False),\n nn.BatchNorm3d(32))\n self.pred_conv = nn.Sequential(convbn_3d(32, 32, 3, 1, 1),\n nn.ReLU(inplace=True))\n\n def forward(self, x, last):\n out_1 = self.conv0(x)\n out_2 = self.conv3_cat(torch.cat((self.conv2_1(out_1),self.conv2_2(out_1),self.conv2_3(out_1)),1))\n out_1 = self.conv1(out_1)+last\n \n out_2 = self.deconv(out_2)\n out_3 = self.pred_conv(out_2)\n return out_1,out_2,out_3\n\nclass PSMNet(nn.Module):\n def __init__(self, maxdisp, gpu=True, num_groups = 40, concat_channels=12, seg=False):\n super(PSMNet, self).__init__()\n self.maxdisp = maxdisp\n\n self.seg = seg\n\n self.gpu = gpu\n\n self.num_groups = num_groups\n\n self.concat_channels = concat_channels #cost volume channels: num_groups + 2*concat_channels\n\n self.feature_extraction = feature_extraction(self.concat_channels)\n\n self.layer37_38 = nn.Sequential(convbn_3d(self.num_groups + self.concat_channels*2 + self.seg, 32, 3, 1, 1),\n nn.ReLU(inplace=True),\n convbn_3d(32, 32, 3, 1, 1),\n nn.ReLU(inplace=True))\n\n self.layer39_46 = dilated_sub(32)\n self.layer47_54 = dilated_sub(32)\n self.layer55_62 = dilated_sub(32)\n\n\n self.classif1 = nn.Sequential(convbn_3d(32, 32, 3, 1, 1),\n nn.ReLU(inplace=True),\n nn.Conv3d(32, 1, kernel_size=3, padding=1, stride=1,bias=False))\n\n self.classif2 = nn.Sequential(convbn_3d(32, 32, 3, 1, 1),\n nn.ReLU(inplace=True),\n nn.Conv3d(32, 1, kernel_size=3, padding=1, stride=1,bias=False))\n\n self.classif3 = nn.Sequential(convbn_3d(32, 32, 3, 1, 1),\n nn.ReLU(inplace=True),\n nn.Conv3d(32, 1, kernel_size=3, padding=1, stride=1,bias=False))\n\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n elif isinstance(m, nn.Conv3d):\n n = m.kernel_size[0] * m.kernel_size[1]*m.kernel_size[2] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. 
/ n))\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm3d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n m.bias.data.zero_()\n\n\n def forward(self, left, right, seg):\n\n refimg_fea, refimg_fea_gwc = self.feature_extraction(left)\n targetimg_fea, targetimg_fea_gwc = self.feature_extraction(right)\n\n if self.seg:\n seg_downsample = F.interpolate(seg,scale_factor=0.25)\n B, C, H, W = refimg_fea.shape\n volume_seg = refimg_fea.new_zeros([B, 1, self.maxdisp // 4, H, W])\n for i in range(self.maxdisp // 4):\n if i > 0:\n volume_seg[:, :, i, :, i:] = seg_downsample[:, :,:, i:]\n else:\n volume_seg[:, :, i, :, :] = seg_downsample\n volume_seg = volume_seg.contiguous()\n\n\n if self.num_groups == 0:\n concat_volume = build_concat_volume(refimg_fea, targetimg_fea, self.maxdisp // 4)\n if self.seg:\n volume = torch.cat((concat_volume, volume_seg), 1)\n else:\n volume = concat_volume\n else:\n gwc_volume = build_gwc_volume(refimg_fea_gwc, targetimg_fea_gwc, self.maxdisp // 4, self.num_groups)\n concat_volume = build_concat_volume(refimg_fea, targetimg_fea, self.maxdisp // 4)\n if self.seg:\n volume = torch.cat((gwc_volume, concat_volume, volume_seg), 1)\n else:\n volume = torch.cat((gwc_volume, concat_volume), 1)\n\n layer38_out = self.layer37_38(volume)\n B,C,H,W,D = layer38_out.shape\n layer40_out, layer45_out, cost1 = self.layer39_46(layer38_out,layer38_out.new_zeros([B,C,H//2,W//2,D//2]))\n cost1 = cost1+layer38_out\n\n layer48_out, layer53_out, cost2 = self.layer47_54(layer45_out,layer40_out)\n cost2 = cost2+layer38_out\n\n _,_,cost3 = self.layer55_62(layer53_out,layer48_out)\n cost3 = cost3+layer38_out\n\n cost1 = self.classif1(cost1)\n cost2 = self.classif2(cost2) + cost1\n cost3 = self.classif3(cost3) + cost2\n\n if self.training:\n cost1 = F.upsample(cost1, [self.maxdisp,left.size()[2],left.size()[3]], mode='trilinear')\n cost2 = F.upsample(cost2, [self.maxdisp,left.size()[2],left.size()[3]], mode='trilinear')\n\n cost1 = torch.squeeze(cost1,1)\n pred1 = F.softmax(cost1,dim=1)\n pred1 = disparityregression(self.maxdisp,self.gpu)(pred1)\n\n cost2 = torch.squeeze(cost2,1)\n pred2 = F.softmax(cost2,dim=1)\n pred2 = disparityregression(self.maxdisp,self.gpu)(pred2)\n\n cost3 = F.upsample(cost3, [self.maxdisp,left.size()[2],left.size()[3]], mode='trilinear')\n cost3 = torch.squeeze(cost3,1)\n pred3 = F.softmax(cost3,dim=1)\n #For your information: This formulation 'softmax(c)' learned \"similarity\" \n #while 'softmax(-c)' learned 'matching cost' as mentioned in the paper.\n #However, 'c' or '-c' do not affect the performance because feature-based cost volume provided flexibility.\n pred3 = disparityregression(self.maxdisp,self.gpu)(pred3)\n\n if self.training:\n return pred1, pred2, pred3\n else:\n return pred3\n\n"
] |
[
[
"torch.nn.functional.softmax",
"torch.cat",
"torch.nn.ConvTranspose3d",
"torch.nn.Conv3d",
"torch.nn.functional.interpolate",
"torch.nn.ReLU",
"torch.nn.BatchNorm3d",
"torch.squeeze"
]
] |
VandinLab/MASTRO | ["0197aaf49e497d6742a236fd47b1c9a6e4865145"] | ["MASTRO/plot_results.py"] |
[
"import pandas as pd\nimport argparse\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-r\", help=\"input file with results\",default=\"edges_matrix_final.txt\")\nparser.add_argument(\"-minp\", help=\"path of minpvalues\",default=\"minpvals.csv\")\nparser.add_argument(\"-t\", help=\"plot title\",default=\"AML\")\nparser.add_argument(\"-p\", type=int, help=\"type of test (0 = indipendent, 1 = permutation, 2 = topologies)\",default=0)\nargs = parser.parse_args()\n\npval_field = \"pval_ind\"\nif args.p == 1:\n pval_field = \"pval_perm\"\nif args.p == 2:\n pval_field = \"pval_topol\"\n\ndf = pd.read_csv(args.r,sep=\";\")\nprint(df)\nprint(df.columns)\n\ndf_minp = pd.read_csv(args.minp,header=None)\nminpvals = df_minp[0].values\nminpvals.sort()\nprint(minpvals)\nprint(\"numpermutations:\",minpvals.shape[0])\nsign_thr = dict()\nfor quant in [0.01 , 0.05 , 0.1]:\n sign_thr[quant] = minpvals[int(quant*int(minpvals.shape[0]))]\n print(quant,\"quant\",sign_thr[quant])\n\nnum_nodes = []\nfor traj_edges in df[\"edges_traj\"]:\n traj_edges = traj_edges.replace(\"[\",\"\")\n traj_edges = traj_edges.replace(\"]\",\"\")\n edges_ = traj_edges.split(\",\")\n nodes_set = set()\n for edge_ in edges_:\n edge_sep = \"->-\"\n if \"-?-\" in edge_:\n edge_sep = \"-?-\"\n if \"-/-\" in edge_:\n edge_sep = \"-/-\"\n nodes = edge_.split(edge_sep)\n nodes_set.add(nodes[0])\n nodes_set.add(nodes[1])\n if \"g\" in nodes_set:\n nodes_set.remove(\"g\")\n num_nodes.append(len(nodes_set))\ndf[\"numnodes\"] = num_nodes\n\nimport matplotlib.pyplot as plt\nimport matplotlib\nmatplotlib.rc('legend', fontsize=8)\n#plt.style.use('seaborn-whitegrid')\nplt.yscale('log')\nplt.xscale('log')\n\nfig = plt.figure(figsize=(5,4))\nax1 = fig.add_subplot()\nplt.title(r\"$p$-values and Sign. Thresholds (\"+str(args.t)+\" data)\")\nax1.set_xlabel(r'1/Rank')\nax1.set_ylabel(r'p-value')\nax1.set_xscale('log')\nax1.set_yscale('log')\nmarker_index = 0\nax1.grid(which='both',color=\"0.95\",zorder=0)\nuniform_pval = [1/i for i in range(1,df[pval_field].shape[0]+1)]\nuniform_pval.sort()\nsorted_pval = list(df[pval_field])\nsorted_pval.sort()\nax1.scatter(uniform_pval, sorted_pval, marker=\".\", zorder=5,alpha=1.,edgecolors='none')\nendpoints_ = [uniform_pval[0],uniform_pval[-1]]\nax1.plot(endpoints_,endpoints_,color=\"gray\")\nax1.set_xlim([1.2*uniform_pval[-1],0.9*uniform_pval[0]])\nax1.set_ylim([1.2*uniform_pval[-1],0.5*sorted_pval[0]])\nfor quant in [0.01 , 0.05 , 0.1]:\n plt.axhline(y = sign_thr[quant], color = 'black', linestyle = '-',label=r\"$\\alpha=\"+str(quant)+\"$\")\n\nplt.legend()\nplt.tight_layout()\nplt.savefig(\"unif_vs_pval_\"+args.t+\".pdf\")\n\n\nimport matplotlib.pyplot as plt\nimport matplotlib\nmatplotlib.rc('legend', fontsize=8)\n#plt.style.use('seaborn-whitegrid')\nplt.yscale('log')\nplt.xscale('log')\n\nfig = plt.figure(figsize=(5,4))\nax1 = fig.add_subplot()\nplt.title(r\"$p$-values vs. support (\"+str(args.t)+\" data)\")\nax1.set_xlabel(r'Support')\nax1.set_ylabel(r'p-value')\n#ax1.set_xscale('log')\nax1.set_yscale('log')\nmarker_index = 0\nax1.grid(which='both',color=\"0.95\",zorder=0)\nax1.scatter(df[\"traj_supp\"], df[pval_field], zorder=5,alpha=0.2,edgecolors='none')\nax1.set_xlim([1,1+df[\"traj_supp\"].max()])\nax1.set_ylim([df[pval_field].min()*0.25 , 1.0])\n#plt.legend(ncol=2)\nplt.tight_layout()\nplt.savefig(\"supp_vs_pval_\"+args.t+\".pdf\")\n\n\nfig = plt.figure(figsize=(5,4))\nax1 = fig.add_subplot()\nplt.title(r\"$p$-values vs. 
number of alterations (\"+str(args.t)+\" data)\")\nax1.set_xlabel(r'Number of alterations')\nax1.set_ylabel(r'p-value')\n#ax1.set_xscale('log')\nax1.set_yscale('log')\nmarker_index = 0\nax1.grid(which='both',color=\"0.95\",zorder=0)\nax1.scatter(df[\"numnodes\"], df[pval_field], zorder=5,alpha=0.2,edgecolors='none')\nax1.set_xlim([1,1+df[\"numnodes\"].max()])\nax1.set_ylim([df[pval_field].min()*0.25 , 1.0])\n#plt.legend(ncol=2)\nplt.tight_layout()\nplt.savefig(\"numalterations_vs_pval_\"+args.t+\".pdf\")\n\n\nfig = plt.figure(figsize=(5,4))\nax1 = fig.add_subplot()\nplt.title(r\"Support vs. number of alterations (\"+str(args.t)+\" data)\")\nax1.set_xlabel(r'Number of alterations')\nax1.set_ylabel(r'Support')\n#ax1.set_xscale('log')\nax1.set_yscale('log')\nmarker_index = 0\nax1.grid(which='both',color=\"0.95\",zorder=0)\nax1.scatter(df[\"numnodes\"], df[\"traj_supp\"],zorder=5,alpha=0.2,edgecolors='none')\nax1.set_xlim([1,1+df[\"numnodes\"].max()])\nax1.set_ylim([1,df[\"traj_supp\"].max()+1])\n#plt.legend(ncol=2)\nplt.tight_layout()\nplt.savefig(\"numalterations_vs_freq_\"+args.t+\".pdf\")\n"
] |
[
[
"matplotlib.pyplot.legend",
"pandas.read_csv",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.yscale",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xscale",
"matplotlib.rc",
"matplotlib.pyplot.figure"
]
] |
ThoughtWorksInc/tf-image-intepreter | ["113fc808a081984c8be4814bc3403b908bb6b2c6"] | ["image_interpreter/layers/rpn_data.py"] |
[
"import numpy as np\nimport tensorflow as tf\nfrom image_interpreter.layers.common import AnchorTargetMixin\n\n\nclass RpnData(AnchorTargetMixin):\n def __init__(self, debug=False):\n super().__init__(debug=debug)\n\n def generate(self, image, scale, bboxes):\n shape = tf.shape(image)\n # TODO: NotImplementedError: Negative start indices are not currently supported\n # height, width = shape[-2:]\n # height, width = shape[-2:]\n height = shape[1]\n width = shape[2]\n\n if self._debug:\n height = tf.Print(height, [height], message='image height: ')\n width = tf.Print(width, [width], message='image width: ')\n\n anchors = self._generate_valid_anchors(width, height)\n overlaps = self._calculate_overlaps(tf.cast(anchors, dtype=tf.float32), tf.cast(bboxes, dtype=tf.float32))\n\n labels = self._generate_labels(overlaps)\n\n labels = self._subsample_positive(labels)\n labels = self._subsample_negative(labels)\n\n return labels\n\n def _generate_labels(self, overlaps):\n labels = tf.Variable(tf.ones(shape=(tf.shape(overlaps)[0],), dtype=tf.float32) * -1, trainable=False,\n validate_shape=False)\n gt_max_overlaps = tf.arg_max(overlaps, dimension=0)\n anchor_max_overlaps = tf.arg_max(overlaps, dimension=1)\n mask = tf.one_hot(anchor_max_overlaps, tf.shape(overlaps)[1], on_value=True, off_value=False)\n max_overlaps = tf.boolean_mask(overlaps, mask)\n if self._debug:\n max_overlaps = tf.Print(max_overlaps, [max_overlaps])\n labels = tf.scatter_update(labels, gt_max_overlaps, tf.ones((tf.shape(gt_max_overlaps)[0],)))\n # TODO: extract config object\n over_threshold_mask = tf.reshape(tf.where(max_overlaps > 0.5), (-1,))\n if self._debug:\n over_threshold_mask = tf.Print(over_threshold_mask, [over_threshold_mask], message='over threshold index : ')\n labels = tf.scatter_update(labels, over_threshold_mask, tf.ones((tf.shape(over_threshold_mask)[0],)))\n # TODO: support clobber positive in the origin implement\n below_threshold_mask = tf.reshape(tf.where(max_overlaps < 0.3), (-1,))\n if self._debug:\n below_threshold_mask = tf.Print(below_threshold_mask, [below_threshold_mask], message='below threshold index : ')\n labels = tf.scatter_update(labels, below_threshold_mask, tf.zeros((tf.shape(below_threshold_mask)[0],)))\n return labels\n\n def _generate_valid_anchors(self, width, height):\n shifts = self._generate_shifts(width, height)\n all_anchors = self._generate_all_anchors(shifts)\n anchors = self._filter_inside_anchors(all_anchors, height, width)\n return anchors\n\n def _filter_inside_anchors(self, all_anchors, height, width):\n # filter anchors\n inds_inside = tf.where(\n (all_anchors[:, 0] > 0) &\n (all_anchors[:, 1] > 0) &\n (all_anchors[:, 2] < width) &\n (all_anchors[:, 3] < height)\n )\n if self._debug:\n inds_inside = tf.Print(inds_inside, [tf.shape(inds_inside)], message='inside anchors: ')\n anchors = tf.gather(all_anchors, inds_inside)\n return anchors\n\n def _subsample_positive(self, labels):\n # TODO: not implemented\n return labels\n\n def _subsample_negative(self, labels):\n # TODO: not implemented\n return labels\n\n\nif __name__ == '__main__':\n with tf.Session() as sess:\n rpn_data = RpnData(debug=True)\n test_image = tf.reshape(tf.constant(np.ones((600, 400))), (1, 600, 400, 1))\n fake_bboxes = tf.constant([\n [10, 10, 150, 150],\n [70, 10, 150, 50],\n [10, 70, 50, 150],\n [150, 10, 70, 50],\n [10, 150, 50, 70],\n [10, 10, 390, 590],\n ], dtype=tf.int32)\n test_overlaps = rpn_data.generate(test_image, None, bboxes=fake_bboxes)\n sess.run(tf.initialize_all_variables())\n print_op = 
tf.Print(test_overlaps, [tf.shape(test_overlaps), tf.where(test_overlaps > 0), test_overlaps],\n summarize=10)\n sess.run(print_op)\n"
] |
[
[
"tensorflow.boolean_mask",
"tensorflow.constant",
"tensorflow.Print",
"tensorflow.arg_max",
"tensorflow.shape",
"tensorflow.cast",
"numpy.ones",
"tensorflow.gather",
"tensorflow.initialize_all_variables",
"tensorflow.where",
"tensorflow.Session"
]
] |
gafusion/omas | ["6740fd040d6af59e0aec54f977637b221733bd07"] | ["omas/examples/omas_resample.py"] |
[
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nInterpolate whole ODS\n=====================\nSeamless handling of coordinates within OMAS makes it easy to reinterpolate a whole ODS on a new grid\n\"\"\"\n\nimport numpy\nfrom omas import *\n\n# original ODS\nods = ODS()\nods.sample_equilibrium()\n\n# interpolated ODS\nods_interpolated = ODS()\n\n# define new psi grid of ods_interpolated\nnew_psi = numpy.linspace(ods['equilibrium.time_slice.0.profiles_1d.psi'][0], ods['equilibrium.time_slice.0.profiles_1d.psi'][-1], 21)\nods_interpolated['equilibrium.time_slice.0.profiles_1d.psi'] = new_psi\n\n# interpolate whole ods on new psi grid\nwith omas_environment(ods_interpolated, coordsio=ods):\n ods_interpolated.update(ods)\n\n# print some quantity from interpolated ods\nassert len(ods_interpolated['equilibrium.time_slice.0.profiles_1d.pressure']) == 21\nprint(ods_interpolated['equilibrium.time_slice.0.profiles_1d.pressure'])\n"
] |
[
[
"numpy.linspace"
]
] |
kashif/neon | ["d4d8ed498ee826b67f5fda1746d2d65c8ce613d2"] | ["neon/params/val_init.py"] |
[
"# ----------------------------------------------------------------------------\n# Copyright 2014 Nervana Systems Inc.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ----------------------------------------------------------------------------\n\"\"\"\nClasses used to construct and initialize the values of parameter Tensors.\n\"\"\"\n\nimport logging\nimport math\nimport numpy as np\n\nfrom neon.util.param import opt_param\nfrom neon.util.persist import YAMLable\n\nlogger = logging.getLogger(__name__)\n\n\ndef gen_weights(backend, is_local, distribution, *myargs, **kwargs):\n if backend.is_dist:\n kwargs['ptype'] = 'replica' if is_local else 'vfragment'\n return getattr(backend, distribution)(*myargs, **kwargs)\n\n\nclass ValGen(YAMLable):\n \"\"\"\n Base class used to generate new Tensors initialized in a specific manner.\n \"\"\"\n def __init__(self, **kwargs):\n self.__dict__.update(kwargs)\n opt_param(self, ['backend'])\n opt_param(self, ['is_local'], False)\n\n def __str__(self):\n return (\"{cl_nm} utilizing {be_nm} backend\".format(\n cl_nm=self.__class__.__name__,\n be_nm=self.backend.__class__.__name__))\n\n def initialize(self, backend):\n \"\"\"\n Perform any additional setup (like attaching the backend), required\n prior to generating values.\n \"\"\"\n self.backend = backend\n\n def log_weight_creation(self, shape):\n if hasattr(self, 'is_local'):\n if not self.is_local and self.backend.is_dist:\n shape = (shape[0] * self.backend.num_dev, shape[1])\n logger.info(\"Generating {cl_nm} values of shape {shape}\".format(\n cl_nm=self.__class__.__name__, shape=shape))\n\n def generate(self, shape, dtype=None):\n \"\"\"\n Construct and initialize a new Tensor object of the specified shape.\n\n Arguments:\n shape (list of ints): The size of each dimension of the Tensor.\n dtype (dtype, optional): Element data type. If not specifed we use\n the default dtype associated with that\n backend.\n\n Returns:\n neon.backends.Tensor: newly initialized data structure.\n \"\"\"\n raise NotImplementedError('This class should not be instantiated.')\n\n\nclass UniformValGen(ValGen):\n \"\"\"\n Uniform random value initialization scheme. All values are chosen to lie\n in the range [low, high) with equal probability.\n\n Arguments:\n low (float, optional): Minimal sample value. Defaults to 0.0\n high (float, optional): Maximal sample value (open-ended range).\n Defaults to 1.0\n \"\"\"\n def __init__(self, **kwargs):\n super(UniformValGen, self).__init__(**kwargs)\n opt_param(self, ['low'], 0.0)\n opt_param(self, ['high'], 1.0)\n\n def __str__(self):\n return (super(UniformValGen, self).__str__() +\n \"\\n\\tlow: {self.low}, high: {self.high}\".format(self=self))\n\n def generate(self, shape, dtype=None):\n \"\"\"\n Construct and initialize a new Tensor object of the specified shape.\n\n Arguments:\n shape (list of ints): The size of each dimension of the Tensor.\n dtype (dtype, optional): Element data type. 
If not specifed we use\n the default dtype associated with that\n backend.\n\n Returns:\n neon.backends.Tensor: newly initialized data structure.\n \"\"\"\n self.log_weight_creation(shape)\n return gen_weights(self.backend, self.is_local, 'uniform',\n low=self.low, high=self.high, size=shape,\n dtype=dtype)\n\n\nclass AutoUniformValGen(UniformValGen):\n \"\"\"\n Uniform random value initialization scheme with low and high automatically\n inferred from the dimensions of the shape being passed. Typically this\n will be uniform between +/- 1/sqrt(fan-in). If relu is passed in and true\n we further scale these values by sqrt(2).\n\n Arguments:\n relu (bool, optional): Producing values for ReLU activated weights,\n further scale values by sqrt(2). Defaults to\n False\n \"\"\"\n def __init__(self, **kwargs):\n super(AutoUniformValGen, self).__init__(**kwargs)\n opt_param(self, ['relu'], False)\n\n self.low = float('nan')\n self.high = float('nan')\n\n def generate(self, shape, dtype=None):\n \"\"\"\n Construct and initialize a new Tensor object of the specified shape.\n\n Arguments:\n shape (list of ints): The size of each dimension of the Tensor.\n dtype (dtype, optional): Element data type. If not specifed we use\n the default dtype associated with that\n backend.\n\n Returns:\n neon.backends.Tensor: newly initialized data structure.\n \"\"\"\n if self.is_local:\n self.low = - 1.0 / math.sqrt(shape[0])\n else:\n self.low = - 1.0 / math.sqrt(shape[-1])\n if self.relu:\n self.low *= math.sqrt(2) ** self.relu\n self.high = - self.low\n return super(AutoUniformValGen, self).generate(shape, dtype)\n\n\nclass GaussianValGen(ValGen):\n \"\"\"\n Gaussian (aka Normal) distributed random value initialization scheme.\n\n Arguments:\n loc (float, optional): Central value location. Defaults to 0.0\n scale (float, optional): Standard deviation for samples. Defaults to\n 1.0\n \"\"\"\n def __init__(self, **kwargs):\n super(GaussianValGen, self).__init__(**kwargs)\n opt_param(self, ['loc'], 0.0)\n opt_param(self, ['scale'], 1.0)\n\n def __str__(self):\n return (super(GaussianValGen, self).__str__() +\n \"\\n\\tloc: {self.loc}, scale: {self.scale}\".format(self=self))\n\n def generate(self, shape, dtype=None):\n \"\"\"\n Construct and initialize a new Tensor object of the specified shape.\n\n Arguments:\n shape (list of ints): The size of each dimension of the Tensor.\n dtype (dtype, optional): Element data type. If not specifed we use\n the default dtype associated with that\n backend.\n\n Returns:\n neon.backends.Tensor: newly initialized data structure.\n \"\"\"\n self.log_weight_creation(shape)\n return gen_weights(self.backend, self.is_local, 'normal',\n loc=self.loc, scale=self.scale, size=shape,\n dtype=dtype)\n\n\n# alias NormalValGen as GaussianValGen\nNormalValGen = GaussianValGen\n\n\nclass SparseEigenValGen(ValGen):\n \"\"\"\n Sparse Eigenvalue based initialization scheme suitable for recurrent neural\n networks, as described in Sutskever2013.\n\n Arguments:\n sparseness (int, optional): controls number of non-zero entries.\n Should set to a value between 1 (extremely\n sparse) and fan-in count (dense). Defaults\n to 15.\n eigenvalue (float, optional): For square matrices, we scale by this\n value after dividing by the maximum\n eigenvalue. 
Defaults to 1.2\n \"\"\"\n def __init__(self, **kwargs):\n super(SparseEigenValGen, self).__init__(**kwargs)\n opt_param(self, ['sparseness'], 15)\n opt_param(self, ['eigenvalue'], 1.2)\n\n def __str__(self):\n return (super(SparseEigenValGen, self).__str__() +\n \"\\n\\tsparseness: {self.sparseness}, eigenvalue: \"\n \"{self.eigenvalue}\".format(self=self))\n\n def generate(self, shape, dtype=None):\n \"\"\"\n Construct and initialize a new Tensor object of the specified shape.\n\n Arguments:\n shape (list of ints): The size of each dimension of the Tensor.\n dtype (dtype, optional): Element data type. If not specifed we use\n the default dtype associated with that\n backend.\n\n Returns:\n neon.backends.Tensor: newly initialized data structure.\n \"\"\"\n self.log_weight_creation(shape)\n if len(shape) < 2:\n raise ValueError(\"Can only generate Tensors with at least 2\"\n \" dimensions, you gave: {}\".format(len(shape)))\n elements = shape[-2] * shape[-1]\n nonzeros = shape[-2] * self.sparseness\n weights = np.zeros(shape).flatten()\n nonzeroindex = np.random.permutation(elements)[0:nonzeros]\n weights[nonzeroindex] = 0.3 * np.random.randn(nonzeros)\n weights = weights.reshape(shape)\n if shape[-2] == shape[-1]:\n temp = np.linalg.eig(weights)\n max_eig = np.max(np.absolute(temp[0]))\n logger.info(\"dividing by max eigenvalue %2.2f\", max_eig)\n weights = self.eigenvalue * weights / max_eig\n else:\n logger.info(\"matrix is non-square, no eigenvalue scaling.\")\n return self.backend.array(weights)\n\n\nclass NodeNormalizedValGen(ValGen):\n \"\"\"\n Normalized initialization as described in Glorot2010. Values are uniform\n distributed to lie in the range:\n scale * [ - sqrt(6) / sqrt(sum(shape)), sqrt(6) / sqrt(sum(shape)))\n\n Arguments:\n scale (float, optional): Additional scalar to multiply by to extend the\n range. Defaults to 1.0.\n \"\"\"\n def __init__(self, **kwargs):\n super(NodeNormalizedValGen, self).__init__(**kwargs)\n opt_param(self, ['scale'], 1.0)\n\n def __str__(self):\n return (super(NodeNormalizedValGen, self).__str__() +\n \"\\n\\tscale: {self.scale}\".format(self=self))\n\n def generate(self, shape, dtype=None):\n \"\"\"\n Construct and initialize a new Tensor object of the specified shape.\n\n Arguments:\n shape (list of ints): The size of each dimension of the Tensor.\n dtype (dtype, optional): Element data type. If not specifed we use\n the default dtype associated with that\n backend.\n\n Returns:\n neon.backends.Tensor: newly initialized data structure.\n \"\"\"\n self.log_weight_creation(shape)\n\n denom = sum(shape)\n if self.backend.is_dist and not self.is_local:\n denom = shape[1] + shape[0] * self.backend.num_dev\n node_norm = self.scale * math.sqrt(6.0 / denom)\n\n return gen_weights(self.backend, self.is_local, 'uniform',\n low=-node_norm, high=node_norm, size=shape,\n dtype=dtype)\n\n\nclass OrthoNormalizedValGen(ValGen):\n \"\"\"\n Orthogonal matrix initialization. 
As described in Saxe et al.\n (http://arxiv.org/abs/1312.6120)\n \"\"\"\n def __init__(self, **kwargs):\n super(OrthoNormalizedValGen, self).__init__(**kwargs)\n opt_param(self, ['relu'], False)\n\n def generate(self, shape, dtype=None):\n self.log_weight_creation(shape)\n if len(shape) != 2:\n raise ValueError(\"Can only generate Tensors with exactly 2\"\n \" dimensions, you gave: {}\".format(len(shape)))\n\n # Non local, shape is O x I, local shape is (C x R x S) x O\n oi_shape = shape if self.is_local else (shape[1], shape[0])\n init_wts = np.random.normal(0.0, 1.0, oi_shape)\n u, _, v = np.linalg.svd(init_wts, full_matrices=False)\n q = u if u.shape == oi_shape else v\n q.shape = oi_shape\n if self.relu:\n q *= math.sqrt(2)\n return self.backend.array(q, dtype)\n\n\nclass IdentityValGen(ValGen):\n \"\"\"\n Identity matrix initialization. As described in Saxe et al.\n (http://arxiv.org/pdf/1504.00941v2.pdf)\n \"\"\"\n def __init__(self, **kwargs):\n super(IdentityValGen, self).__init__(**kwargs)\n opt_param(self, ['scale'], 1.0)\n\n def generate(self, shape, dtype=None):\n \"\"\"\n Construct and initialize a new Tensor object of the specified shape.\n\n Arguments:\n shape (list of ints): The size of each dimension of the Tensor.\n dtype (dtype, optional): Element data type. If not specifed we use\n the default dtype associated with that\n backend.\n\n Returns:\n neon.backends.Tensor: newly initialized data structure.\n \"\"\"\n logger.info(\"Generating {cl_nm} values of shape {shape}\".format(\n cl_nm=self.__class__.__name__, shape=shape))\n if len(shape) != 2:\n raise ValueError(\"Can only generate Tensors with exactly 2\"\n \" dimensions, you gave: {}\".format(len(shape)))\n return self.backend.array(self.scale * np.eye(*shape), dtype)\n"
] |
[
[
"numpy.linalg.svd",
"numpy.absolute",
"numpy.linalg.eig",
"numpy.eye",
"numpy.random.normal",
"numpy.random.permutation",
"numpy.random.randn",
"numpy.zeros"
]
] |
sutianjin/MachineLearn | ["1597516f56f1320b7d024ed1ad41c3ad3ee9eb7c"] | ["tensorflow/day1/fetch-op.py"] |
[
"import tensorflow as tf\n\ninput1 = tf.constant(3.0)\ninput2 = tf.constant(2.0)\ninput3 = tf.constant(5.0)\nintermed = tf.add(input2, input3)\nmul = tf.multiply(input1, intermed)\n\nwith tf.Session() as sess:\n\tresult = sess.run([mul, intermed])\n\tprint(result)"
] |
[
[
"tensorflow.add",
"tensorflow.multiply",
"tensorflow.constant",
"tensorflow.Session"
]
] |
watronfire/monterey | ["d1fea8e8a98dd39a6658fa7e5b2dd6a0fc3ec5e0"] | ["workflow/scripts/shuffle_tips.py"] |
[
"import pandas as pd\nfrom dendropy import Tree\nfrom epiweeks import Week\nfrom subprocess import run\n\ndef shuffle_tips( tree, metadata, id_col, date_col, map_file, output_loc ):\n \n # Get all the tips in tree\n gotree_output = run( f\"gotree labels -i {tree}\", capture_output=True, text=True, shell=True )\n tip_labels = gotree_output.stdout.split( \"\\n\" )\n\n md = pd.read_csv( metadata, usecols=[id_col, date_col], parse_dates=[date_col] )\n md = md.loc[md[id_col].isin( tip_labels )]\n assert md.shape[0] > 0, f\"{id_col} column of metadata doesn't contain any tree tips\"\n\n md[\"week\"] = md[date_col].apply( lambda x: Week.fromdate(x).startdate() )\n\n def shuffle_columns( entry, column ):\n entry[\"shuffled\"] = entry[column].sample( frac=1, replace=False ).to_list()\n return entry\n\n md_shuffled = md.groupby( \"week\" ).apply( shuffle_columns, column=id_col )\n\n assert md_shuffled.shape[0] == md.shape[0], f\"Shuffled dataframe doesn't have the same number of rows (shuffled: {md_shuffled.shape[0]} vs. original: {md.shape[0]})\"\n assert not md_shuffled[\"shuffled\"].equals( md_shuffled[id_col] ), f\"Shuffled column is identical to original column\"\n\n md_shuffled[[id_col, \"shuffled\"]].to_csv( map_file, index=False, sep=\"\\t\", header=False )\n\n run( f\"gotree rename -m {map_file} -i {tree} -o {output_loc}\", shell=True)\n\nshuffle_tips( snakemake.input.tree,\n snakemake.input.metadata,\n snakemake.params.id_col,\n snakemake.params.date_col,\n snakemake.output.map_file,\n snakemake.output.shuffled_tree )\n\n"
] |
[
[
"pandas.read_csv"
]
] |
dolphin-zs/HierST | ["34a5e2c6729b849ba7534408e3d04aa336621aa1"] | ["src/run_ensemble.py"] |
[
"import argparse\nimport os\nimport torch\nimport time\nimport numpy as np\nimport pandas as pd\nimport shutil\n\nfrom data_utils import g_node_col, g_date_col, process_cdc_truth_from_csse, process_cdc_loc, get_all_cdc_label, read_cdc_forecast\nfrom base_task import load_json_from\n\n\n# exp_dir_template = '../Exp_us_{}_{}' # level, forecast_date\ncdc_forecast_dir = '../covid19-forecast-hub/data-processed'\n\n\ndef load_exp_res(exp_dir, extra_columns=None):\n task_dirs = os.listdir(exp_dir)\n\n test_results = []\n for task_dir in task_dirs:\n task_items = task_dir.split('_')\n target, horizon, model, seed = task_items[:4]\n horizon = int(horizon)\n if len(task_items) == 4:\n seed = int(seed.lstrip('seed'))\n else:\n seed = '_'.join([seed.lstrip('seed')] + task_items[4:])\n\n if model == 'gbm':\n gbm_out = pd.read_csv(os.path.join(exp_dir, task_dir, 'test_out.csv'), parse_dates=[g_date_col])\n test_res = gbm_out[[g_date_col, g_node_col, 'pred', 'label']].fillna(0)\n else:\n try:\n nn_out = torch.load(os.path.join(exp_dir, task_dir, 'Output/test.out.cpt'))\n except:\n print(f'Warning: {os.path.join(exp_dir, task_dir)} is an incomplete task directory! ...skip...')\n continue\n if 'y_scale' in nn_out and nn_out['y_scale'] == 'linear':\n log_scale = False\n else:\n log_scale = True\n nn_pred = nn_out['pred'].reset_index(drop=False)\n nn_pred['pred'] = np.expm1(nn_pred['val']) if log_scale else nn_pred['val']\n nn_pred[g_date_col] = nn_out['dates']\n nn_pred[g_node_col] = nn_out['countries'] if 'countries' in nn_out else nn_out['nodes']\n nn_label = nn_out['label'].reset_index(drop=False)\n nn_label['label'] = np.expm1(nn_label['val']) if log_scale else nn_label['val']\n nn_label[g_date_col] = nn_out['dates']\n nn_label[g_node_col] = nn_out['countries'] if 'countries' in nn_out else nn_out['nodes']\n test_res = pd.merge(nn_pred, nn_label, on=[g_date_col, g_node_col])[[g_date_col, g_node_col, 'pred', 'label']]\n\n if extra_columns is not None:\n cfg = load_json_from(os.path.join(exp_dir, task_dir, 'config.json'))\n for extra_col in extra_columns:\n if extra_col == 'best_epoch':\n test_res[extra_col] = nn_out['epoch']\n else:\n test_res[extra_col] = cfg[extra_col]\n\n test_res['target'] = target\n test_res['horizon'] = horizon\n test_res['model'] = model\n test_res['seed'] = seed\n test_results.append(test_res)\n\n exp_res = pd.concat(test_results, axis=0).sort_values(['target', 'horizon', 'model', 'seed', g_node_col]).reset_index(drop=True)\n\n return exp_res\n\n\ndef merge_cdc_loc(raw_pred):\n # ensure the order\n raw_pred = raw_pred.sort_values([g_date_col, g_node_col, 'target', 'horizon'])\n # align g_node_col with cdc location\n locs = process_cdc_loc()\n node2loc = dict(zip(locs[g_node_col], locs['location']))\n raw_pred['location'] = raw_pred[g_node_col].map(lambda x: node2loc.get(x, pd.NA))\n\n return raw_pred\n\n\ndef merge_last_cum_truth(raw_pred, forecast_date, cdc_cum_truth=None):\n if 'location' not in raw_pred.columns:\n raw_pred = merge_cdc_loc(raw_pred)\n if cdc_cum_truth is None:\n cdc_confirmed_cum_truth = process_cdc_truth_from_csse('confirmed', stat_type='cum')\n cdc_deaths_cum_truth = process_cdc_truth_from_csse('deaths', stat_type='cum')\n cdc_confirmed_cum_truth['target'] = 'confirmed'\n cdc_deaths_cum_truth['target'] = 'deaths'\n cdc_cum_truth = pd.concat([cdc_confirmed_cum_truth, cdc_deaths_cum_truth], axis=0, ignore_index=True)\n\n # merge cdc cumulative info into forecasting results\n last_date = pd.to_datetime(forecast_date) + pd.Timedelta(-1, unit='day')\n 
last_cum_truth = cdc_cum_truth[cdc_cum_truth['date'] == last_date]\n raw_pred = pd.merge(raw_pred, last_cum_truth[['location', 'target', 'value']].rename(columns={'value': 'cum_sum'}),\n on=['location', 'target'], how='left')\n\n # remove useless nodes that do not have a cdc location\n # TODO: do this when training our models\n useless_nodes = raw_pred[raw_pred['location'].isnull()][g_node_col].unique()\n if useless_nodes.size > 0:\n print(f'# useless nodes in our models {useless_nodes.size}, ...removed...')\n raw_pred = raw_pred.dropna(subset=['location', 'cum_sum']).reset_index(drop=True)\n\n return raw_pred\n\n\ndef transform_to_cdc_format(raw_pred, forecast_date):\n if 'cum_sum' not in raw_pred.columns:\n raise Exception('You should run merge_last_cum_truth before this function')\n # raw_pred = merge_last_cum_truth(raw_pred, forecast_date)\n\n # transform into CDC formats\n target2tag = {\n 'confirmed': 'case',\n 'deaths': 'death',\n }\n cdc_results = []\n for target in ['confirmed', 'deaths']:\n tag = target2tag[target]\n for n_week in [1, 2, 3, 4]:\n horizon = n_week * 7\n for stat_type in ['inc', 'cum']:\n cdc_target = f'{n_week} wk ahead {stat_type} {tag}'\n cdc_target_end_date = pd.to_datetime(forecast_date) + pd.Timedelta(horizon-1, unit='day')\n # print(cdc_target)\n cdc_res = raw_pred[(raw_pred['target'] == target) & (raw_pred['horizon'] == horizon)].reset_index(drop=True).copy()\n if stat_type == 'inc':\n if n_week == 1:\n cdc_res['value'] = cdc_res['pred']\n else:\n cdc_res['value'] = cdc_res['pred'] - raw_pred[(raw_pred['target'] == target) & (raw_pred['horizon'] == horizon-7)].reset_index(drop=True)['pred']\n else:\n cdc_res['value'] = cdc_res['cum_sum'] + cdc_res['pred']\n cdc_res = cdc_res.rename(columns={g_date_col: 'forecast_date', 'target': 'model_target'})\n cdc_res['target'] = cdc_target\n cdc_res['target_end_date'] = cdc_target_end_date\n cdc_res['type'] = 'point'\n cdc_res['quantile'] = pd.NA\n cdc_results.append(cdc_res[[\n 'forecast_date', 'target', 'target_end_date', 'location', 'type', 'quantile', 'value'\n ]])\n all_cdc_res = pd.concat(cdc_results, axis=0, ignore_index=True)\n\n return all_cdc_res\n\n\ndef eval_cdc_pred(model_pred_pairs, all_cdc_label, forecast_date,\n dropna=False, pre_loc_set=None,\n level='county', n_week=1, stat_type='inc', value_type='case'):\n target = f'{n_week} wk ahead {stat_type} {value_type}'\n target_end_date = pd.to_datetime(forecast_date) + pd.Timedelta(n_week*7-1, unit='day')\n locs = process_cdc_loc()\n if pre_loc_set is None:\n if level == 'us':\n loc_set = set(locs['location'][:1])\n elif level == 'state':\n loc_set = set(locs['location'][1:58])\n elif level == 'county':\n loc_set = set(locs['location'][58:])\n else:\n loc_set = set(locs['location'])\n else:\n loc_set = pre_loc_set\n\n eval_res = all_cdc_label[(all_cdc_label['target'] == target) &\n (all_cdc_label['target_end_date'] == target_end_date) &\n all_cdc_label['location'].isin(loc_set)].copy()\n\n # location includes the cdc code, while g_node_col presents the node name\n eval_res = pd.merge(eval_res, locs[['location', g_node_col]], on='location', how='left')[[\n 'forecast_date', 'target', 'target_end_date', 'location', g_node_col, 'label'\n ]]\n\n value_cols = ['label']\n mae_cols = []\n mape_cols = []\n for model_name, pred_df in model_pred_pairs:\n pred_col = f'Pred_{model_name}'\n mae_col = f'MAE_{model_name}'\n mape_col = f'MAPE_{model_name}'\n value_cols.append(pred_col)\n mae_cols.append(mae_col)\n mape_cols.append(mape_col)\n\n eval_res = 
pd.merge(eval_res,\n pred_df[pred_df['type'] == 'point'][['target', 'target_end_date', 'location', 'value']].\\\n rename(columns={'value': pred_col}),\n on=['target', 'target_end_date', 'location'], how='left')\n eval_res[mae_col] = np.abs(eval_res['label'] - eval_res[pred_col])\n eval_res[mape_col] = np.abs(eval_res['label'] - eval_res[pred_col]) / (eval_res['label'] + 1)\n non_pred_node_num = eval_res[pred_col].isnull().sum()\n if non_pred_node_num > 3:\n print(f'{model_name} drops {non_pred_node_num} nodes, such as')\n print(eval_res[eval_res[pred_col].isnull()][g_node_col].unique()[:10])\n\n if dropna:\n eval_res = eval_res.dropna(axis=0)\n\n print('-' * 30)\n print(f'forecast date: {forecast_date}')\n print(f'{level}-level {target} {target_end_date}')\n print('Label & Forecasting')\n print(eval_res[value_cols].mean().sort_values())\n print('MAE (Sorted)')\n print(eval_res[mae_cols].mean().sort_values())\n print('MAPE (Sorted)')\n print(eval_res[mape_cols].mean().sort_values())\n\n return eval_res\n\n\ndef get_model_seed_sort_by_mae(exp_res, cdc_label, forecast_date, level='county', n_week=1, stat_type='inc', value_type='case'):\n assert 'location' in exp_res.columns and 'cum_sum' in exp_res.columns\n exp_res = exp_res.set_index(['model', 'seed']).sort_index()\n model_pred_pairs = []\n for model, seed in exp_res.index.unique():\n model_tag = f'{model}~{seed}'\n cur_cdc_res = transform_to_cdc_format(exp_res.loc[(model, seed)], forecast_date)\n model_pred_pairs.append((model_tag, cur_cdc_res))\n eval_res = eval_cdc_pred(model_pred_pairs, cdc_label, forecast_date,\n level=level, n_week=n_week, stat_type=stat_type, value_type=value_type)\n mae_cols = [col for col in eval_res.columns if col.startswith('MAE_')]\n sorted_model_seed_pairs = []\n for mae_col in eval_res[mae_cols].mean().sort_values().index:\n model, seed = mae_col[4:].split('~')\n if '_' in seed:\n sorted_model_seed_pairs.append((model, seed))\n else:\n sorted_model_seed_pairs.append((model, int(seed)))\n\n return sorted_model_seed_pairs, eval_res\n\n\ndef load_all_cdc_forecasts(min_date='2020-09-01'):\n cdc_model_pred_pairs = []\n for sub_dir in os.listdir(cdc_forecast_dir):\n sub_dir_path = os.path.join(cdc_forecast_dir, sub_dir)\n if os.path.isdir(sub_dir_path):\n model = sub_dir\n print(time.asctime(), f'Load forecast results for {model}')\n preds = []\n for idx, csv_fn in enumerate(os.listdir(sub_dir_path)):\n if not csv_fn.endswith('.csv'):\n continue\n\n csv_date = '-'.join(csv_fn.split('-')[:3])\n if pd.to_datetime(csv_date) < pd.to_datetime(min_date):\n continue\n\n csv_fp = os.path.join(sub_dir_path, csv_fn)\n print(f'--[{idx}] Load {csv_fp}')\n pred = read_cdc_forecast(csv_fp)\n preds.append(pred)\n\n if len(preds) > 0:\n all_pred = pd.concat(preds, axis=0, ignore_index=True)\n cdc_model_pred_pairs.append((model, all_pred))\n else:\n print('--Skip because no csv')\n\n return cdc_model_pred_pairs\n\n\ndef extract_cdc_forecast(raw_pred, level, target, target_end_date):\n raw_pred = raw_pred[(raw_pred['target'] == target) & (raw_pred['target_end_date'] == pd.to_datetime(target_end_date))]\n if level == 'county':\n raw_pred = raw_pred[raw_pred['location'].map(lambda x: len(x) == 5)]\n else:\n raw_pred = raw_pred[raw_pred['location'].map(lambda x: len(x) == 2)]\n\n return raw_pred\n\n\ndef filter_valid_cdc_forecasts(model_pred_pairs, forecast_date, min_loc_num, fixed_model_set=None, dup_keep='first', level='county', n_week=1, stat_type='inc', value_type='case'):\n target = f'{n_week} wk ahead {stat_type} {value_type}'\n 
target_end_date = pd.to_datetime(forecast_date) + pd.Timedelta(n_week*7-1, unit='day')\n print('-'*30)\n print(f'{level}-level {target} {target_end_date}')\n\n valid_model_pred_pairs = []\n for model, raw_pred in model_pred_pairs:\n if fixed_model_set is not None and model not in fixed_model_set:\n continue\n raw_valid_pred = extract_cdc_forecast(raw_pred, level, target, target_end_date)\n valid_pred = raw_valid_pred.drop_duplicates(subset=['location', 'target', 'type', 'quantile', 'target_end_date'],\n keep=dup_keep, ignore_index=True)\n if valid_pred.shape[0] > 0 and valid_pred['location'].unique().size > min_loc_num:\n print(f'Add {model} as a candidate [{len(valid_model_pred_pairs)}] (drop {raw_valid_pred.shape[0]-valid_pred.shape[0]} duplicated forecasts)')\n valid_model_pred_pairs.append((model, valid_pred))\n\n return valid_model_pred_pairs\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Ensemble Generation')\n parser.add_argument('--forecast_date', type=str, required=True)\n parser.add_argument('--level', type=str, default='all')\n parser.add_argument('--exp_dir_template', type=str, default='../Exp_us_{}')\n parser.add_argument('--output_dir', type=str, default='../outputs')\n parser.add_argument('--ens_num', type=int, default=50)\n # the following arguments are for evaluation purposes only\n # we will produce ensemble outputs for all kinds of targets\n parser.add_argument('--n_week', type=int, default=1)\n parser.add_argument('--stat_type', type=str, default='inc')\n parser.add_argument('--value_type', type=str, default='case')\n parser.add_argument('--use_last_ens', action='store_true')\n parser.add_argument('--do_eval', action='store_true')\n\n args = parser.parse_args()\n forecast_date = args.forecast_date\n level = args.level # state or county\n exp_dir_template = args.exp_dir_template\n output_dir = args.output_dir\n n_week = args.n_week # 1, 2, 3, 4\n stat_type = args.stat_type # inc or cum\n value_type = args.value_type # case or death\n ens_num = args.ens_num # how many models to be included into the ensemble one\n\n exp_dir = exp_dir_template.format(forecast_date)\n forecast_dt = pd.to_datetime(forecast_date)\n next_dt = forecast_dt + pd.Timedelta(days=1)\n next_date = next_dt.strftime('%Y-%m-%d')\n\n print(time.asctime(), f'Load experimental results from {exp_dir}')\n exp_res = load_exp_res(exp_dir)\n print(time.asctime(), 'Merge last cumulative truths')\n exp_res = merge_last_cum_truth(exp_res, forecast_date)\n sel_exp_res = exp_res\n\n # transform to the cdc format per model and seed\n sel_exp_res = sel_exp_res.set_index(['model', 'seed']).sort_index()\n cdc_res_list = []\n for (m, s) in sel_exp_res.index.unique():\n cur_cdc_res = transform_to_cdc_format(sel_exp_res.loc[(m, s)], forecast_date)\n cur_cdc_res['model'] = m\n cur_cdc_res['seed'] = s\n cdc_res_list.append(cur_cdc_res)\n cdc_res = pd.concat(cdc_res_list, axis=0)\n cdc_res.loc[cdc_res['value'] < 0, 'value'] = 0\n cdc_res = cdc_res.dropna(subset=['value'])\n\n # point estimation\n point_cdc_res = cdc_res.groupby([\n 'forecast_date', 'target', 'target_end_date', 'location', 'type',\n ])[['value']].mean().reset_index(drop=False)\n\n # quantile estimation\n fine_quantile_list = [\n 0.010, 0.025, 0.050, 0.100, 0.150, 0.200, 0.250, 0.300, 0.350, 0.400, 0.450, 0.500,\n 0.550, 0.600, 0.650, 0.700, 0.750, 0.800, 0.850, 0.900, 0.950, 0.975, 0.990\n ]\n coarse_quantile_list = [0.025, 0.100, 0.250, 0.500, 0.750, 0.900, 0.975]\n # only calculate quantiles for state and country\n 
quant_cdc_res = cdc_res[cdc_res.location.map(lambda x: len(x) == 2)]\n quant_cdc_res.loc[quant_cdc_res.index, 'type'] = 'quantile'\n # for 'inc case', cdc requires coarse-grained quantiles\n coarse_cond = quant_cdc_res.target.map(lambda x: x.endswith(' inc case'))\n coarse_quant_cdc_res = quant_cdc_res[coarse_cond]\n fine_quant_cdc_res = quant_cdc_res[~coarse_cond]\n fine_quant_cdc_res = fine_quant_cdc_res.groupby([\n 'forecast_date', 'target', 'target_end_date', 'location', 'type'\n ])['value'].quantile(fine_quantile_list).reset_index(drop=False).rename(columns={'level_5': 'quantile'})\n coarse_quant_cdc_res = coarse_quant_cdc_res.groupby([\n 'forecast_date', 'target', 'target_end_date', 'location', 'type'\n ])['value'].quantile(coarse_quantile_list).reset_index(drop=False).rename(columns={'level_5': 'quantile'})\n\n os.makedirs(output_dir, exist_ok=True)\n cdc_ens_pred = pd.concat([fine_quant_cdc_res, coarse_quant_cdc_res, point_cdc_res], axis=0)\\\n .sort_values(['target', 'target_end_date', 'location', 'type', 'quantile'])\n # change date to be compatible with cdc\n out_fp = os.path.join(output_dir, f'{forecast_date}_forecasts.csv')\n print(time.asctime(), 'Dump cdc results to', out_fp)\n # cdc does not allow 'cum case' for all targets and only allows 'inc case' for county-level forecasts\n cdc_ens_pred[\n cdc_ens_pred.apply(\n lambda x:\n (not x['target'].endswith('cum case')) and\n (len(x['location']) == 2 or x['target'].endswith('inc case')) and\n (x['location'] != '02063') and (x['location'] != '02066'),\n axis=1\n )\n ].groupby(\n ['forecast_date', 'target', 'target_end_date', 'location', 'type', 'quantile',],\n dropna=False\n ).mean().reset_index(drop=False).to_csv(out_fp, index=False)"
] |
[
[
"pandas.concat",
"pandas.to_datetime",
"pandas.merge",
"numpy.abs",
"pandas.Timedelta",
"numpy.expm1"
]
] |
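A note on the ensemble step in the script above: each (model, seed) member is first transformed into CDC rows, the point estimate is then the mean across members, and the quantile rows come from groupby(...)['value'].quantile(q_list), with the unnamed quantile index level surfacing as 'level_5' after reset_index. The following is a minimal pandas sketch of that reduction on made-up toy values (column names follow the script; only three members and three quantiles are used here):

import pandas as pd

# toy member forecasts: three ensemble members for one state-level target
members = pd.DataFrame({
    "forecast_date":   ["2020-11-01"] * 3,
    "target":          ["1 wk ahead inc case"] * 3,
    "target_end_date": ["2020-11-07"] * 3,
    "location":        ["06"] * 3,          # 2-character code -> state level
    "type":            ["point"] * 3,
    "value":           [100.0, 120.0, 140.0],
})

group_cols = ["forecast_date", "target", "target_end_date", "location", "type"]

# point estimate: mean over ensemble members
point = members.groupby(group_cols)[["value"]].mean().reset_index()

# quantile rows: switch type to 'quantile' and take the requested quantiles
quantile_list = [0.025, 0.5, 0.975]
quant = (members.assign(type="quantile")
         .groupby(group_cols)["value"]
         .quantile(quantile_list)
         .reset_index()
         .rename(columns={"level_5": "quantile"}))

# point rows keep a missing 'quantile', matching the script's output layout
print(pd.concat([point, quant], ignore_index=True))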
mohitsudhakar/visual-dialog-experiments
|
[
"77cc65938b0ce99fc52b839b7821f29c7a6b32a0"
] |
[
"visdialch/decoders/decoder.py"
] |
[
"import torch\nfrom torch import nn\nimport numpy as np\n# from gensim.test.utils import common_texts\n# from gensim.models import Word2Vec\nfrom visdialch.utils import DynamicRNN\nfrom visdialch.utils.util import get_pretrained_weights\n\nclass DiscriminativeDecoder(nn.Module):\n def __init__(self, config, vocab):\n super().__init__()\n self.config = config \n # Getting pretrained weights\n weights = get_pretrained_weights(vocab)\n self.word_embed = nn.Embedding.from_pretrained(weights)\n\n # Discriminative decoder generates scores for each option\n self.option_rnn = nn.LSTM(config[\"word_embedding_size\"], config[\"lstm_hidden_size\"], config[\"lstm_num_layers\"], \n batch_first=True, dropout=config[\"dropout\"])\n\n self.option_rnn = DynamicRNN(self.option_rnn)\n\n def forward(self, encoder_output, batch):\n # batch_size x num_rounds x num_options x max_seq_length\n opt = batch[\"opt\"]\n batch_size, num_rounds, num_options, max_seq_length = opt.shape\n # (batch_size*num_rounds*num_options) x max_seq_length\n opt = opt.reshape(-1, opt.shape[-1])\n # (batch_size*num_rounds*num_options) x max_seq_length x embedding_size\n\n # Initally finding the word encoding for the options\n opt_emb = self.word_embed(opt)\n\n # Running the Decoder rnn on opt_emb to remove max_seq_len dimension\n # (batch_size*num_rounds*num_options) x lstm_hidden_size\n _, (opt_emb, _) = self.option_rnn(opt_emb, batch[\"opt_len\"])\n\n # Reshape the encoder output to match with options embedding\n # batch_size x num_rounds x num_options x lstm_hidden_size\n encoder_output = encoder_output.unsqueeze(2).repeat(1,1, num_options, 1)\n\n # Get the product opt_emb & encoder_output\n # (batch_size*num_rounds*num_options) x lstm_hidden_size\n encoder_output = encoder_output.reshape(-1, self.config[\"lstm_hidden_size\"])\n output_scores = torch.sum(opt_emb * encoder_output, 1)\n\n # Finally reshaping to seperate score for each options\n output_scores = output_scores.reshape(batch_size, num_rounds, num_options)\n return output_scores\n"
] |
[
[
"torch.sum",
"torch.nn.Embedding.from_pretrained",
"torch.nn.LSTM"
]
] |
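The discriminative decoder above scores every candidate answer by a dot product between its LSTM-encoded option vector and the encoder output repeated across options. A toy-shaped PyTorch sketch of just that scoring step (random tensors stand in for the real embeddings; DynamicRNN and the vocabulary are omitted):

import torch

batch_size, num_rounds, num_options, hidden = 2, 10, 100, 512

# stand-ins for the tensors produced inside forward()
opt_emb = torch.randn(batch_size * num_rounds * num_options, hidden)   # option encodings
encoder_output = torch.randn(batch_size, num_rounds, hidden)           # one vector per round

# repeat the encoder output over the option dimension, then flatten to match opt_emb
enc = encoder_output.unsqueeze(2).repeat(1, 1, num_options, 1).reshape(-1, hidden)

# dot product per (round, option) pair, reshaped back to one score per option
scores = torch.sum(opt_emb * enc, dim=1).reshape(batch_size, num_rounds, num_options)
print(scores.shape)  # torch.Size([2, 10, 100])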
Worm4047/TVR
|
[
"2a8ce2edbdc0966aef3b84c28872267039f01700"
] |
[
"baselines/excl/config.py"
] |
[
"import os\nimport time\nimport torch\nimport argparse\n\nfrom utils.basic_utils import mkdirp, load_json, save_json, make_zipfile\nfrom baselines.clip_alignment_with_language.local_utils.proposal import ProposalConfigs\n\n\nclass BaseOptions(object):\n saved_option_filename = \"opt.json\"\n ckpt_filename = \"model.ckpt\"\n tensorboard_log_dir = \"tensorboard_log\"\n train_log_filename = \"train.log.txt\"\n eval_log_filename = \"eval.log.txt\"\n\n def __init__(self):\n self.parser = argparse.ArgumentParser()\n self.initialized = False\n self.opt = None\n\n def initialize(self):\n self.initialized = True\n self.parser.add_argument(\"--dset_name\", type=str, choices=[\"tvr\"])\n self.parser.add_argument(\"--eval_split_name\", type=str, default=\"val\",\n help=\"should match keys in video_duration_idx_path, must set for VCMR\")\n self.parser.add_argument(\"--debug\", action=\"store_true\",\n help=\"debug (fast) mode, break all loops, do not load all data into memory.\")\n self.parser.add_argument(\"--data_ratio\", type=float, default=1.0,\n help=\"how many training and eval data to use. 1.0: use all, 0.1: use 10%.\"\n \"Use small portion for debug purposes. Note this is different from --debug, \"\n \"which works by breaking the loops, typically they are not used together.\")\n self.parser.add_argument(\"--results_root\", type=str, default=\"results\")\n self.parser.add_argument(\"--exp_id\", type=str, default=None, help=\"id of this run, required at training\")\n self.parser.add_argument(\"--seed\", type=int, default=2018, help=\"random seed\")\n self.parser.add_argument(\"--device\", type=int, default=0, help=\"0 cuda, -1 cpu\")\n self.parser.add_argument(\"--device_ids\", type=int, nargs=\"+\", default=[0], help=\"GPU ids to run the job\")\n self.parser.add_argument(\"--num_workers\", type=int, default=8,\n help=\"num subprocesses used to load the data, 0: use main process\")\n self.parser.add_argument(\"--no_core_driver\", action=\"store_true\",\n help=\"hdf5 driver, default use `core` (load into RAM), if specified, use `None`\")\n self.parser.add_argument(\"--no_pin_memory\", action=\"store_true\",\n help=\"Don't use pin_memory=True for dataloader. \"\n \"ref: https://discuss.pytorch.org/t/should-we-set-non-blocking-to-true/38234/4\")\n\n # training config\n self.parser.add_argument(\"--lr\", type=float, default=1e-3, help=\"learning rate\")\n self.parser.add_argument(\"--lr_warmup_proportion\", type=float, default=0.01,\n help=\"Proportion of training to perform linear learning rate warmup for. 
\"\n \"E.g., 0.1 = 10% of training.\")\n self.parser.add_argument(\"--wd\", type=float, default=0.01, help=\"weight decay\")\n self.parser.add_argument(\"--n_epoch\", type=int, default=30, help=\"number of epochs to run\")\n self.parser.add_argument(\"--max_es_cnt\", type=int, default=10,\n help=\"number of epochs to early stop, use -1 to disable early stop\")\n self.parser.add_argument(\"--stop_task\", type=str, default=\"SVMR\", choices=[\"VCMR\", \"SVMR\", \"VR\"])\n self.parser.add_argument(\"--eval_tasks_at_training\", type=str, nargs=\"+\",\n default=[\"SVMR\"], choices=[\"VCMR\", \"SVMR\", \"VR\"],\n help=\"evaluate and report numbers for tasks specified here.\")\n self.parser.add_argument(\"--bsz\", type=int, default=128, help=\"mini-batch size\")\n self.parser.add_argument(\"--eval_query_bsz\", type=int, default=50,\n help=\"mini-batch size at inference, for query\")\n self.parser.add_argument(\"--eval_context_bsz\", type=int, default=200,\n help=\"mini-batch size at inference, for video/sub\")\n self.parser.add_argument(\"--eval_untrained\", action=\"store_true\", help=\"Evaluate on un-trained model\")\n self.parser.add_argument(\"--grad_clip\", type=float, default=-1, help=\"perform gradient clip, -1: disable\")\n self.parser.add_argument(\"--margin\", type=float, default=0.1, help=\"margin for hinge loss\")\n self.parser.add_argument(\"--lw_neg_q\", type=float, default=1,\n help=\"weight for ranking loss with negative query and positive context\")\n self.parser.add_argument(\"--lw_neg_ctx\", type=float, default=1,\n help=\"weight for ranking loss with positive query and negative context\")\n self.parser.add_argument(\"--lw_st_ed\", type=float, default=0.01, help=\"weight for st ed prediction loss\")\n self.parser.add_argument(\"--train_span_start_epoch\", type=int, default=0,\n help=\"which epoch to start training span prediction, -1 to disable\")\n self.parser.add_argument(\"--ranking_loss_type\", type=str, default=\"hinge\", choices=[\"hinge\", \"lse\"],\n help=\"att loss type, can be hinge loss or its smooth approximation LogSumExp\")\n self.parser.add_argument(\"--hard_negtiave_start_epoch\", type=int, default=20,\n help=\"which epoch to start hard negative sampling for video-level ranking loss,\"\n \"use -1 to disable\")\n self.parser.add_argument(\"--hard_pool_size\", type=int, default=20,\n help=\"hard negatives are still sampled, but from a harder pool.\")\n\n # Model and Data config\n self.parser.add_argument(\"--max_sub_l\", type=int, default=50,\n help=\"max length of all sub sentence 97.71 under 50 for 3 sentences\")\n self.parser.add_argument(\"--max_desc_l\", type=int, default=30, help=\"max length of descriptions\")\n self.parser.add_argument(\"--max_ctx_l\", type=int, default=100,\n help=\"max number of snippets, 100 for tvr clip_length=1.5, oly 109/21825 > 100\")\n\n self.parser.add_argument(\"--train_path\", type=str, default=None)\n self.parser.add_argument(\"--eval_path\", type=str, default=None,\n help=\"Evaluating during training, for Dev set. 
If None, will only do training, \"\n \"anet_cap and charades_sta has no dev set, so None\")\n self.parser.add_argument(\"--use_glove\", action=\"store_true\", help=\"Use GloVe instead of BERT features\")\n self.parser.add_argument(\"--word2idx_path\", type=str,\n help=\"a dict, {word: word_idx, ...}, \"\n \"special tokens are {<pad>: 0, <unk>: 1, <eos>: 2}\")\n self.parser.add_argument(\"--vocab_size\", type=int, default=-1,\n help=\"Set automatically to len(word2idx)\")\n self.parser.add_argument(\"--glove_path\", type=str,\n help=\"path to file containing the GloVe embeddings for words in word2idx\")\n self.parser.add_argument(\"--desc_bert_path\", type=str, default=None)\n self.parser.add_argument(\"--sub_bert_path\", type=str, default=None)\n self.parser.add_argument(\"--sub_feat_size\", type=int, default=768, help=\"feature dim for sub feature\")\n self.parser.add_argument(\"--q_feat_size\", type=int, default=768, help=\"feature dim for sub feature\")\n self.parser.add_argument(\"--ctx_mode\", type=str, choices=[\"video\", \"sub\", \"video_sub\", \"tef\",\n \"video_tef\", \"sub_tef\", \"video_sub_tef\"],\n help=\"which context to use. a combination of [video, sub, tef]\")\n self.parser.add_argument(\"--video_duration_idx_path\", type=str, default=None)\n self.parser.add_argument(\"--vid_feat_path\", type=str, default=\"\")\n self.parser.add_argument(\"--no_norm_vfeat\", action=\"store_true\",\n help=\"Do not do normalization on video feat, use it when using i3d_resnet concat feat\")\n self.parser.add_argument(\"--no_norm_tfeat\", action=\"store_true\", help=\"Do not do normalization on text feat\")\n self.parser.add_argument(\"--clip_length\", type=float, default=None,\n help=\"each video will be uniformly segmented into small clips, \"\n \"will automatically loaded from ProposalConfigs if None\")\n self.parser.add_argument(\"--vid_feat_size\", type=int, help=\"feature dim for video feature\")\n\n self.parser.add_argument(\"--external_inference_vr_res_path\", type=str, default=None,\n help=\"if set, use external video retrieval results to guide evaluation. \")\n self.parser.add_argument(\"--span_predictor_type\", type=str, default=\"conv\", choices=[\"conv\", \"cat_linear\"],\n help=\"how to generate span predictions, \"\n \"conv: apply 1D-Conv layer on top of NxL dot product of query and clips\"\n \"cat_linear: cat the query and clips then use a linear layer to give output. 
\"\n \"Note cat_linear is implemented as first project query and clips into scores, \"\n \"separately, then sum them up, this should be similar to first cat then project.\")\n self.parser.add_argument(\"--encoder_type\", type=str, default=\"transformer\",\n choices=[\"gru\", \"lstm\", \"transformer\", \"cnn\"])\n self.parser.add_argument(\"--add_pe_rnn\", action=\"store_true\",\n help=\"Add positional encoding for GRU and LSTM encoder as well\")\n self.parser.add_argument(\"--no_merge_two_stream\", action=\"store_true\", help=\"do not merge video and subtitles\")\n self.parser.add_argument(\"--no_cross_att\", action=\"store_true\",\n help=\"Use cross-attention for modeling video and subtitles\")\n self.parser.add_argument(\"--no_self_att\", action=\"store_true\", help=\"do not use self attention\")\n self.parser.add_argument(\"--no_modular\", action=\"store_true\", help=\"do not use modular attention\")\n self.parser.add_argument(\"--pe_type\", type=str, default=\"cosine\", choices=[\"none\", \"linear\", \"cosine\"],\n help=\"Only for query encoding\")\n self.parser.add_argument(\"--max_position_embeddings\", type=int, default=300)\n self.parser.add_argument(\"--hidden_size\", type=int, default=128)\n self.parser.add_argument(\"--n_heads\", type=int, default=4)\n self.parser.add_argument(\"--input_drop\", type=float, default=0.1, help=\"Applied to all inputs\")\n self.parser.add_argument(\"--drop\", type=float, default=0.1, help=\"Applied to all other layers\")\n self.parser.add_argument(\"--cross_att_drop\", type=float, default=0.1, help=\"Applied to cross-att\")\n self.parser.add_argument(\"--conv_kernel_size\", type=int, default=5)\n self.parser.add_argument(\"--conv_stride\", type=int, default=1)\n self.parser.add_argument(\"--initializer_range\", type=float, default=0.02,\n help=\"initializer range for linear layer\")\n\n # post processing\n self.parser.add_argument(\"--min_pred_l\", type=int, default=2,\n help=\"constrain the [st, ed] with ed - st >= 2\"\n \"(2 clips with length 1.5 each, 3 secs in total\"\n \"this is the min length for proposal-based method)\")\n self.parser.add_argument(\"--max_pred_l\", type=int, default=16,\n help=\"constrain the [st, ed] pairs with ed - st <= 16, 24 secs in total\"\n \"(16 clips with length 1.5 each, \"\n \"this is the max length for proposal-based method)\")\n self.parser.add_argument(\"--q2c_alpha\", type=float, default=20,\n help=\"give more importance to top scored videos' spans, \"\n \"the new score will be: s_new = exp(alpha * s), \"\n \"higher alpha indicates more importance. Note s in [-1, 1]\")\n\n self.parser.add_argument(\"--max_before_nms\", type=int, default=200)\n self.parser.add_argument(\"--max_vcmr_video\", type=int, default=100,\n help=\"re-ranking in top-max_vcmr_video\")\n self.parser.add_argument(\"--nms_thd\", type=float, default=-1,\n help=\"additionally use non-maximum suppression \"\n \"(or non-minimum suppression for distance)\"\n \"to post-processing the predictions. \"\n \"-1: do not use nms. 
0.6 for charades_sta, 0.5 for anet_cap,\")\n\n def display_save(self, opt):\n args = vars(opt)\n # Display settings\n print(\"------------ Options -------------\\n{}\\n-------------------\"\n .format({str(k): str(v) for k, v in sorted(args.items())}))\n\n # Save settings\n if not isinstance(self, TestOptions):\n option_file_path = os.path.join(opt.results_dir, self.saved_option_filename) # not yaml file indeed\n save_json(args, option_file_path, save_pretty=True)\n\n def parse(self):\n if not self.initialized:\n self.initialize()\n opt = self.parser.parse_args()\n\n if opt.debug:\n opt.results_root = os.path.sep.join(opt.results_root.split(os.path.sep)[:-1] + [\"debug_results\", ])\n opt.no_core_driver = True\n opt.num_workers = 0\n opt.eval_query_bsz = 100\n\n if isinstance(self, TestOptions):\n # modify model_dir to absolute path\n opt.model_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"results\", opt.model_dir)\n saved_options = load_json(os.path.join(opt.model_dir, self.saved_option_filename))\n for arg in saved_options: # use saved options to overwrite all BaseOptions args.\n if arg not in [\"results_root\", \"num_workers\", \"nms_thd\", \"debug\", \"eval_split_name\",\n \"eval_path\", \"max_pred_l\", \"min_pred_l\"]:\n setattr(opt, arg, saved_options[arg])\n # opt.no_core_driver = True\n else:\n if opt.exp_id is None:\n raise ValueError(\"--exp_id is required for at a training option!\")\n\n if opt.clip_length is None:\n opt.clip_length = ProposalConfigs[opt.dset_name][\"clip_length\"]\n print(\"Loaded clip_length {} from proposal config file\".format(opt.clip_length))\n opt.results_dir = os.path.join(opt.results_root,\n \"-\".join([opt.dset_name, opt.ctx_mode, opt.exp_id,\n time.strftime(\"%Y_%m_%d_%H_%M_%S\")]))\n mkdirp(opt.results_dir)\n # save a copy of current code\n code_dir = os.path.dirname(os.path.realpath(__file__))\n code_zip_filename = os.path.join(opt.results_dir, \"code.zip\")\n make_zipfile(code_dir, code_zip_filename,\n enclosing_dir=\"code\",\n exclude_dirs_substring=\"results\",\n exclude_dirs=[\"results\", \"debug_results\", \"__pycache__\"],\n exclude_extensions=[\".pyc\", \".ipynb\", \".swap\"],)\n\n self.display_save(opt)\n\n if \"sub\" in opt.ctx_mode:\n assert opt.dset_name == \"tvr\", \"sub is only supported for tvr dataset\"\n\n if opt.hard_negtiave_start_epoch != -1:\n if opt.hard_pool_size > opt.bsz:\n print(\"[WARNING] hard_pool_size is larger than bsz\")\n\n assert opt.stop_task in opt.eval_tasks_at_training\n opt.ckpt_filepath = os.path.join(opt.results_dir, self.ckpt_filename)\n opt.train_log_filepath = os.path.join(opt.results_dir, self.train_log_filename)\n opt.eval_log_filepath = os.path.join(opt.results_dir, self.eval_log_filename)\n opt.tensorboard_log_dir = os.path.join(opt.results_dir, self.tensorboard_log_dir)\n opt.device = torch.device(\"cuda:%d\" % opt.device_ids[0] if opt.device >= 0 else \"cpu\")\n opt.h5driver = None if opt.no_core_driver else \"core\"\n # num_workers > 1 will only work with \"core\" mode, i.e., memory-mapped hdf5\n opt.num_workers = 1 if opt.no_core_driver else opt.num_workers\n opt.pin_memory = not opt.no_pin_memory\n\n if \"video\" in opt.ctx_mode and opt.vid_feat_size > 3000: # 3072, the normalized concatenation of resnet+i3d\n assert opt.no_norm_vfeat\n\n if \"tef\" in opt.ctx_mode and \"video\" in opt.ctx_mode:\n opt.vid_feat_size += 2\n if \"tef\" in opt.ctx_mode and \"sub\" in opt.ctx_mode:\n opt.sub_feat_size += 2\n\n if \"video\" not in opt.ctx_mode or \"sub\" not in opt.ctx_mode:\n 
opt.no_merge_two_stream = True\n opt.no_cross_att = True\n\n self.opt = opt\n return opt\n\n\nclass TestOptions(BaseOptions):\n \"\"\"add additional options for evaluating\"\"\"\n def initialize(self):\n BaseOptions.initialize(self)\n # also need to specify --eval_split_name\n self.parser.add_argument(\"--eval_id\", type=str, help=\"evaluation id\")\n self.parser.add_argument(\"--model_dir\", type=str,\n help=\"dir contains the model file, will be converted to absolute path afterwards\")\n self.parser.add_argument(\"--tasks\", type=str, nargs=\"+\",\n choices=[\"VCMR\", \"SVMR\", \"VR\"], default=[\"VCMR\", \"SVMR\", \"VR\"],\n help=\"Which tasks to run.\"\n \"VCMR: Video Corpus Moment Retrieval;\"\n \"SVMR: Single Video Moment Retrieval;\"\n \"VR: regular Video Retrieval. (will be performed automatically with VCMR)\")\n"
] |
[
[
"torch.device"
]
] |
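For context on how this options class is consumed: BaseOptions().parse() reads sys.argv, fills in derived fields such as opt.results_dir, opt.device and opt.ckpt_filepath, saves opt.json, and zips a copy of the code. A hedged usage sketch, run from the repo root; the script name and flag values are illustrative only, and parsing creates a results directory and code.zip as side effects:

import sys
from baselines.excl.config import BaseOptions

# illustrative command line; --exp_id is required at training time
sys.argv = [
    "train.py",
    "--dset_name", "tvr",
    "--exp_id", "demo_run",
    "--ctx_mode", "video_sub",
    "--vid_feat_size", "2048",
    "--clip_length", "1.5",
]

opt = BaseOptions().parse()   # fills defaults, creates opt.results_dir, zips the code
print(opt.results_dir)
print(opt.device, opt.pin_memory, opt.ckpt_filepath)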
20centcroak/pyDocxReport
|
[
"5184357241e765c12082c56e2ef1805e36e78f37"
] |
[
"tests/unit/test_databridge.py"
] |
[
"import unittest\nfrom pandas import DataFrame\nfrom yaml import safe_load\n\nfrom pyDocxReport import DataBridge\n\n\nclass TestDataBridge(unittest.TestCase):\n\n def test_bridge(self):\n\n d = {'col1': [1, 2], 'col2': [3, 4]}\n df1 = DataFrame(data=d)\n\n bridge = DataBridge('tests/unit/resources/template.docx')\n\n with open('tests/unit/resources/matchs.yml', 'r') as file:\n matchs = safe_load(file.read())\n\n matchs['_keyword2_'] = {\n 'type': 'table',\n 'replacement': df1,\n 'header': False\n }\n\n bridge.match(matchs)\n bridge.save('output/output.docx')\n"
] |
[
[
"pandas.DataFrame"
]
] |
wangleon/stella
|
[
"3942f8e687065bb96760140596747cbbe6dad04b"
] |
[
"stellarlab/kinetics/orbit.py"
] |
[
"import math\nimport numpy as np\nfrom astropy.coordinates import SkyCoord\n\nfrom ..constant import ALPHA_NGP, DELTA_NGP, L_NCP, AU, tropical_year\n\ndef parse_pairwise(arg):\n \"\"\"Parse value with error\"\"\"\n if (isinstance(arg, list) or isinstance(arg, tuple)) and \\\n len(arg)==2:\n return arg\n else:\n raise ValueError\n\ndef parse_value_err(arg):\n if isinstance(arg, float) or isinstance(arg, int):\n return arg, None\n elif (isinstance(arg, list) or isinstance(arg, tuple)) and \\\n len(arg)==2:\n return arg\n else:\n raise ValueError\n\ndef compute_UVW(**kwargs):\n # ra, dec, rv, parallax, pm_ra, pm_dec,\n # rv_err=None, parallax_err=None, pm_ra_err=None, pm_dec_err=None,\n # hand='left'):\n \"\"\"Compute Galactic velocity components (*U*, *V*, *W*).\n\n Args:\n ra (float): Right Ascension in degree at epoch J2000.0.\n dec (float): Declination in degree at epoch J2000.0.\n eqcoord (:py:class:`astropy.coordinates.SkyCoord`): Sky coordinate of\n object. Either (`ra`, `dec`) or `eqcoord` is necessary\n pm (list or tuple): Proper motion in mas/yr. Either (`pm_RA`, `pm_Dec`)\n or ((`pm_RA`, `pm_RA_err`), (`pm_Dec`, `pm_Dec_err`))\n rv (float, list or tuple): Radial velocity in km/s. Either `rv` as a\n float or (`rv`, `rv_err`)\n parallax (float, list, or tuple): Parallax in mas. Either `parallax` as\n a float or (`parallax`, `parallax_err`).\n U_plus (str): Positive direction (towards Galactic center or\n anti-center) of *U* componenet. Default is `center`.\n [`center`\\ \\|\\ `anticenter`]\n \n Returns:\n UVW: *tuple*\n (*U*, *V*, *W*) velocities or ((*U*, *U_err*), (*V*, *V_err*),\n (*W*, *W_err*)) if all the uncertainties to parallax, proper motion\n and radial velocity are given.\n\n Notes:\n .. |kms| replace:: km s\\ :sup:`โ1`\n Calculate the Galactic space velocity components (*U*, *V*, *W*) using\n the formula given by `Johnson & Soderblom 1987\n <http://adsabs.harvard.edu/abs/1987AJ.....93..864J>`_.\n The coordinate, parallax, and proper motion are required. The resulting\n velocities are relative to the Sun. The positive direction of *U* is\n defined as towards Galactic center in right-handed system, and towards\n Galactic anticenter in left-handed system. To correct to the local\n standard of rest (LSR), the solar motion (*U*:sub:`LSR`, *V*:sub:`LSR`,\n *W*:sub:`LSR`) are needed, e.g. (9.6 ยฑ 3.9, 14.6 ยฑ 5.0, 9.3 ยฑ 1.0) |kms|\n (`Reid et al. 2014\n <http://adsabs.harvard.edu/abs/2014ApJ...783..130R>`_).\n \n\n Examples\n ---------\n Calculate (*U*, *V*, *W*) velocities relative to the sun for HD 9562 (HIP\n 7276). The heliocentric radial velocity is โ13.3 |kms| (`Bensby et al. 2003\n <http://adsabs.harvard.edu/abs/2003A&A...410..527B>`_, Table 2).\n\n .. code-block:: python\n\n from stella.catalog.find_catalog import find_HIP\n from stella.kinetics.orbit import compute_UVW\n\n hip = 7276\n item = find_HIP(hip)\n u, v, w = compute_UVW(ra=item['RAdeg'], dec=item['DEdeg'], parallax=item['Plx'],\n rv=-13.3, pm=(item['pmRA'], item['pmDE']))\n print('%+6.2f %+6.2f %+6.2f'%(u, v, w))\n # output: -8.86 -26.35 +12.39\n\n References\n -----------\n * `Bensby et al., 2003, A&A, 410, 527 <http://adsabs.harvard.edu/abs/2003A&A...410..527B>`_\n * `Johnson & Soderblom, 1987, AJ, 93, 864 <http://adsabs.harvard.edu/abs/1987AJ.....93..864J>`_\n * `Reid et al. 
2014, ApJ, 783, 130 <http://adsabs.harvard.edu/abs/2014ApJ...783..130R>`_\n\n \"\"\"\n\n sin = math.sin\n cos = math.cos\n pi = math.pi\n\n alpha = ALPHA_NGP/180.*pi\n delta = DELTA_NGP/180.*pi\n theta = L_NCP/180.*pi\n\n # parse RA and Dec\n if 'eqcoord' in kwargs:\n eqcoord = kwargs.pop('eqcoord')\n if isinstance(eqcoord, SkyCoord):\n icrs = eqcoord.icrs\n ra = icrs.ra.degree\n dec = icrs.dec.degree\n else:\n ra, dec = parse_pairwise(eqcoord)\n elif 'ra' in kwargs and 'dec' in kwargs:\n ra = kwargs.pop('ra')\n dec = kwargs.pop('dec')\n else:\n raise ValueError\n\n ra = ra/180.*pi\n dec = dec/180.*pi\n\n # parse RV\n if 'rv' in kwargs:\n rv, rv_err = parse_value_err(kwargs.pop('rv'))\n\n # parse distance\n if 'distance' in kwargs:\n d, d_err = parse_value_err(kwargs.pop('distance'))\n elif 'parallax' in kwargs:\n para, para_err = parse_value_err(kwargs.pop('parallax'))\n d = 1000./para\n if para_err is None:\n d_err = None\n else:\n d_err = d*para_err/para\n else:\n raise ValueError\n\n # parse proper motion\n if 'pm' in kwargs:\n input_pm_ra, input_pm_dec = parse_pairwise(kwargs.pop('pm'))\n pm_ra, pm_ra_err = parse_value_err(input_pm_ra)\n pm_dec, pm_dec_err = parse_value_err(input_pm_dec)\n pm_ra *= 1e-3\n pm_dec *= 1e-3\n if pm_ra_err is not None:\n pm_ra_err *= 1e-3\n if pm_dec_err is not None:\n pm_dec_err *= 1e-3\n else:\n raise ValueError\n\n T1 = np.mat([[ cos(theta), sin(theta), 0],\n [ sin(theta), -cos(theta), 0],\n [ 0, 0, 1]])\n T2 = np.mat([[-sin(delta), 0, cos(delta)],\n [ 0, -1, 0],\n [ cos(delta), 0, +sin(delta)]])\n T3 = np.mat([[ cos(alpha), sin(alpha), 0],\n [ sin(alpha), -cos(alpha), 0],\n [ 0, 0, 1]])\n\n T = T1*T2*T3\n\n U_plus = kwargs.pop('U_plus', 'center')\n if U_plus == 'center':\n pass\n elif U_plus == 'anticenter':\n T[0][:] = -T[0][:]\n else:\n raise ValueError\n\n\n A1 = np.mat([[ cos(ra), sin(ra), 0],\n [ sin(ra), -cos(ra), 0],\n [ 0, 0, -1]])\n A2 = np.mat([[ cos(dec), 0, -sin(dec)],\n [ 0, -1, 0],\n [-sin(dec), 0, -cos(dec)]])\n A = A1*A2\n\n B = T*A\n\n k = AU*1e-3/tropical_year/86400 # 1 AU/year in unit of km/s\n\n x = np.mat([[rv],\n [k*pm_ra*d],\n [k*pm_dec*d]])\n\n U, V, W = np.array(B*x).flatten()\n\n if None in [pm_ra_err, pm_dec_err, rv_err, d_err]:\n return (U, V, W)\n else:\n C = np.mat(np.array(B)**2)\n\n e11 = rv_err\n e12 = (k*d)**2*(pm_ra_err**2 + (pm_ra*d_err/d)**2)\n e13 = (k*d)**2*(pm_dec_err**2 + (pm_dec*d_err/d)**2)\n\n e1 = np.mat([[e11],[e12],[e13]])\n\n e2c = 2.*pm_ra*pm_dec*k**2*d_err**2/d**4\n\n e = C*e1\n\n U_err = math.sqrt(e[0,0] + e2c*B[0,1]*B[0,2])\n V_err = math.sqrt(e[1,0] + e2c*B[1,1]*B[1,2])\n W_err = math.sqrt(e[2,0] + e2c*B[2,1]*B[2,2])\n return ((U, U_err), (V, V_err), (W, W_err))\n\ndef compute_GalXYZ(**kwargs):\n \"\"\"Compute Galactic position (*X*, *Y*, *Z*) in unit of kpc.\n\n Args:\n ra (float): Right Ascension in degree at epoch J2000.0\n dec (float): Declination in degree at epoch J2000.0\n eqcoord (:py:class:`astropy.coordinates.SkyCoord`, optional): Sky\n coordinate of object\n galactic (list or tuple, optional): Galactic coordinate (`l`, `b`)\n l (float, optional): Galactic longitude in degree\n b (float, optional): Galactic latitude in degree\n distance (float, list, or tuple): Distance in pc. Either `distance` as a\n float or (`distance`, `distance_err`)\n parallax (float, list, or tuple): Parallax in mas. 
Either `parallax` as\n a float or (`parallax`, `parallax_err`)\n R0 (float): Solar distance to the Galactic center in kpc\n\n Returns:\n tuple: Galactic position (*x*, *y*, *z*) in unit of kpc\n\n \"\"\"\n # parse RA and Dec\n if 'eqcoord' in kwargs:\n eqcoord = kwargs.pop('eqcoord')\n if not isinstance(eqcoord, SkyCoord):\n ra, dec = parse_pairwise(eqcoord)\n frame = kwargs.pop('frame', 'icrs')\n eqcoord = SkyCoord(ra, dec, frame=frame, unit='deg')\n gal = eqcoord.galactic\n l, b = gal.l.degree, gal.b.degree\n elif 'ra' in kwargs and 'dec' in kwargs:\n ra = kwargs.pop('ra')\n dec = kwargs.pop('dec')\n frame = kwargs.pop('frame', 'icrs')\n eqcoord = SkyCoord(ra, dec, frame=frame, unit='deg')\n gal = eqcoord.galactic\n l, b = gal.l.degree, gal.b.degree\n elif 'galactic' in kwargs:\n l, b = parse_pairwise(kwargs.pop('galactic'))\n elif 'l' in kwargs and 'b' in kwargs:\n l = kwargs.pop('l')\n b = kwargs.pop('b')\n else:\n raise ValueError\n\n l = l/180.*math.pi\n b = b/180.*math.pi\n\n # parse distance\n if 'distance' in kwargs:\n d, d_err = parse_value_err(kwargs.pop('distance'))\n elif 'parallax' in kwargs:\n para, para_err = parse_value_err(kwargs.pop('parallax'))\n d = 1000./para\n if para_err is None:\n d_err = None\n else:\n d_err = d*para_err/para\n else:\n raise ValueError\n\n R0 = kwargs.pop('R0', 8.5)\n\n d *= 1e-3\n\n x = R0 - d*math.cos(b)*math.cos(l)\n y = d*math.cos(b)*math.sin(l)\n z = d*math.sin(b)\n\n return (x, y, z)\n\ndef compute_Galorbit(**kwargs):\n \"\"\"Calculate the stellar orbit in the Milky Way.\n\n Args:\n potential (list): List of Galactic potentials.\n xyz (tuple or list): Galactic positions\n uvw (tuple or list): Galactic space velocity\n solar_uvw (tuple or list): Solar space velocity\n t (list): List of integration time\n\n Returns:\n A tuple containing:\n * x_lst (:class:`numpy.ndarray`)\n * y_lst (:class:`numpy.ndarray`)\n * z_lst (:class:`numpy.ndarray`)\n\n Examples:\n Calculate the orbit of the Sun\n\n .. code-block:: python\n\n solar_uvw = (9.6, 255.2, 9.3) # from Reid et al. 2014\n t_lst = np.arange(0, 0.4, 0.0001) # in Gyr\n\n x_lst, y_lst, z_lst = orbit.compute_Galorbit(\n potential = potential_lst,\n xyz=(R0,0.,0.),\n uvw=(0.,0.,0.),\n solar_uvw=solar_uvw,\n t=t_lst)\n\n Calculate the orbit of `HD 122563\n <http://simbad.u-strasbg.fr/simbad/sim-id?Ident=HD+122563>`_ (HIP 68594)\n\n .. 
code-block:: python\n\n from stella.catalog import find_catalog\n hip = 68594\n item = find_catalog.find_HIP2(hip)\n ra, dec = item['RAdeg'], item['DEdeg']\n rv = (-26.58, 0.15) # from SIMBAD\n parallax = (item['Plx'], item['e_Plx'])\n pm = ((item['pmRA'], item['e_pmRA']),(item['pmDE'], item['e_pmDE']))\n uvw = orbit.compute_UVW(ra=ra,dec=dec,rv=rv,parallax=parallax,pm=pm,U_plus='center')\n xyz = orbit.compute_GalXYZ(ra=ra,dec=dec,parallax=parallax,R0=R0)\n x1_lst, y1_lst, z1_lst = orbit.compute_Galorbit(\n potential = potential_lst,\n xyz=xyz,\n uvw=uvw,\n solar_uvw=solar_uvw,\n t=t_lst)\n \"\"\"\n from scipy.integrate import odeint\n from ..constant import pc\n\n potential_lst = kwargs.pop('potential')\n\n if 'xyz' in kwargs:\n x, y, z = kwargs.pop('xyz')\n if isinstance(x, list) or isinstance(x, tuple):\n x, y, z = x[0], y[0], z[0]\n elif isinstance(x, float):\n pass\n else:\n raise ValueError\n\n if 'uvw' in kwargs:\n uvw = kwargs.pop('uvw')\n if isinstance(uvw, tuple) or isinstance(uvw, list):\n u, _ = parse_value_err(uvw[0])\n v, _ = parse_value_err(uvw[1])\n w, _ = parse_value_err(uvw[2])\n else:\n raise ValueError\n\n solar_uvw = kwargs.pop('solar_uvw')\n if isinstance(solar_uvw, tuple) or isinstance(solar_uvw, list):\n solar_u, _ = parse_value_err(solar_uvw[0])\n solar_v, _ = parse_value_err(solar_uvw[1])\n solar_w, _ = parse_value_err(solar_uvw[2])\n else:\n raise ValueError\n\n target_u = u + solar_u\n target_v = v + solar_v\n target_w = w + solar_w\n\n t = kwargs.pop('t')\n t_lst = t*1e9*365.2422*86400 # convert Gyr to second\n\n def derive(var, t, potential_lst):\n x, y, z, vx, vy, vz = var\n acce_lst = np.array([potential.get_acce_cartesian(x, y, z)\n for potential in potential_lst])\n ax = acce_lst[:,0].sum()\n ay = acce_lst[:,1].sum()\n az = acce_lst[:,2].sum()\n return [vx/pc, vy/pc, vz/pc, ax, ay, az]\n\n vx, vy, vz = -target_u, target_v, target_w\n var0 = x, y, z, vx, vy, vz\n sol = odeint(derive, var0, t_lst, args=(potential_lst,))\n\n x_lst = sol[:,0]\n y_lst = sol[:,1]\n z_lst = sol[:,2]\n r_lst = np.sqrt(x_lst**2 + y_lst**2 + z_lst**2)\n\n return x_lst, y_lst, z_lst\n"
] |
[
[
"numpy.array",
"numpy.mat",
"numpy.sqrt",
"scipy.integrate.odeint"
]
] |
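compute_UVW in the file above accepts plain values or (value, error) pairs and, when every uncertainty is supplied, propagates them and returns ((U, U_err), (V, V_err), (W, W_err)). A direct-call sketch with illustrative, made-up astrometry (the import path follows the file path listed above; the docstring example instead goes through the HIP catalogue helper):

from stellarlab.kinetics.orbit import compute_UVW

# illustrative inputs: RA/Dec in degrees, parallax in mas, proper motion in mas/yr,
# radial velocity in km/s; each (value, error) pair switches on error propagation
(U, U_err), (V, V_err), (W, W_err) = compute_UVW(
    ra=24.43, dec=-7.02,
    parallax=(42.0, 0.5),
    pm=((690.0, 1.0), (-210.0, 1.0)),
    rv=(-13.3, 0.2),
    U_plus='center',
)
print(U, V, W)
print(U_err, V_err, W_err)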
rhanschristiansen/association_algo_performance
|
[
"1a03875098eb4f45f7b9cd2dfbd80ae4d87c758d"
] |
[
"src/association/association.py"
] |
[
"import numpy as np\nfrom src.association import munkres\n\n\nclass Association:\n def __init__(self):\n pass\n\n # the evaluate cost function receives to arguments:\n # 1 - a dictionary called cost functions with the function method name as the key and the weight as the value\n #\n # cost_functions = { costs.dist_between_centroids : 0.334,\n # costs.dist_lidar_to_y2estimate : 0.333,\n # costs.inverse_intersection_over_union : 0.333 }\n #\n # 2 - a dictionary contained in **kwargs with the keys containing the names of the two lists of objects\n # to be associated (video_detections and lidar_detections) and the values are the lists of objects to be\n # associated.\n #\n # kwargs = {'video_detections' : video_detections, 'lidar_detections' : lidar_detections}\n #\n #\n # The evaluate cost function evaluates the costs using methods that are contained in the Costs Class in the\n # costs.py file.\n # The function is called like this:\n #\n # a = Association()\n # costs = a.evaluate_cost(cost_functions, **kwargs)\n #\n # each of the elements of the costs array represent the cost value between the\n # i-th video_detection and the j-th lidar_detection where\n # cost (i,j) = weight[0] * cost_function[0](i, j) + weight[1] * cost_function[1](i,j) + ... + weight[n] * cost_function[n](i,j)\n # for n cost functions and weights in the cost_functions dictionary\n\n def evaluate_cost(self, cost_functions, **kwargs):\n\n array_size = []\n for k,v in kwargs.items():\n array_size.append(len(v))\n\n cost = np.zeros((array_size[0], array_size[1]), np.float64 )\n cost_components = []\n\n for function_name, weight in cost_functions.items():\n cost_component = function_name(**kwargs)\n cost += weight * cost_component\n cost_components.append(cost_component)\n\n return cost, cost_components\n\n def compute_munkres(self, cost):\n m = munkres.Munkres()\n assignments = m.compute(cost)\n return assignments\n\n\n# this is a test of the Association class using the cost methods contained in the Costs class\nif __name__ == '__main__':\n\n import src.detection.detection as video_det\n import src.lidar.lidar_detection as lidar_det\n import src.association.costs as costs\n\n costs = costs.Costs()\n\n vdet0 = video_det.Detection()\n vdet0.bbox = [412, 375, 486, 421]\n vdet1 = video_det.Detection()\n vdet1.bbox = [762, 374, 799, 408]\n vdet2 = video_det.Detection()\n vdet2.bbox = [913, 338, 1020, 375]\n vdet3 = video_det.Detection()\n vdet3.bbox = [708, 374, 739, 400]\n vdet4 = video_det.Detection()\n vdet4.bbox = [613, 361, 650, 384]\n vdet5 = video_det.Detection()\n vdet5.bbox = [562, 369, 600, 396]\n vdet6 = video_det.Detection()\n vdet6.bbox = [774, 378, 990, 502]\n vdet7 = video_det.Detection()\n vdet7.bbox = [893, 350, 954, 377]\n vdet8 = video_det.Detection()\n vdet8.bbox = [171, 360, 301, 416]\n\n video_detections = [vdet0, vdet1, vdet2, vdet3, vdet4, vdet5, vdet6, vdet7, vdet8]\n\n ldet0 = lidar_det.LIDAR_detection(840, 2, 38.3435516357, 0)\n ldet1 = lidar_det.LIDAR_detection(840, 11, 12.4829711914, 0)\n ldet2 = lidar_det.LIDAR_detection(840, 12, 12.714263915999998, 0)\n ldet3 = lidar_det.LIDAR_detection(840, 12, 36.3725891113, 0)\n ldet4 = lidar_det.LIDAR_detection(840, 13, 12.671356201199998, 0)\n ldet5 = lidar_det.LIDAR_detection(840, 15, 12.3006744385, 0)\n lidar_detections = [ldet0, ldet1, ldet2, ldet3, ldet4, ldet5]\n\n # enter the cost method names as keys in the dictionary and weights as their values\n # the returned costs array will have a number of rows equal to the number of video_detection objects\n # and a 
number of columns equal to the nubmer of lidar_detection objects.\n #\n # each of the elements of the array represent the cost value between the i-th video_detection and the j-th lidar_detection\n # cost (i,j) = weight[0] * cost_function[0](i, j) + weight[1] * cost_function[1](i,j) + ... + weight[n] * cost_function[n](i,j)\n # for n cost functions and weights in the cost_functions dictionary\n\n cost_functions = { costs.dist_between_centroids : 0.334,\n costs.dist_lidar_to_y2estimate : 0.333,\n costs.inverse_intersection_over_union : 0.333 }\n\n a = Association()\n\n # enter the video_detections and lidar_detections lists into the kwargs dictionary\n kwargs = {'video_detections' : video_detections, 'lidar_detections' : lidar_detections}\n\n # evaluate the costs array by passing the cost_functions dictionary and the kwargs dictionary to the evaluate_costs method\n costs = a.evaluate_cost(cost_functions, **kwargs)\n\n b = 1"
] |
[
[
"numpy.zeros"
]
] |
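The cost construction above is cost(i, j) = sum_k weight_k * cost_function_k(i, j), after which compute_munkres solves the assignment. A self-contained toy sketch with a single hand-written cost function in place of the Costs class (the detections here are plain floats; it assumes the bundled munkres accepts a square list-of-lists cost matrix):

import numpy as np
from src.association.association import Association

# toy "detections": any objects work, the cost functions decide how to compare them
video_detections = [10.0, 20.0, 35.0]   # e.g. distances estimated from bounding boxes
lidar_detections = [12.0, 33.0, 19.0]   # e.g. distances measured by the LIDAR

# each cost function receives the same **kwargs and returns an (n_video x n_lidar) array
def abs_distance(video_detections, lidar_detections):
    v = np.asarray(video_detections)[:, None]
    l = np.asarray(lidar_detections)[None, :]
    return np.abs(v - l)

cost_functions = {abs_distance: 1.0}    # weights sum to 1, as in the example above

a = Association()
cost, components = a.evaluate_cost(cost_functions,
                                   video_detections=video_detections,
                                   lidar_detections=lidar_detections)
assignments = a.compute_munkres(cost.tolist())   # list of (video_idx, lidar_idx) pairs
print(cost)
print(assignments)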
AnshMittal1811/Mask_RCNN
|
[
"d4ed6a2065bd3cc2b506ab4dbdfc18efab2f53ea"
] |
[
"mrcnn/parallel_model.py"
] |
[
"\"\"\"\nMask R-CNN\nMulti-GPU Support for Keras.\n\nCopyright (c) 2017 Matterport, Inc.\nLicensed under the MIT License (see LICENSE for details)\nWritten by Waleed Abdulla\n\nIdeas and a small code snippets from these sources:\nhttps://github.com/fchollet/keras/issues/2436\nhttps://medium.com/@kuza55/transparent-multi-gpu-training-on-tensorflow-with-keras-8b0016fd9012\nhttps://github.com/avolkov1/keras_experiments/blob/master/keras_exp/multigpu/\nhttps://github.com/fchollet/keras/blob/master/keras/utils/training_utils.py\n\"\"\"\n\nimport tensorflow as tf\nimport keras.backend as K\nimport keras.layers as KL\nimport keras.models as KM\n\n \nclass ParallelModel(KM.Model):\n \"\"\"Subclasses the standard Keras Model and adds multi-GPU support.\n It works by creating a copy of the model on each GPU. Then it slices\n the inputs and sends a slice to each copy of the model, and then\n merges the outputs together and applies the loss on the combined\n outputs.\n \"\"\"\t\n def __init__(self, keras_model, gpu_count):\n \"\"\"Class constructor.\n keras_model: The Keras model to parallelize\n gpu_count: Number of GPUs. Must be > 1\n \"\"\"\n super(ParallelModel, self).__init__()\n self.inner_model = keras_model\n self.gpu_count = gpu_count\n merged_outputs = self.make_parallel()\n super(ParallelModel, self).__init__(inputs=self.inner_model.inputs,\n outputs=merged_outputs)\n\n def __getattribute__(self, attrname):\n \"\"\"Redirect loading and saving methods to the inner model. That's where\n the weights are stored.\"\"\"\n if 'load' in attrname or 'save' in attrname:\n return getattr(self.inner_model, attrname)\n return super(ParallelModel, self).__getattribute__(attrname)\n\n def summary(self, *args, **kwargs):\n \"\"\"Override summary() to display summaries of both, the wrapper\n and inner models.\"\"\"\n super(ParallelModel, self).summary(*args, **kwargs)\n self.inner_model.summary(*args, **kwargs)\n\n def make_parallel(self):\n \"\"\"Creates a new wrapper model that consists of multiple replicas of\n the original model placed on different GPUs.\n \"\"\"\n # Slice inputs. Slice inputs on the CPU to avoid sending a copy\n # of the full inputs to all GPUs. Saves on bandwidth and memory.\n input_slices = {name: tf.split(x, self.gpu_count)\n for name, x in zip(self.inner_model.input_names,\n self.inner_model.inputs)}\n\n output_names = self.inner_model.output_names\n outputs_all = []\n for i in range(len(self.inner_model.outputs)):\n outputs_all.append([])\n\n # Run the model call() on each GPU to place the ops there\n for i in range(self.gpu_count):\n with tf.device('/gpu:%d' % i):\n with tf.name_scope('tower_%d' % i):\n # Run a slice of inputs through this replica\n zipped_inputs = zip(self.inner_model.input_names,\n self.inner_model.inputs)\n inputs = [\n KL.Lambda(lambda s: input_slices[name][i],\n output_shape=lambda s: (None,) + s[1:])(tensor)\n for name, tensor in zipped_inputs]\n # Create the model replica and get the outputs\n outputs = self.inner_model(inputs)\n if not isinstance(outputs, list):\n outputs = [outputs]\n # Save the outputs for merging back together later\n for l, o in enumerate(outputs):\n outputs_all[l].append(o)\n\n # Merge outputs on CPU\n with tf.device('/cpu:0'):\n merged = []\n for outputs, name in zip(outputs_all, output_names):\n # Concatenate or average outputs?\n # Outputs usually have a batch dimension and we concatenate\n # across it. 
If they don't, then the output is likely a loss\n # or a metric value that gets averaged across the batch.\n # Keras expects losses and metrics to be scalars.\n if K.int_shape(outputs[0]) == ():\n # Average\n m = KL.Lambda(lambda o: tf.add_n(o) / len(outputs), name=name)(outputs)\n else:\n # Concatenate\n m = KL.Concatenate(axis=0, name=name)(outputs)\n merged.append(m)\n return merged\n\n\nif __name__ == \"__main__\":\n # Testing code below. It creates a simple model to train on MNIST and\n # tries to run it on 2 GPUs. It saves the graph so it can be viewed\n # in TensorBoard. Run it as:\n #\n # python3 parallel_model.py\n\n import os\n import numpy as np\n import keras.optimizers\n from keras.datasets import mnist\n from keras.preprocessing.image import ImageDataGenerator\n\n GPU_COUNT = 2\n\n # Root directory of the project\n ROOT_DIR = os.path.abspath(\"../\")\n\n # Directory to save logs and trained model\n MODEL_DIR = os.path.join(ROOT_DIR, \"logs\")\n\n def build_model(x_train, num_classes):\n # Reset default graph. Keras leaves old ops in the graph,\n # which are ignored for execution but clutter graph\n # visualization in TensorBoard.\n tf.reset_default_graph()\n\n inputs = KL.Input(shape=x_train.shape[1:], name=\"input_image\")\n x = KL.Conv2D(32, (3, 3), activation='relu', padding=\"same\",\n name=\"conv1\")(inputs)\n x = KL.Conv2D(64, (3, 3), activation='relu', padding=\"same\",\n name=\"conv2\")(x)\n x = KL.MaxPooling2D(pool_size=(2, 2), name=\"pool1\")(x)\n x = KL.Flatten(name=\"flat1\")(x)\n x = KL.Dense(128, activation='relu', name=\"dense1\")(x)\n x = KL.Dense(num_classes, activation='softmax', name=\"dense2\")(x)\n\n return KM.Model(inputs, x, \"digit_classifier_model\")\n\n # Load MNIST Data\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n x_train = np.expand_dims(x_train, -1).astype('float32') / 255\n x_test = np.expand_dims(x_test, -1).astype('float32') / 255\n\n print('x_train shape:', x_train.shape)\n print('x_test shape:', x_test.shape)\n\n # Build data generator and model\n datagen = ImageDataGenerator()\n model = build_model(x_train, 10)\n\n # Add multi-GPU support.\n model = ParallelModel(model, GPU_COUNT)\n\n optimizer = keras.optimizers.SGD(lr=0.01, momentum=0.9, clipnorm=5.0)\n\n model.compile(loss='sparse_categorical_crossentropy',\n optimizer=optimizer, metrics=['accuracy'])\n\n model.summary()\n\n # Train\n model.fit_generator(\n datagen.flow(x_train, y_train, batch_size=64),\n steps_per_epoch=50, epochs=10, verbose=1,\n validation_data=(x_test, y_test),\n callbacks=[keras.callbacks.TensorBoard(log_dir=MODEL_DIR,\n write_graph=True)]\n )\n"
] |
[
[
"tensorflow.device",
"numpy.expand_dims",
"tensorflow.reset_default_graph",
"tensorflow.name_scope",
"tensorflow.split",
"tensorflow.add_n"
]
] |
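What ParallelModel does to a batch, stripped of the Keras/TensorFlow machinery, is: split the inputs along the batch axis into gpu_count slices, run one model replica per slice, and concatenate the per-slice outputs. A conceptual numpy stand-in of that data-parallel pattern (no GPUs or Keras involved; model_replica is a made-up placeholder for the inner model):

import numpy as np

gpu_count = 2
x = np.random.rand(8, 4).astype("float32")       # batch of 8 inputs

def model_replica(x_slice):                      # placeholder for the inner Keras model
    return x_slice.sum(axis=1, keepdims=True)

slices = np.split(x, gpu_count, axis=0)          # mirrors tf.split(x, self.gpu_count)
outputs = [model_replica(s) for s in slices]     # one replica per "GPU"
merged = np.concatenate(outputs, axis=0)         # mirrors KL.Concatenate(axis=0)
print(merged.shape)                              # (8, 1) - same batch size as the input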
smericks/lenstronomy
|
[
"3f095595e36888ac7b4ee5345b6abc5aaa8275f1",
"3f095595e36888ac7b4ee5345b6abc5aaa8275f1"
] |
[
"lenstronomy/LensModel/Profiles/gauss_decomposition.py",
"lenstronomy/Plots/plot_util.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nThis module contains the class to compute lensing properties of any\nelliptical profile using Shajib (2019)'s Gauss decomposition.\n\"\"\"\n\n__author__ = 'ajshajib'\n\nimport numpy as np\nimport abc\nfrom scipy.special import comb\n\nfrom lenstronomy.LensModel.Profiles.gaussian_ellipse_kappa import GaussianEllipseKappa\nfrom lenstronomy.LensModel.Profiles.sersic_utils import SersicUtil\nfrom lenstronomy.LensModel.Profiles.base_profile import LensProfileBase\n\nfrom lenstronomy.Util.package_util import exporter\nexport, __all__ = exporter()\n\n_SQRT_2PI = np.sqrt(2*np.pi)\n\n\n@export\nclass GaussianEllipseKappaSet(LensProfileBase):\n \"\"\"\n This class computes the lensing properties of a set of concentric\n elliptical Gaussian convergences.\n \"\"\"\n param_names = ['amp', 'sigma', 'e1', 'e2', 'center_x', 'center_y']\n lower_limit_default = {'amp': 0, 'sigma': 0, 'e1': -0.5, 'e2': -0.5,\n 'center_x': -100, 'center_y': -100}\n upper_limit_default = {'amp': 100, 'sigma': 100, 'e1': 0.5, 'e2': 0.5,\n 'center_x': 100, 'center_y': 100}\n\n def __init__(self, use_scipy_wofz=True, min_ellipticity=1e-5):\n \"\"\"\n\n :param use_scipy_wofz: To initiate ``class GaussianEllipseKappa``. If ``True``, Gaussian lensing will use ``scipy.special.wofz`` function. Set ``False`` for lower precision, but faster speed.\n :type use_scipy_wofz: ``bool``\n :param min_ellipticity: To be passed to ``class GaussianEllipseKappa``. Minimum ellipticity for Gaussian elliptical lensing calculation. For lower ellipticity than min_ellipticity the equations for the spherical case will be used.\n :type min_ellipticity: ``float``\n \"\"\"\n self.gaussian_ellipse_kappa = GaussianEllipseKappa(\n use_scipy_wofz=use_scipy_wofz,\n min_ellipticity=min_ellipticity)\n super(GaussianEllipseKappaSet, self).__init__()\n\n def function(self, x, y, amp, sigma, e1, e2, center_x=0, center_y=0):\n \"\"\"\n Compute the potential function for a set of concentric elliptical\n Gaussian convergence profiles.\n\n :param x: x coordinate\n :type x: ``float`` or ``numpy.array``\n :param y: y coordinate\n :type y: ``float`` or ``numpy.array``\n :param amp: Amplitude of Gaussian, convention: :math:`A/(2 \\pi\\sigma^2) \\exp(-(x^2+y^2/q^2)/2\\sigma^2)`\n :type amp: ``numpy.array`` with ``dtype=float``\n :param sigma: Standard deviation of Gaussian\n :type sigma: ``numpy.array`` with ``dtype=float``\n :param e1: Ellipticity parameter 1\n :type e1: ``float``\n :param e2: Ellipticity parameter 2\n :type e2: ``float``\n :param center_x: x coordinate of centroid\n :type center_x: ``float``\n :param center_y: y coordianate of centroid\n :type center_y: ``float``\n :return: Potential for elliptical Gaussian convergence\n :rtype: ``float``, or ``numpy.array`` with ``shape = x.shape``\n \"\"\"\n function = np.zeros_like(x, dtype=float)\n\n for i in range(len(amp)):\n function += self.gaussian_ellipse_kappa.function(x, y,\n amp[i],\n sigma[i], e1,\n e2,\n center_x,\n center_y)\n return function\n\n def derivatives(self, x, y, amp, sigma, e1, e2, center_x=0, center_y=0):\n \"\"\"\n Compute the derivatives of function angles :math:`\\partial\n f/\\partial x`, :math:`\\partial f/\\partial y` at :math:`x,\\ y` for a\n set of concentric elliptic Gaussian convergence profiles.\n\n :param x: x coordinate\n :type x: ``float`` or ``numpy.array``\n :param y: y coordinate\n :type y: ``float`` or ``numpy.array``\n :param amp: Amplitude of Gaussian, convention: :math:`A/(2 \\pi\\sigma^2) \\exp(-(x^2+y^2/q^2)/2\\sigma^2)`\n :type amp: 
``numpy.array`` with ``dtype=float``\n :param sigma: Standard deviation of Gaussian\n :type sigma: ``numpy.array`` with ``dtype=float``\n :param e1: Ellipticity parameter 1\n :type e1: ``float``\n :param e2: Ellipticity parameter 2\n :type e2: ``float``\n :param center_x: x coordinate of centroid\n :type center_x: ``float``\n :param center_y: y coordianate of centroid\n :type center_y: ``float``\n :return: Deflection angle :math:`\\partial f/\\partial x`, :math:`\\partial f/\\partial y` for elliptical Gaussian convergence\n :rtype: tuple ``(float, float)`` or ``(numpy.array, numpy.array)`` with each ``numpy`` array's shape equal to ``x.shape``\n \"\"\"\n f_x = np.zeros_like(x, dtype=float)\n f_y = np.zeros_like(x, dtype=float)\n\n for i in range(len(amp)):\n f_x_i, f_y_i = self.gaussian_ellipse_kappa.derivatives(x, y,\n amp=amp[i],\n sigma=sigma[i], e1=e1,\n e2=e2, center_x=center_x,\n center_y=center_y)\n f_x += f_x_i\n f_y += f_y_i\n\n return f_x, f_y\n\n def hessian(self, x, y, amp, sigma, e1, e2, center_x=0, center_y=0):\n \"\"\"\n Compute Hessian matrix of function :math:`\\partial^2f/\\partial x^2`,\n :math:`\\partial^2 f/\\partial y^2`, :math:`\\partial^2 f/\\partial\n x\\partial y` for a set of concentric elliptic Gaussian convergence\n profiles.\n\n :param x: x coordinate\n :type x: ``float`` or ``numpy.array``\n :param y: y coordinate\n :type y: ``float`` or ``numpy.array``\n :param amp: Amplitude of Gaussian, convention: :math:`A/(2 \\pi\\sigma^2) \\exp(-(x^2+y^2/q^2)/2\\sigma^2)`\n :type amp: ``numpy.array`` with ``dtype=float``\n :param sigma: Standard deviation of Gaussian\n :type sigma: ``numpy.array`` with ``dtype=float``\n :param e1: Ellipticity parameter 1\n :type e1: ``float``\n :param e2: Ellipticity parameter 2\n :type e2: ``float``\n :param center_x: x coordinate of centroid\n :type center_x: ``float``\n :param center_y: y coordianate of centroid\n :type center_y: ``float``\n :return: Hessian :math:`\\partial^2f/\\partial x^2`, :math:`\\partial^2/\\partial x\\partial y`, :math:`\\partial^2/\\partial y\\partial x`, :math:`\\partial^2 f/\\partial y^2` for elliptical Gaussian convergence.\n :rtype: tuple ``(float, float, float)`` , or ``(numpy.array, numpy.array, numpy.array)`` with each ``numpy`` array's shape equal to ``x.shape``\n \"\"\"\n f_xx = np.zeros_like(x, dtype=float)\n f_yy = np.zeros_like(x, dtype=float)\n f_xy = np.zeros_like(x, dtype=float)\n\n for i in range(len(amp)):\n f_xx_i, f_xy_i, _, f_yy_i = self.gaussian_ellipse_kappa.hessian(x, y, amp=amp[i], sigma=sigma[i], e1=e1,\n e2=e2, center_x=center_x, center_y=center_y)\n f_xx += f_xx_i\n f_yy += f_yy_i\n f_xy += f_xy_i\n\n return f_xx, f_xy, f_xy, f_yy\n\n def density_2d(self, x, y, amp, sigma, e1, e2, center_x=0, center_y=0):\n \"\"\"\n Compute the density of a set of concentric elliptical Gaussian\n convergence profiles :math:`\\sum A/(2\\pi \\sigma^2) \\exp(-(\n x^2+y^2/q^2)/2\\sigma^2)`.\n\n :param x: x coordinate\n :type x: ``float`` or ``numpy.array``\n :param y: y coordinate\n :type y: ``float`` or ``numpy.array``\n :param amp: Amplitude of Gaussian, convention: :math:`A/(2 \\pi\\sigma^2) \\exp(-(x^2+y^2/q^2)/2\\sigma^2)`\n :type amp: ``numpy.array`` with ``dtype=float``\n :param sigma: Standard deviation of Gaussian\n :type sigma: ``numpy.array`` with ``dtype=float``\n :param e1: Ellipticity parameter 1\n :type e1: ``float``\n :param e2: Ellipticity parameter 2\n :type e2: ``float``\n :param center_x: x coordinate of centroid\n :type center_x: ``float``\n :param center_y: y coordianate of 
centroid\n :type center_y: ``float``\n :return: Density :math:`\\kappa` for elliptical Gaussian convergence\n :rtype: ``float``, or ``numpy.array`` with shape equal to ``x.shape``\n \"\"\"\n density_2d = np.zeros_like(x, dtype=float)\n\n for i in range(len(amp)):\n density_2d += self.gaussian_ellipse_kappa.density_2d(x, y,\n amp=amp[i],\n sigma=sigma[i],\n e1=e1, e2=e2,\n center_x=center_x,\n center_y=center_y)\n\n return density_2d\n\n\n@export\nclass GaussDecompositionAbstract(metaclass=abc.ABCMeta):\n \"\"\"\n This abstract class sets up a template for computing lensing properties of\n an elliptical convergence through Shajib (2019)'s Gauss decomposition.\n \"\"\"\n def __init__(self, n_sigma=15, sigma_start_mult=0.02, sigma_end_mult=15.,\n precision=10, use_scipy_wofz=True, min_ellipticity=1e-5):\n \"\"\"\n Set up settings for the Gaussian decomposition. For more details about\n the decomposition parameters, see Shajib (2019).\n\n :param n_sigma: Number of Gaussian components\n :type n_sigma: ``int``\n :param sigma_start_mult: Lower range of logarithmically spaced sigmas\n :type sigma_start_mult: ``float``\n :param sigma_end_mult: Upper range of logarithmically spaced sigmas\n :type sigma_end_mult: ``float``\n :param precision: Numerical precision of Gaussian decomposition\n :type precision: ``int``\n :param use_scipy_wofz: To be passed to ``class GaussianEllipseKappa``. If ``True``, Gaussian lensing will use ``scipy.special.wofz`` function. Set ``False`` for lower precision, but faster speed.\n :type use_scipy_wofz: ``bool``\n :param min_ellipticity: To be passed to ``class GaussianEllipseKappa``. Minimum ellipticity for Gaussian elliptical lensing calculation. For lower ellipticity than min_ellipticity the equations for the spherical case will be used.\n :type min_ellipticity: ``float``\n \"\"\"\n self.gaussian_set = GaussianEllipseKappaSet(\n use_scipy_wofz=use_scipy_wofz,\n min_ellipticity=min_ellipticity)\n\n self.n_sigma = n_sigma\n self.sigma_start_mult = sigma_start_mult\n self.sigma_end_mult = sigma_end_mult\n self.precision = precision\n\n p = self.precision\n # nodes and weights based on Fourier-Euler method\n # for details Abate & Whitt (2006)\n kes = np.arange(2 * p + 1)\n self.betas = np.sqrt(2 * p * np.log(10) / 3. + 2. * 1j * np.pi * kes)\n epsilons = np.zeros(2 * p + 1)\n\n epsilons[0] = 0.5\n epsilons[1:p + 1] = 1.\n epsilons[-1] = 1 / 2. ** p\n\n for k in range(1, p):\n epsilons[2 * p - k] = epsilons[2 * p - k + 1] + 1 / 2. ** p * comb(\n p, k)\n\n self.etas = (-1.) ** kes * epsilons * 10. ** (p / 3.) * 2. * \\\n _SQRT_2PI\n\n def gauss_decompose(self, **kwargs):\n r\"\"\"\n Compute the amplitudes and sigmas of Gaussian components using the\n integral transform with Gaussian kernel from Shajib (2019). The\n returned values are in the convention of eq. 
(2.13).\n\n :param func: The function to decompose\n :type func: ``function``\n :param \\**kwargs: Keyword arguments to send to ``func``\n :return: Amplitudes and standard deviations of the Gaussian components\n :rtype: tuple ``(numpy.array, numpy.array)``\n \"\"\"\n sigma_start = self.sigma_start_mult*self.get_scale(**kwargs)\n sigma_end = self.sigma_end_mult*self.get_scale(**kwargs)\n\n sigmas = np.logspace(np.log10(sigma_start), np.log10(sigma_end),\n self.n_sigma)\n\n f_sigmas = np.sum(self.etas * self.get_kappa_1d(\n sigmas[:,np.newaxis]*self.betas[np.newaxis, :],\n **kwargs).real,\n axis=1\n )\n\n del_log_sigma = np.abs(np.diff(np.log(sigmas)).mean())\n\n amps = f_sigmas * del_log_sigma / _SQRT_2PI\n\n # weighting for trapezoid method integral\n amps[0] *= 0.5\n amps[-1] *= 0.5\n\n return amps, sigmas\n\n @abc.abstractmethod\n def get_scale(self, **kwargs):\n \"\"\"\n Abstract method to identify the keyword argument for the scale size\n among the profile parameters of the child class' convergence profile.\n\n :param \\**kwargs: Keyword arguments\n :return: Scale size\n :rtype: ``float``\n \"\"\"\n\n @abc.abstractmethod\n def get_kappa_1d(self, y, **kwargs):\n r\"\"\"\n Abstract method to compute the spherical Sersic profile at y.\n The concrete method has to defined by the child class.\n\n :param y: y coordinate\n :type y: ``float`` or ``numpy.array``\n :param \\**kwargs: Keyword arguments that are defined by the child class that are particular for the convergence profile\n \"\"\"\n\n def function(self, x, y, e1=0., e2=0., center_x=0.,\n center_y=0., **kwargs):\n r\"\"\"\n Compute the deflection potential of a Gauss-decomposed\n elliptic convergence.\n\n :param x: x coordinate\n :type x: ``float``\n :param y: y coordinate\n :type y: ``float``\n :param e1: Ellipticity parameter 1\n :type e1: ``float``\n :param e2: Ellipticity parameter 2\n :type e2: ``float``\n :param center_x: x coordinate of centroid\n :type center_x: ``float``\n :param center_y: y coordinate of centroid\n :type center_y: ``float``\n :param \\**kwargs: Keyword arguments that are defined by the child class that are particular for the convergence profile\n :return: Deflection potential\n :rtype: ``float``\n \"\"\"\n amps, sigmas = self.gauss_decompose(**kwargs)\n\n # converting the amplitude convention A -> A/(2*pi*sigma^2)\n amps *= 2.*np.pi * sigmas * sigmas\n\n return self.gaussian_set.function(x, y, amps, sigmas, e1, e2,\n center_x, center_y)\n\n def derivatives(self, x, y, e1=0., e2=0., center_x=0.,\n center_y=0., **kwargs):\n r\"\"\"\n Compute the derivatives of the deflection potential :math:`\\partial\n f/\\partial x`, :math:`\\partial f/\\partial y` for a Gauss-decomposed\n elliptic convergence.\n\n :param x: x coordinate\n :type x: ``float`` or ``numpy.array``\n :param y: y coordinate\n :type y: ``float`` or ``numpy.array``\n :param e1: Ellipticity parameter 1\n :type e1: ``float``\n :param e2: Ellipticity parameter 2\n :type e2: ``float``\n :param center_x: x coordinate of centroid\n :type center_x: ``float``\n :param center_y: y coordinate of centroid\n :type center_y: ``float``\n :param \\**kwargs: Keyword arguments that are defined by the child class that are particular for the convergence profile\n :return: Derivatives of deflection potential\n :rtype: tuple ``(type(x), type(x))``\n \"\"\"\n amps, sigmas = self.gauss_decompose(**kwargs)\n\n # converting the amplitude convention A -> A/(2*pi*sigma^2)\n amps *= 2. 
* np.pi * sigmas * sigmas\n\n return self.gaussian_set.derivatives(x, y, amps, sigmas, e1, e2,\n center_x, center_y)\n\n def hessian(self, x, y, e1=0., e2=0., center_x=0.,\n center_y=0., **kwargs):\n r\"\"\"\n Compute the Hessian of the deflection potential\n :math:`\\partial^2f/\\partial x^2`, :math:`\\partial^2 f/ \\partial\n y^2`, :math:`\\partial^2 f/\\partial x\\partial y` of a Gauss-decomposed\n elliptic Sersic convergence.\n\n :param x: x coordinate\n :type x: ``float`` or ``numpy.array``\n :param y: y coordinate\n :type y: ``float`` or ``numpy.array``\n :param e1: Ellipticity parameter 1\n :type e1: ``float``\n :param e2: Ellipticity parameter 2\n :type e2: ``float``\n :param center_x: x coordinate of centroid\n :type center_x: ``float``\n :param center_y: y coordinate of centroid\n :type center_y: ``float``\n :param \\**kwargs: Keyword arguments that are defined by the child class that are particular for the convergence profile\n :return: Hessian of deflection potential\n :rtype: tuple ``(type(x), type(x), type(x))``\n \"\"\"\n amps, sigmas = self.gauss_decompose(**kwargs)\n\n # converting the amplitude convention A -> A/(2*pi*sigma^2)\n amps *= 2. * np.pi * sigmas * sigmas\n\n return self.gaussian_set.hessian(x, y, amps, sigmas, e1, e2,\n center_x, center_y)\n\n def density_2d(self, x, y, e1=0., e2=0., center_x=0.,\n center_y=0., **kwargs):\n r\"\"\"\n Compute the convergence profile for Gauss-decomposed elliptic Sersic profile.\n\n :param x: x coordinate\n :type x: ``float`` or ``numpy.array``\n :param y: y coordinate\n :type y: ``float`` or ``numpy.array``\n :param e1: Ellipticity parameter 1\n :type e1: ``float``\n :param e2: Ellipticity parameter 2\n :type e2: ``float``\n :param center_x: x coordinate of centroid\n :type center_x: ``float``\n :param center_y: y coordinate of centroid\n :type center_y: ``float``\n :param \\**kwargs: Keyword arguments that are defined by the child class that are particular for the convergence profile in the child class.\n :return: Convergence profile\n :rtype: ``type(x)``\n \"\"\"\n amps, sigmas = self.gauss_decompose(**kwargs)\n\n # converting the amplitude convention A -> A/(2*pi*sigma^2)\n amps *= 2. * np.pi * sigmas * sigmas\n\n return self.gaussian_set.density_2d(x, y, amps, sigmas, e1, e2,\n center_x, center_y)\n\n\n@export\nclass SersicEllipseGaussDec(GaussDecompositionAbstract):\n \"\"\"\n This class computes the lensing properties of an elliptical Sersic\n profile using the Shajib (2019)'s Gauss decomposition method.\n \"\"\"\n param_names = ['k_eff', 'R_sersic', 'n_sersic', 'e1', 'e2', 'center_x',\n 'center_y']\n lower_limit_default = {'k_eff': 0., 'R_sersic': 0., 'n_sersic': 0.5,\n 'e1': -0.5, 'e2': -0.5, 'center_x': -100.,\n 'center_y': -100.}\n upper_limit_default = {'k_eff': 100., 'R_sersic': 100., 'n_sersic': 8.,\n 'e1': 0.5, 'e2': 0.5, 'center_x': 100.,\n 'center_y': 100.}\n\n def get_kappa_1d(self, y, **kwargs):\n r\"\"\"\n Compute the spherical Sersic profile at y.\n\n :param y: y coordinate\n :type y: ``float``\n :param \\**kwargs: Keyword arguments\n\n :Keyword Arguments:\n * **n_sersic** (``float``) --\n Sersic index\n * **R_sersic** (``float``) --\n Sersic scale radius\n * **k_eff** (``float``) --\n Sersic convergence at R_sersic\n\n :return: Sersic function at y\n :rtype: ``type(y)``\n \"\"\"\n n_sersic = kwargs['n_sersic']\n R_sersic = kwargs['R_sersic']\n k_eff = kwargs['k_eff']\n\n bn = SersicUtil.b_n(n_sersic)\n\n return k_eff * np.exp(-bn * (y / R_sersic) ** (1. 
/ n_sersic) + bn)\n\n def get_scale(self, **kwargs):\n \"\"\"\n Identify the scale size from the keyword arguments.\n\n :param \\**kwargs: Keyword arguments\n\n :Keyword Arguments:\n * **n_sersic** (``float``) --\n Sersic index\n * **R_sersic** (``float``) --\n Sersic scale radius\n * **k_eff** (``float``) --\n Sersic convergence at R_sersic\n\n :return: Sersic radius\n :rtype: ``float``\n \"\"\"\n return kwargs['R_sersic']\n\n\n@export\nclass NFWEllipseGaussDec(GaussDecompositionAbstract):\n \"\"\"\n This class computes the lensing properties of an elliptical, projected NFW\n profile using Shajib (2019)'s Gauss decomposition method.\n \"\"\"\n param_names = ['Rs', 'alpha_Rs', 'e1', 'e2', 'center_x', 'center_y']\n lower_limit_default = {'Rs': 0, 'alpha_Rs': 0, 'e1': -0.5, 'e2': -0.5,\n 'center_x': -100, 'center_y': -100}\n upper_limit_default = {'Rs': 100, 'alpha_Rs': 10, 'e1': 0.5, 'e2': 0.5,\n 'center_x': 100, 'center_y': 100}\n\n def __init__(self, n_sigma=15, sigma_start_mult=0.005, sigma_end_mult=50.,\n precision=10, use_scipy_wofz=True, min_ellipticity=1e-5):\n \"\"\"\n Set up settings for the Gaussian decomposition. For more details about\n the decomposition parameters, see Shajib (2019).\n\n :param n_sigma: Number of Gaussian components\n :type n_sigma: ``int``\n :param sigma_start_mult: Lower range of logarithmically spaced sigmas\n :type sigma_start_mult: ``float``\n :param sigma_end_mult: Upper range of logarithmically spaced sigmas\n :type sigma_end_mult: ``float``\n :param precision: Numerical precision of Gaussian decomposition\n :type precision: ``int``\n :param use_scipy_wofz: To be passed to ``class GaussianEllipseKappa``. If ``True``, Gaussian lensing will use ``scipy.special.wofz`` function. Set ``False`` for lower precision, but faster speed.\n :type use_scipy_wofz: ``bool``\n :param min_ellipticity: To be passed to ``class GaussianEllipseKappa``. Minimum ellipticity for Gaussian elliptical lensing calculation. For lower ellipticity than min_ellipticity the equations for the spherical case will be used.\n :type min_ellipticity: ``float``\n \"\"\"\n super(NFWEllipseGaussDec, self).__init__(n_sigma=n_sigma,\n sigma_start_mult=sigma_start_mult,\n sigma_end_mult=sigma_end_mult,\n precision=precision,\n use_scipy_wofz=use_scipy_wofz,\n min_ellipticity=min_ellipticity)\n\n def get_kappa_1d(self, y, **kwargs):\n r\"\"\"\n Compute the spherical projected NFW profile at y.\n\n :param y: y coordinate\n :type y: ``float``\n :param \\**kwargs: Keyword arguments\n\n :Keyword Arguments:\n * **alpha_Rs** (``float``) --\n Deflection angle at ``Rs``\n * **R_s** (``float``) --\n NFW scale radius\n\n :return: projected NFW profile at y\n :rtype: ``type(y)``\n \"\"\"\n R_s = kwargs['Rs']\n alpha_Rs = kwargs['alpha_Rs']\n\n kappa_s = alpha_Rs / (4 * R_s * (1 - 0.30102999566))\n # log2 = 0.30102999566\n\n x = y/R_s\n\n f = np.empty(shape=x.shape, dtype=x.dtype)\n\n range1 = (x > 1.)\n if np.any(range1):\n s = x[range1]\n f[range1] = (1 - np.arccos(1 / s) / np.sqrt(s * s - 1)) / (\n s * s - 1)\n\n range2 = (x < 1.)\n if np.any(range2):\n s = x[range2]\n f[range2] = (1 - np.arccosh(1 / s) / np.sqrt(1 - s * s)) / (\n s * s - 1)\n\n range3 = np.logical_and(np.logical_not(range1), np.logical_not(range2))\n if np.any(range3):\n f[range3] = 1. 
/ 3.\n\n return 2 * kappa_s * f\n\n def get_scale(self, **kwargs):\n \"\"\"\n Identify the scale size from the keyword arguments.\n\n :param \\**kwargs: Keyword arguments\n\n :Keyword Arguments:\n * **alpha_Rs** (``float``) --\n Deflection angle at ``Rs``\n * **R_s** (``float``) --\n NFW scale radius\n\n :return: NFW scale radius\n :rtype: ``float``\n \"\"\"\n return kwargs['Rs']\n\n\n@export\nclass GaussDecompositionAbstract3D(GaussDecompositionAbstract):\n \"\"\"\n This abstract class sets up a template for computing lensing properties of\n a convergence from 3D spherical profile through Shajib (2019)'s Gauss\n decomposition.\n \"\"\"\n def gauss_decompose(self, **kwargs):\n r\"\"\"\n Compute the amplitudes and sigmas of Gaussian components using the\n integral transform with Gaussian kernel from Shajib (2019). The\n returned values are in the convention of eq. (2.13).\n\n :param func: The function to decompose\n :type func: ``function``\n :param \\**kwargs: Keyword arguments to send to ``func``\n :return: Amplitudes and standard deviations of the Gaussian components\n :rtype: tuple ``(numpy.array, numpy.array)``\n \"\"\"\n\n f_sigmas, sigmas = super(GaussDecompositionAbstract3D,\n self).gauss_decompose(**kwargs)\n\n return f_sigmas * sigmas * _SQRT_2PI, sigmas\n\n\n@export\nclass CTNFWGaussDec(GaussDecompositionAbstract3D):\n \"\"\"\n This class computes the lensing properties of an projection from a\n spherical cored-truncated NFW profile using Shajib (2019)'s Gauss\n decomposition method.\n \"\"\"\n param_names = ['r_s', 'r_core', 'r_trunc', 'a', 'rho_s', 'center_x' \n 'center_y']\n lower_limit_default = {'r_s': 0, 'r_core': 0, 'r_trunc': 0, 'a': 0.,\n 'rho_s': 0, 'center_x': -100, 'center_y': -100}\n upper_limit_default = {'r_s': 100, 'r_core': 100, 'r_trunc': 100, 'a': 10.,\n 'rho_s': 1000, 'center_x': 100, 'center_y': 100}\n\n def __init__(self, n_sigma=15, sigma_start_mult=0.01, sigma_end_mult=20.,\n precision=10, use_scipy_wofz=True):\n \"\"\"\n Set up settings for the Gaussian decomposition. For more details about\n the decomposition parameters, see Shajib (2019).\n\n :param n_sigma: Number of Gaussian components\n :type n_sigma: ``int``\n :param sigma_start_mult: Lower range of logarithmically spaced sigmas\n :type sigma_start_mult: ``float``\n :param sigma_end_mult: Upper range of logarithmically spaced sigmas\n :type sigma_end_mult: ``float``\n :param precision: Numerical precision of Gaussian decomposition\n :type precision: ``int``\n :param use_scipy_wofz: To be passed to ``class GaussianEllipseKappa``. If ``True``, Gaussian lensing will use ``scipy.special.wofz`` function. Set ``False`` for lower precision, but faster speed.\n :type use_scipy_wofz: ``bool``\n :param min_ellipticity: To be passed to ``class GaussianEllipseKappa``. Minimum ellipticity for Gaussian elliptical lensing calculation. 
For lower ellipticity than min_ellipticity the equations for the spherical case will be used.\n :type min_ellipticity: ``float``\n \"\"\"\n super(CTNFWGaussDec, self).__init__(n_sigma=n_sigma,\n sigma_start_mult=sigma_start_mult,\n sigma_end_mult=sigma_end_mult,\n precision=precision, use_scipy_wofz=use_scipy_wofz)\n\n def get_kappa_1d(self, y, **kwargs):\n r\"\"\"\n Compute the spherical cored-truncated NFW profile at y.\n\n :param y: y coordinate\n :type y: ``float``\n :param \\**kwargs: Keyword arguments\n\n :Keyword Arguments:\n * **r_s** (``float``) --\n Scale radius\n * **r_trunc** (``float``) --\n Truncation radius\n * **r_core** (``float``) --\n Core radius\n * **rho_s** (``float``) --\n Density normalization\n * **a** (``float``) --\n Core regularization parameter\n\n :return: projected NFW profile at y\n :rtype: ``type(y)``\n \"\"\"\n r_s = kwargs['r_s']\n r_trunc = kwargs['r_trunc']\n r_core = kwargs['r_core']\n rho_s = kwargs['rho_s']\n a = kwargs['a']\n\n beta = r_core/r_s\n tau = r_trunc/r_s\n\n x = y/r_s\n\n return rho_s * (tau*tau / (tau*tau + x*x)) / (x**a + beta**a)**(\n 1./a) / (1. + x)**2\n\n def get_scale(self, **kwargs):\n \"\"\"\n Identify the scale size from the keyword arguments.\n\n :param \\**kwargs: Keyword arguments\n\n :Keyword Arguments:\n * **r_s** (``float``) --\n Scale radius\n * **r_trunc** (``float``) --\n Truncation radius\n * **r_core** (``float``) --\n Core radius\n * **rho_s** (``float``) --\n Density normalization\n * **a** (``float``) --\n Core regularization parameter\n\n :return: NFW scale radius\n :rtype: ``float``\n \"\"\"\n return kwargs['r_s']\n",
"import numpy as np\nimport math\nimport matplotlib.pyplot as plt\n\nfrom lenstronomy.Util.package_util import exporter\nexport, __all__ = exporter()\n\n\n@export\ndef sqrt(inputArray, scale_min=None, scale_max=None):\n \"\"\"Performs sqrt scaling of the input numpy array.\n\n @type inputArray: numpy array\n @param inputArray: image data array\n @type scale_min: float\n @param scale_min: minimum data value\n @type scale_max: float\n @param scale_max: maximum data value\n @rtype: numpy array\n @return: image data array\n\n \"\"\"\n\n imageData = np.array(inputArray, copy=True)\n\n if scale_min is None:\n scale_min = imageData.min()\n if scale_max is None:\n scale_max = imageData.max()\n\n imageData = imageData.clip(min=scale_min, max=scale_max)\n imageData = imageData - scale_min\n indices = np.where(imageData < 0)\n imageData[indices] = 0.0\n imageData = np.sqrt(imageData)\n imageData = imageData / math.sqrt(scale_max - scale_min)\n return imageData\n\n\n@export\ndef text_description(ax, d, text, color='w', backgroundcolor='k',\n flipped=False, font_size=15):\n c_vertical = 1/15. #+ font_size / d / 10.**2\n c_horizontal = 1./30\n if flipped:\n ax.text(d - d * c_horizontal, d - d * c_vertical, text, color=color,\n fontsize=font_size,\n backgroundcolor=backgroundcolor)\n else:\n ax.text(d * c_horizontal, d - d * c_vertical, text, color=color, fontsize=font_size,\n backgroundcolor=backgroundcolor)\n\n\n@export\ndef scale_bar(ax, d, dist=1., text='1\"', color='w', font_size=15, flipped=False):\n if flipped:\n p0 = d - d / 15. - dist\n p1 = d / 15.\n ax.plot([p0, p0 + dist], [p1, p1], linewidth=2, color=color)\n ax.text(p0 + dist / 2., p1 + 0.01 * d, text, fontsize=font_size,\n color=color, ha='center')\n else:\n p0 = d / 15.\n ax.plot([p0, p0 + dist], [p0, p0], linewidth=2, color=color)\n ax.text(p0 + dist / 2., p0 + 0.01 * d, text, fontsize=font_size, color=color, ha='center')\n\n\n@export\ndef coordinate_arrows(ax, d, coords, color='w', font_size=15, arrow_size=0.05):\n d0 = d / 8.\n p0 = d / 15.\n pt = d / 9.\n deltaPix = coords.pixel_width\n ra0, dec0 = coords.map_pix2coord((d - d0) / deltaPix, d0 / deltaPix)\n xx_, yy_ = coords.map_coord2pix(ra0, dec0)\n xx_ra, yy_ra = coords.map_coord2pix(ra0 + p0, dec0)\n xx_dec, yy_dec = coords.map_coord2pix(ra0, dec0 + p0)\n xx_ra_t, yy_ra_t = coords.map_coord2pix(ra0 + pt, dec0)\n xx_dec_t, yy_dec_t = coords.map_coord2pix(ra0, dec0 + pt)\n\n ax.arrow(xx_ * deltaPix, yy_ * deltaPix, (xx_ra - xx_) * deltaPix, (yy_ra - yy_) * deltaPix,\n head_width=arrow_size * d, head_length=arrow_size * d, fc=color, ec=color, linewidth=1)\n ax.text(xx_ra_t * deltaPix, yy_ra_t * deltaPix, \"E\", color=color, fontsize=font_size, ha='center')\n ax.arrow(xx_ * deltaPix, yy_ * deltaPix, (xx_dec - xx_) * deltaPix, (yy_dec - yy_) * deltaPix,\n head_width=arrow_size * d, head_length=arrow_size * d, fc\n =color, ec=color, linewidth=1)\n ax.text(xx_dec_t * deltaPix, yy_dec_t * deltaPix, \"N\", color=color, fontsize=font_size, ha='center')\n\n\n@export\ndef plot_line_set(ax, coords, line_set_list_x, line_set_list_y, origin=None, flipped_x=False, points_only=False, *args, **kwargs):\n \"\"\"\n plotting a line set on a matplotlib instance where the coordinates are defined in pixel units with the lower left\n corner (defined as origin) is by default (0, 0). 
The coordinates are moved by 0.5 pixels to be placed in the center\n of the pixel in accordance with the matplotlib.matshow() routine.\n\n :param ax: matplotlib.axis instance\n :param coords: Coordinates() class instance\n :param origin: [x0, y0], lower left pixel coordinate in the frame of the pixels\n :param line_set_list_x: numpy arrays corresponding of different disconnected regions of the line\n (e.g. caustic or critical curve)\n :param line_set_list_y: numpy arrays corresponding of different disconnected regions of the line\n (e.g. caustic or critical curve)\n :param color: string with matplotlib color\n :param flipped_x: bool, if True, flips x-axis\n :param points_only: bool, if True, sets plotting keywords to plot single points without connecting lines\n :return: plot with line sets on matplotlib axis in pixel coordinates\n \"\"\"\n if origin is None:\n origin = [0, 0]\n pixel_width = coords.pixel_width\n pixel_width_x = pixel_width\n if points_only:\n if 'linestyle' not in kwargs:\n kwargs['linestyle'] = \"\"\n if 'marker' not in kwargs:\n kwargs['marker'] = \"o\"\n if 'markersize' not in kwargs:\n kwargs['markersize'] = 0.01\n if flipped_x:\n pixel_width_x = -pixel_width\n if isinstance(line_set_list_x, list):\n for i in range(len(line_set_list_x)):\n x_c, y_c = coords.map_coord2pix(line_set_list_x[i], line_set_list_y[i])\n ax.plot((x_c + 0.5) * pixel_width_x + origin[0], (y_c + 0.5) * pixel_width + origin[1], *args, **kwargs) # ',', color=color)\n else:\n x_c, y_c = coords.map_coord2pix(line_set_list_x, line_set_list_y)\n ax.plot((x_c + 0.5) * pixel_width_x + origin[0], (y_c + 0.5) * pixel_width + origin[1], *args, **kwargs) # ',', color=color)\n return ax\n\n\n@export\ndef image_position_plot(ax, coords, ra_image, dec_image, color='w', image_name_list=None, origin=None, flipped_x=False):\n \"\"\"\n\n :param ax: matplotlib axis instance\n :param coords: Coordinates() class instance or inherited class (such as PixelGrid(), or Data())\n :param ra_image: Ra/x-coordinates of image positions (list of arrays in angular units)\n :param dec_image: Dec/y-coordinates of image positions (list of arrays in angular units)\n :param color: color of ticks and text\n :param image_name_list: list of strings for names of the images in the same order as the positions\n :param origin: [x0, y0], lower left pixel coordinate in the frame of the pixels\n :param flipped_x: bool, if True, flips x-axis\n :return: matplotlib axis instance with images plotted on\n \"\"\"\n if origin is None:\n origin = [0, 0]\n pixel_width = coords.pixel_width\n pixel_width_x = pixel_width\n if flipped_x:\n pixel_width_x = -pixel_width\n if image_name_list is None:\n image_name_list = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L']\n\n if not isinstance(ra_image, list):\n ra_image_, dec_image_ = [ra_image], [dec_image]\n else:\n ra_image_, dec_image_ = ra_image, dec_image\n for ra, dec in zip(ra_image_, dec_image_):\n x_image, y_image = coords.map_coord2pix(ra, dec)\n\n for i in range(len(x_image)):\n x_ = (x_image[i] + 0.5) * pixel_width_x + origin[0]\n y_ = (y_image[i] + 0.5) * pixel_width + origin[1]\n ax.plot(x_, y_, 'o', color=color)\n ax.text(x_, y_, image_name_list[i], fontsize=20, color=color)\n return ax\n\n\n@export\ndef source_position_plot(ax, coords, ra_source, dec_source, marker='*', markersize=10, **kwargs):\n \"\"\"\n\n :param ax: matplotlib axis instance\n :param coords: Coordinates() class instance or inherited class (such as PixelGrid(), or Data())\n :param ra_source: list of source position 
in angular units\n :param dec_source: list of source position in angular units\n :param marker: marker style for matplotlib\n :param markersize: marker size for matplotlib\n :return: matplotlib axis instance with images plotted on\n \"\"\"\n delta_pix = coords.pixel_width\n if len(ra_source) > 0:\n for ra, dec in zip(ra_source, dec_source):\n x_source, y_source = coords.map_coord2pix(ra, dec)\n ax.plot((x_source + 0.5) * delta_pix, (y_source + 0.5) * delta_pix, marker=marker, markersize=markersize,\n **kwargs)\n return ax\n\n\n@export\ndef result_string(x, weights=None, title_fmt=\".2f\", label=None):\n \"\"\"\n\n :param x: marginalized 1-d posterior\n :param weights: weights of posteriors (optional)\n :param title_fmt: format to what digit the results are presented\n :param label: string of parameter label (optional)\n :return: string with mean \\pm quartile\n \"\"\"\n from corner import quantile\n\n q_16, q_50, q_84 = quantile(x, [0.16, 0.5, 0.84], weights=weights)\n q_m, q_p = q_50 - q_16, q_84 - q_50\n\n # Format the quantile display.\n fmt = \"{{0:{0}}}\".format(title_fmt).format\n title = r\"${{{0}}}_{{-{1}}}^{{+{2}}}$\"\n title = title.format(fmt(q_50), fmt(q_m), fmt(q_p))\n if label is not None:\n title = \"{0} = {1}\".format(label, title)\n return title\n\n\n@export\ndef cmap_conf(cmap_string):\n \"\"\"\n configures matplotlib color map\n\n :param cmap_string: string of cmap name, or cmap instance\n :return: cmap instance with setting for bad pixels and values below the threshold\n \"\"\"\n if isinstance(cmap_string, str):\n cmap = plt.get_cmap(cmap_string)\n else:\n cmap = cmap_string\n cmap.set_bad(color='k', alpha=1.)\n cmap.set_under('k')\n return cmap\n"
] |
[
[
"numpy.logical_not",
"numpy.log",
"numpy.sqrt",
"numpy.arange",
"numpy.arccosh",
"numpy.arccos",
"scipy.special.comb",
"numpy.log10",
"numpy.zeros_like",
"numpy.any",
"numpy.exp",
"numpy.zeros",
"numpy.empty"
],
[
"matplotlib.pyplot.get_cmap",
"numpy.array",
"numpy.where",
"numpy.sqrt"
]
] |
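The lenstronomy row above builds its Gaussian components from the Fourier-Euler weights of Abate & Whitt (2006) (the `betas`/`etas` arrays in `GaussDecompositionAbstract.__init__`). Below is a minimal, self-contained sketch of that 1-D transform, assuming only numpy and scipy; the function and variable names are illustrative and are not part of lenstronomy's API.

```python
import numpy as np
from scipy.special import comb

def gauss_decompose_1d(func, scale, n_sigma=15, start_mult=0.02, end_mult=15.0, precision=10):
    """Approximate func(r) as sum_j amp_j * exp(-r^2 / (2 sigma_j^2)) (Shajib 2019, eq. 2.13)."""
    p = precision
    kes = np.arange(2 * p + 1)
    betas = np.sqrt(2 * p * np.log(10) / 3.0 + 2j * np.pi * kes)
    epsilons = np.zeros(2 * p + 1)
    epsilons[0] = 0.5
    epsilons[1:p + 1] = 1.0
    epsilons[-1] = 1.0 / 2.0 ** p
    for k in range(1, p):
        epsilons[2 * p - k] = epsilons[2 * p - k + 1] + comb(p, k) / 2.0 ** p
    etas = (-1.0) ** kes * epsilons * 10.0 ** (p / 3.0) * 2.0 * np.sqrt(2.0 * np.pi)

    sigmas = np.logspace(np.log10(start_mult * scale), np.log10(end_mult * scale), n_sigma)
    # integral transform evaluated at sigma_j * beta_k, summed with the eta weights
    f_sigmas = np.sum(etas * func(sigmas[:, None] * betas[None, :]).real, axis=1)
    amps = f_sigmas * np.abs(np.diff(np.log(sigmas)).mean()) / np.sqrt(2.0 * np.pi)
    amps[0] *= 0.5   # trapezoidal end-point weights
    amps[-1] *= 0.5
    return amps, sigmas

# sanity check: a profile that is already a Gaussian should be reproduced by the mixture
amps, sigmas = gauss_decompose_1d(lambda r: np.exp(-r ** 2 / 2.0), scale=1.0)
r = np.linspace(0.1, 3.0, 30)
recon = np.sum(amps[:, None] * np.exp(-r[None, :] ** 2 / (2.0 * sigmas[:, None] ** 2)), axis=0)
# recon should approximately track np.exp(-r**2 / 2.0)
```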
cerisara/weibull-knowledge-informed-ml
|
[
"bdcb838807ee6bbb5655b275ba0169b76e3f5acc"
] |
[
"src/visualization/visualize_data.py"
] |
[
"import numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom scipy import signal, fftpack\nfrom matplotlib import gridspec\nimport pandas as pd\nimport logging\nfrom pathlib import Path\nimport os\nimport torch\nfrom src.data.data_utils import load_train_test_ims, load_train_test_femto\nfrom src.features.build_features import build_spectrogram_df_ims, create_fft\n\n\ndef create_time_frequency_plot(\n x, y, xf, yf, save_plot=False, save_name=\"time_freq_domain.svg\", dpi=150\n):\n \"\"\"Create a time domain and frequency domain plot.\n\n Parameters\n ===========\n x : ndarray\n Time (likely in seconds). Necessary for plotting time domain signals\n\n y : ndarray\n Time-domain signal (for example, the acceleration)\n\n xf : ndarray\n Frequency (likely in Hz). Necessary for plotting the frequency domain\n\n yf : ndarry\n Amplitude of FFT.\n\n save_plot : boolean\n True or False, whether to save the plot to file\n\n save_name : str\n If saving the plot, what is the name? Can be a string and/or path\n\n dpi : int\n dpi of saved image, if applicable\n\n Returns\n ===========\n Saves and/or shows a plot.\n\n \"\"\"\n\n # setup the seaborn plot\n sns.set(font_scale=1.1, style=\"whitegrid\")\n fig, axes = plt.subplots(2, 1, figsize=(12, 8), sharex=False, sharey=False)\n fig.tight_layout(pad=5.0)\n\n pal = sns.cubehelix_palette(6, rot=-0.25, light=0.7) # pick nice color for plot\n\n # plot time domain signal\n axes[0].plot(x, y, marker=\"\", label=\"Best model\", color=pal[3], linewidth=0.8)\n axes[0].set_title(\"Time Domain\", fontdict={\"fontweight\": \"normal\"})\n axes[0].set_xlabel(\"Time (seconds)\")\n axes[0].set_ylabel(\"Acceleration (g)\")\n\n # plot the frequency domain signal\n axes[1].plot(xf, yf, marker=\"\", label=\"Best model\", color=pal[3], linewidth=0.8)\n axes[1].set_title(\"Frequency Domain\", fontdict={\"fontweight\": \"normal\"})\n axes[1].set_xlabel(\"Frequency (Hz)\")\n axes[1].set_ylabel(\"Amplitude\")\n plt.ticklabel_format(axis=\"y\", style=\"sci\", scilimits=(0, 0))\n\n # clean up the sub-plots to make everything pretty\n for ax in axes.flatten():\n ax.yaxis.set_tick_params(labelleft=True, which=\"major\")\n ax.grid(False)\n\n if save_plot:\n plt.savefig(save_name, dpi=dpi, bbox_inches=\"tight\")\n\n else:\n plt.show()\n\n\ndef plot_freq_peaks(\n xf,\n yf,\n max_freq_to_plot=1000,\n peak_height=0.0001,\n peak_distance=100,\n save_plot=False,\n save_name=\"fft_peaks.png\",\n dpi=150,\n):\n \"\"\"Create a frequency domain plot and show peaks with associated\n frequency values.\n\n Parameters\n ===========\n\n xf : ndarray\n Frequency (likely in Hz). Necessary for plotting the frequency domain\n\n yf : ndarry\n Amplitude of FFT.\n\n max_freq_to_plot : int\n Cuttoff for the\n\n save_name : str\n If saving the plot, what is the name? 
Can be a string and/or path\n\n dpi : int\n dpi of saved image, if applicable\n\n Returns\n ===========\n Saves and/or shows a plot.\n\n \"\"\"\n\n # select the index number where xf is less than a certain freq\n i = np.where(xf < max_freq_to_plot)[0][-1]\n peak_distance_index = peak_distance * i / max_freq_to_plot\n\n # setup the seaborn plot\n sns.set(font_scale=1.0, style=\"whitegrid\")\n fig, axes = plt.subplots(1, 1, figsize=(12, 8), sharex=False, sharey=False)\n fig.tight_layout(pad=5.0)\n\n pal = sns.cubehelix_palette(6, rot=-0.25, light=0.7) # pick nice color for plot\n\n # plot the frequency domain signal\n axes.plot(\n xf[:i], yf[:i], marker=\"\", label=\"Best model\", color=pal[3], linewidth=0.8\n )\n axes.set_title(\"Frequency Domain\", fontdict={\"fontweight\": \"normal\"})\n axes.set_xlabel(\"Frequency (Hz)\")\n axes.set_ylabel(\"Amplitude\")\n axes.yaxis.set_tick_params(labelleft=True, which=\"major\")\n axes.grid(False)\n\n peaks, _ = signal.find_peaks(\n yf[:i], height=peak_height, distance=peak_distance_index\n )\n plt.plot(xf[peaks], yf[peaks], \"x\", color=\"#d62728\", markersize=10)\n\n for p in peaks:\n axes.text(\n x=xf[p] + max_freq_to_plot / 50.0,\n y=yf[p],\n s=f\"{xf[p]:.1f} Hz\",\n horizontalalignment=\"left\",\n verticalalignment=\"center\",\n size=12,\n color=\"#d62728\",\n rotation=\"horizontal\",\n weight=\"normal\",\n )\n\n plt.ticklabel_format(axis=\"y\", style=\"sci\", scilimits=(0, 0))\n if save_plot:\n plt.savefig(save_name, dpi=dpi, bbox_inches=\"tight\")\n else:\n plt.show()\n\n\ndef plot_spectogram_with_binned(\n df_spec, labels_dict, path_save_name=Path(\"dummy_folder\"), vmax_factor1=0.1, vmax_factor2=0.9, dpi=150, save_plot=True\n):\n color_scheme = \"inferno\"\n\n days = []\n for i in labels_dict:\n days.append(labels_dict[i][4])\n\n days = sorted(days)\n\n fig, ax = plt.subplots(\n 1,\n 2,\n figsize=(11, 4),\n dpi=dpi,\n )\n\n vmax_val = np.max(df_spec.to_numpy().flatten())\n ax[0].pcolormesh(\n days,\n df_spec.index,\n df_spec,\n cmap=color_scheme,\n vmax=vmax_val * vmax_factor1,\n shading=\"auto\",\n )\n\n ax[0].set_yticks([0, 1000, 2000, 3000, 4000, 5000])\n ax[0].set_yticklabels([\"\", 1000, 2000, 3000, 4000, 5000])\n ax[0].set_ylabel(\"Frequency (Hz)\")\n ax[0].set_xlabel(\"Runtime (days)\")\n ax[0].tick_params(axis=\"both\", which=\"both\", length=0)\n\n ax[0].text(\n 0.01,\n 0.99,\n \"(a)\",\n verticalalignment=\"top\",\n horizontalalignment=\"left\",\n transform=ax[0].transAxes,\n color=\"white\",\n fontsize=12,\n )\n\n ##### BINED SPECTROGRAM #####\n bucket_size = 500\n\n samples = df_spec.shape[1]\n\n df_temp = df_spec.iloc[:10000]\n a = np.array(df_temp) # make numpy array\n print(np.shape(a))\n\n # get the y-axis (frequency values)\n y = np.array(df_temp.index)\n y = np.max(y.reshape(-1, bucket_size), axis=1)\n y = list(y.round().astype(\"int\")[::2])\n y.insert(0, 0)\n plt.box(on=None)\n\n # get the max value for each bucket\n # https://stackoverflow.com/a/15956341/9214620\n max_a = np.max(a.reshape(-1, bucket_size, samples), axis=1)\n\n ax[1].pcolormesh(\n days,\n np.arange(0, 21),\n max_a,\n cmap=color_scheme,\n vmax=vmax_val * vmax_factor2,\n shading=\"auto\",\n )\n ax[1].set_yticks(np.arange(1.5, 20.5, 2))\n ax[1].set_yticklabels(list(np.arange(2, 21, 2)))\n ax[1].tick_params(axis=\"both\", which=\"both\", length=0)\n\n ax[1].set_xlabel(\"Runtime (days)\")\n ax[1].set_ylabel(\"Frequency Bin\")\n\n ax[1].text(\n 0.01,\n 0.99,\n \"(b)\",\n verticalalignment=\"top\",\n horizontalalignment=\"left\",\n transform=ax[1].transAxes,\n 
color=\"white\",\n fontsize=12,\n )\n\n sns.despine(left=True, bottom=True, right=True)\n if save_plot:\n plt.savefig(path_save_name, dpi=dpi, bbox_inches=\"tight\")\n else:\n plt.show()\n\n\ndef weibull_pdf(t, eta, beta):\n \"weibull PDF function\"\n return (\n (beta / (eta ** beta))\n * (t ** (beta - 1.0))\n * np.exp(-1.0 * ((t / eta) ** beta))\n )\n\n\ndef weibull_cdf(t, eta, beta):\n \"weibull CDF function\"\n return 1.0 - np.exp(-1.0 * ((t / eta) ** beta))\n\n\ndef plot_weibull_example(\n beta=2.0, eta=100, path_save_name=\"weibull_cdf_pdf.svg\", dpi=300\n):\n\n pal = sns.cubehelix_palette(6, rot=-0.25, light=0.7)\n\n fig, axes = plt.subplots(1, 2, figsize=(10, 4), constrained_layout=False)\n axes[0].title.set_text(\"Weibull CDF\")\n axes[0].set_xlabel(\"Time (days)\", labelpad=10)\n axes[0].set_ylabel(\"Fraction Failing, F(t)\", labelpad=10)\n axes[0].grid(False)\n\n axes[1].title.set_text(\"Weibull PDF\")\n axes[1].set_xlabel(\"Time (days)\", labelpad=10)\n axes[1].set_ylabel(\"Probability Density, f(t)\", labelpad=10)\n axes[1].grid(False)\n\n for beta in [2.0]:\n\n t = np.linspace(0, 300, 1000)\n f = weibull_cdf(t, eta, beta)\n axes[0].plot(t, f, color=pal[5], linewidth=2)\n f = weibull_pdf(t, eta, beta)\n axes[1].plot(t, f, color=pal[5], linewidth=2)\n plt.subplots_adjust(wspace=0.4)\n plt.savefig(path_save_name, dpi=dpi, bbox_inches=\"tight\")\n\n\ndef ims_data_processed_fig(\n folder_data_ims, path_save_name=\"spectrograms_processed_data_IMS.png\", dpi=300, save_plot=True\n):\n (\n x_train,\n y_train,\n x_val,\n y_val,\n x_test,\n y_test,\n x_train_2,\n y_train_2,\n x_train_3,\n y_train_3,\n ) = load_train_test_ims(folder_data_ims)\n\n y_train_days = torch.reshape(y_train[:, 0], (-1, 1))\n y_val_days = torch.reshape(y_val[:, 0], (-1, 1))\n y_test_days = torch.reshape(y_test[:, 0], (-1, 1))\n\n y_train_days_2 = torch.reshape(y_train_2[:, 0], (-1, 1))\n y_train_days_3 = torch.reshape(y_train_3[:, 0], (-1, 1))\n\n y_train = torch.reshape(y_train[:, 1], (-1, 1))\n y_val = torch.reshape(y_val[:, 1], (-1, 1))\n y_test = torch.reshape(y_test[:, 1], (-1, 1))\n\n y_train_2 = torch.reshape(y_train_2[:, 1], (-1, 1))\n y_train_3 = torch.reshape(y_train_3[:, 1], (-1, 1))\n\n # y_list\n y_list = [y_train_2, y_train_3, y_val, y_test]\n\n # x_list\n x_list = [x_train_2, x_train_3, x_val, x_test]\n\n # y_days_list\n y_days_list = [y_train_days_2, y_train_days_3, y_val_days, y_test_days]\n\n val_max_list = [0.3, 0.3, 0.3, 0.3]\n\n color_scheme = \"inferno\"\n\n fig = plt.figure(\n figsize=(11, 8), dpi=dpi\n )\n gs = gridspec.GridSpec(2, 2)\n\n ax1 = plt.subplot(gs[0, 0])\n ax2 = plt.subplot(gs[0, 1])\n ax3 = plt.subplot(gs[1, 0])\n ax4 = plt.subplot(gs[1, 1])\n gs.update(wspace=0.2, hspace=0.3)\n\n ## General Formatting ##\n # create list of axis elements\n axes_list = [ax1, ax2, ax3, ax4]\n\n for ax in axes_list:\n ax.grid(b=None)\n\n ###### TEST DATA #####\n plt.rcParams[\"axes.titlepad\"] = 7\n\n # secondary axis title list\n ax_title_list = [\n \"(a)\" + \" Train Data (run 2, bearing 1)\",\n \"(b)\" + \" Train Data (run 3, bearing 3)\",\n \"(c)\" + \" Val Data (run 1, bearing 3)\",\n \"(d)\" + \" Test Data (run 1, bearing 4)\",\n ]\n\n counter = 0\n for ax, ax_title, y_temp, x_temp, y_days, val_max in zip(\n axes_list, ax_title_list, y_list, x_list, y_days_list, val_max_list\n ):\n\n index_sorted = np.array(np.argsort(y_temp, 0).reshape(-1))\n\n time_array = np.sort(y_days[:, -1])\n\n index_new = np.arange(0, len(time_array), int(len(time_array) / 3) - 1)\n\n labels_new = [f\"{i:.1f}\" for i 
in time_array[index_new]]\n labels_new[0] = \"0\"\n\n ax.pcolormesh(\n x_temp[index_sorted].T,\n cmap=color_scheme,\n vmax=val_max,\n )\n\n ax.set_xticks(index_new)\n ax.set_xticklabels(\n labels_new,\n )\n\n ax.text(\n 0.02,\n 0.97,\n ax_title,\n verticalalignment=\"top\",\n horizontalalignment=\"left\",\n transform=ax.transAxes,\n color=\"white\",\n fontsize=12,\n )\n\n if counter == 0:\n ax.set_xticks(index_new)\n ax.set_xticklabels(\n labels_new,\n )\n ax.set_yticks(np.arange(3.5, 20.5, 4))\n ax.set_yticklabels(list(np.arange(4, 21, 4)))\n ax.set_ylabel(\"Frequency Bin\")\n ax.set_xlabel(\"Runtime (days)\")\n else:\n ax.set_yticklabels([])\n\n if counter != 0:\n ax.set_yticklabels([])\n\n counter += 1\n\n sns.despine(left=True, bottom=True, right=True)\n if save_plot:\n plt.savefig(path_save_name, bbox_inches=\"tight\")\n else:\n plt.show()\n\n\ndef femto_data_processed_fig(\n folder_data_femto,\n path_save_name=\"spectrograms_processed_data_FEMTO.png\",\n dpi=300,\n vmax_val=0.15,\n save_plot=True,\n):\n # load data\n (\n x_train,\n y_train,\n x_val,\n y_val,\n x_test,\n y_test,\n x_train1_1,\n y_train1_1,\n x_train2_1,\n y_train2_1,\n x_train3_1,\n y_train3_1,\n x_val1_2,\n y_val1_2,\n x_val2_2,\n y_val2_2,\n x_val3_2,\n y_val3_2,\n x_test1_3,\n y_test1_3,\n x_test2_3,\n y_test2_3,\n x_test3_3,\n y_test3_3,\n ) = load_train_test_femto(folder_data_femto)\n\n y_train1_1_days = torch.reshape(y_train1_1[:, 0], (-1, 1))\n y_train2_1_days = torch.reshape(y_train2_1[:, 0], (-1, 1))\n y_train3_1_days = torch.reshape(y_train3_1[:, 0], (-1, 1))\n y_val1_2_days = torch.reshape(y_val1_2[:, 0], (-1, 1))\n y_val2_2_days = torch.reshape(y_val2_2[:, 0], (-1, 1))\n y_val3_2_days = torch.reshape(y_val3_2[:, 0], (-1, 1))\n y_test1_3_days = torch.reshape(y_test1_3[:, 0], (-1, 1))\n y_test2_3_days = torch.reshape(y_test2_3[:, 0], (-1, 1))\n y_test3_3_days = torch.reshape(y_test3_3[:, 0], (-1, 1))\n\n y_train = torch.reshape(y_train[:, 1], (-1, 1))\n y_val = torch.reshape(y_val[:, 1], (-1, 1))\n y_test = torch.reshape(y_test[:, 1], (-1, 1))\n\n y_train1_1 = torch.reshape(y_train1_1[:, 1], (-1, 1))\n y_train2_1 = torch.reshape(y_train2_1[:, 1], (-1, 1))\n y_train3_1 = torch.reshape(y_train3_1[:, 1], (-1, 1))\n y_val1_2 = torch.reshape(y_val1_2[:, 1], (-1, 1))\n y_val2_2 = torch.reshape(y_val2_2[:, 1], (-1, 1))\n y_val3_2 = torch.reshape(y_val3_2[:, 1], (-1, 1))\n y_test1_3 = torch.reshape(y_test1_3[:, 1], (-1, 1))\n y_test2_3 = torch.reshape(y_test2_3[:, 1], (-1, 1))\n y_test3_3 = torch.reshape(y_test3_3[:, 1], (-1, 1))\n\n # y_list\n y_list = [\n y_train1_1,\n y_train2_1,\n y_train3_1,\n y_val1_2,\n y_val2_2,\n y_val3_2,\n y_test1_3,\n y_test2_3,\n y_test3_3,\n ]\n\n # x_list\n x_list = [\n x_train1_1,\n x_train2_1,\n x_train3_1,\n x_val1_2,\n x_val2_2,\n x_val3_2,\n x_test1_3,\n x_test2_3,\n x_test3_3,\n ]\n\n # y_days_list\n y_days_list = [\n y_train1_1_days,\n y_train2_1_days,\n y_train3_1_days,\n y_val1_2_days,\n y_val2_2_days,\n y_val3_2_days,\n y_test1_3_days,\n y_test2_3_days,\n y_test3_3_days,\n ]\n\n ax_title_list = [\n \"(a)\" + \" Train Data (Bearing1_1)\",\n \"(b)\" + \" Train Data (Bearing2_1)\",\n \"(c)\" + \" Train Data (Bearing3_1)\",\n \"(d)\" + \" Val Data (Bearing1_2)\",\n \"(e)\" + \" Val Data (Bearing2_2)\",\n \"(f)\" + \" Val Data (Bearing3_2)\",\n \"(g)\" + \" Test Data (Bearing1_3)\",\n \"(h)\" + \" Test Data (Bearing2_3)\",\n \"(i)\" + \" Test Data (Bearing3_3)\",\n ]\n\n fig = plt.figure(figsize=(14, 12), dpi=dpi)\n gs = gridspec.GridSpec(3, 3)\n ax1 = plt.subplot(gs[0, 0])\n ax2 
= plt.subplot(gs[0, 1])\n ax3 = plt.subplot(gs[0, 2])\n ax4 = plt.subplot(gs[1, 0])\n ax5 = plt.subplot(gs[1, 1])\n ax6 = plt.subplot(gs[1, 2])\n ax7 = plt.subplot(gs[2, 0])\n ax8 = plt.subplot(gs[2, 1])\n ax9 = plt.subplot(gs[2, 2])\n gs.update(wspace=0.15, hspace=0.3)\n\n # create list of axis elements\n axes_list = [ax1, ax2, ax3, ax4, ax5, ax6, ax7, ax8, ax9]\n\n color_scheme = \"inferno\"\n counter = 0\n for ax, ax_title, y_temp, x_temp, y_days in zip(\n axes_list, ax_title_list, y_list, x_list, y_days_list\n ):\n\n index_sorted = np.array(np.argsort(y_temp, 0).reshape(-1))\n\n time_array = np.sort(y_days[:, -1])\n\n index_new = np.arange(0, len(time_array), int(len(time_array) / 3) - 1)\n\n labels_new = [f\"{i*24:.1f}\" for i in time_array[index_new]]\n # change first value to '0'\n labels_new[0] = \"0\"\n\n ax.pcolormesh(\n x_temp[index_sorted].T,\n cmap=color_scheme,\n vmax=vmax_val,\n )\n\n ax.set_xticks(index_new)\n ax.set_xticklabels(\n labels_new,\n )\n\n ax.text(\n 0.02,\n 0.97,\n ax_title,\n verticalalignment=\"top\",\n horizontalalignment=\"left\",\n transform=ax.transAxes,\n color=\"white\",\n fontsize=12,\n )\n\n if counter == 0:\n ax.set_xticks(index_new)\n ax.set_xticklabels(\n labels_new,\n )\n ax.set_yticks(np.arange(3.5, 20.5, 4))\n ax.set_yticklabels(list(np.arange(4, 21, 4)))\n ax.set_ylabel(\"Frequency Bin\")\n ax.set_xlabel(\"Runtime (hours)\")\n else:\n ax.set_yticklabels([])\n\n if counter != 0:\n ax.set_yticklabels([])\n\n counter += 1\n\n sns.despine(left=True, bottom=True, right=True)\n if save_plot:\n plt.savefig(path_save_name, dpi=dpi, bbox_inches=\"tight\")\n else:\n plt.show()\n\n\ndef main():\n logger = logging.getLogger(__name__)\n logger.info(\"making figures from results\")\n\n path_raw_data = root_dir / \"data/raw/IMS/\"\n path_save_loc = root_dir / \"reports/figures/\"\n folder_data_ims = root_dir / \"data/processed/IMS/\"\n folder_data_femto = root_dir / \"data/processed/FEMTO/\"\n\n folder_2nd = path_raw_data / \"2nd_test\"\n date_list2 = sorted(os.listdir(folder_2nd))\n col_names = [\"b1_ch1\", \"b2_ch2\", \"b3_ch3\", \"b4_ch4\"]\n df_spec, labels_dict = build_spectrogram_df_ims(\n folder_2nd,\n date_list2,\n channel_name=\"b1_ch1\",\n start_time=date_list2[0],\n col_names=col_names,\n )\n\n ######################\n # EXAMPLE SPECTROGRAM AND FEATURE PLOT\n plot_spectogram_with_binned(\n df_spec,\n labels_dict,\n path_save_loc / \"spectrogram_with_binned_example.png\",\n vmax_factor1=0.08,\n vmax_factor2=0.5,\n dpi=150,\n )\n\n sns.set(font_scale=0.8, style=\"whitegrid\", font=\"DejaVu Sans\")\n\n ######################\n # WEIBULL CDF/PDF PLOT\n plot_weibull_example(\n beta=2.0,\n eta=100,\n path_save_name=path_save_loc / \"weibull_cdf_pdf_example.pdf\",\n dpi=300,\n )\n\n ######################\n # TIME-DOMAIN, FREQ DOMAIN PLOT\n folder_1st = path_raw_data / \"1st_test\"\n col_names = [\n \"b1_ch1\",\n \"b1_ch2\",\n \"b2_ch3\",\n \"b2_ch4\",\n \"b3_ch5\",\n \"b3_ch6\",\n \"b4_ch7\",\n \"b4_ch8\",\n ]\n\n df = pd.read_csv(folder_1st / \"2003.10.22.12.06.24\", sep=\"\\t\", names=col_names)\n x, y, xf, yf = create_fft(\n df,\n y_name=\"b1_ch2\",\n sample_freq=20480.0,\n window=\"kaiser\",\n beta=3,\n )\n create_time_frequency_plot(\n x,\n y,\n xf,\n yf,\n save_plot=True,\n save_name=path_save_loc / \"time_freq_domain_example.pdf\",\n )\n\n ######################\n # IMS PROCESSED DATA FIGURE\n ims_data_processed_fig(\n folder_data_ims,\n path_save_name=path_save_loc / \"ims_spectrograms_processed_data.png\",\n dpi=300,\n )\n\n 
######################\n # PRONOSTIA (FEMTO) PROCESSED DATA FIGURE\n femto_data_processed_fig(\n folder_data_femto,\n path_save_name=path_save_loc / \"femto_spectrograms_processed_data.png\",\n dpi=300,\n vmax_val=0.15,\n )\n\n\nif __name__ == \"__main__\":\n log_fmt = \"%(asctime)s - %(name)s - %(levelname)s - %(message)s\"\n logging.basicConfig(level=logging.INFO, format=log_fmt)\n\n # not used in this stub but often useful for finding various files\n root_dir = Path(__file__).resolve().parents[2]\n\n main()\n"
] |
[
[
"scipy.signal.find_peaks",
"numpy.linspace",
"matplotlib.pyplot.plot",
"numpy.exp",
"numpy.where",
"pandas.read_csv",
"numpy.arange",
"torch.reshape",
"matplotlib.pyplot.subplot",
"matplotlib.gridspec.GridSpec",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.box",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ticklabel_format",
"numpy.argsort",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots",
"numpy.sort",
"numpy.shape"
]
] |
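The `weibull_pdf`/`weibull_cdf` helpers in the visualize_data.py row encode the standard Weibull pair F(t) = 1 - exp(-(t/eta)^beta) and f(t) = dF/dt. A small numerical check of that relationship, reusing the eta = 100 days and beta = 2 values from the plotting example, is sketched below (numpy only; this snippet is not part of the repository).

```python
import numpy as np

def weibull_pdf(t, eta, beta):
    return (beta / eta ** beta) * t ** (beta - 1.0) * np.exp(-((t / eta) ** beta))

def weibull_cdf(t, eta, beta):
    return 1.0 - np.exp(-((t / eta) ** beta))

t = np.linspace(0.0, 300.0, 3001)
pdf = weibull_pdf(t, eta=100.0, beta=2.0)
cdf = weibull_cdf(t, eta=100.0, beta=2.0)

# integrating the PDF (trapezoid rule) should reproduce the CDF up to discretization error
cdf_from_pdf = np.concatenate(([0.0], np.cumsum(0.5 * (pdf[1:] + pdf[:-1]) * np.diff(t))))
assert np.max(np.abs(cdf_from_pdf - cdf)) < 1e-3
```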
liusy182/jina
|
[
"c272bbddef733167804c5a68d5f41ec789fa1732"
] |
[
"tests/unit/flow-construct/test_flow.py"
] |
[
"import datetime\nimport inspect\nimport json\nimport os\nfrom typing import Union\n\nimport numpy as np\nimport pytest\n\nfrom jina import Flow, Document, DocumentArray, Executor, requests, __windows__\nfrom jina.enums import FlowBuildLevel, PollingType\nfrom jina.excepts import RuntimeFailToStart\nfrom jina.executors import BaseExecutor\nfrom jina.helper import random_identity\nfrom jina.peapods.pods import BasePod\nfrom docarray.document.generators import from_ndarray\nfrom jina.types.request.data import Response\nfrom docarray.proto import docarray_pb2\nfrom tests import random_docs\n\ncur_dir = os.path.dirname(os.path.abspath(__file__))\n\n\n@pytest.mark.slow\ndef test_flow_with_jump(tmpdir):\n f = (\n Flow()\n .add(name='r1')\n .add(name='r2')\n .add(name='r3', needs='r1')\n .add(name='r4', needs='r2')\n .add(name='r5', needs='r3')\n .add(name='r6', needs='r4')\n .add(name='r8', needs='r6')\n .add(name='r9', needs='r5')\n .add(name='r10', needs=['r9', 'r8'])\n )\n\n with f:\n _validate_flow(f)\n\n f.save_config(os.path.join(str(tmpdir), 'tmp.yml'))\n Flow.load_config(os.path.join(str(tmpdir), 'tmp.yml'))\n\n with Flow.load_config(os.path.join(str(tmpdir), 'tmp.yml')) as f:\n _validate_flow(f)\n\n\n@pytest.mark.slow\n@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])\ndef test_simple_flow(protocol):\n bytes_gen = (Document() for _ in range(10))\n\n def bytes_fn():\n for _ in range(100):\n yield Document()\n\n f = Flow(protocol=protocol).add(name='executor0')\n\n with f:\n f.index(inputs=bytes_gen)\n\n with f:\n f.index(inputs=bytes_fn)\n\n with f:\n f.index(inputs=bytes_fn)\n f.index(inputs=bytes_fn)\n\n _validate_flow(f)\n\n assert 'gateway' not in f\n\n\n@pytest.mark.slow\ndef test_flow_identical(tmpdir):\n with open(os.path.join(cur_dir, '../yaml/test-flow.yml')) as fp:\n a = Flow.load_config(fp)\n\n b = (\n Flow()\n .add(name='chunk_seg', shards=3)\n .add(name='wqncode1', shards=2)\n .add(name='encode2', shards=2, needs='chunk_seg')\n .join(['wqncode1', 'encode2'])\n )\n\n a.save_config(os.path.join(str(tmpdir), 'test2.yml'))\n\n c = Flow.load_config(os.path.join(str(tmpdir), 'test2.yml'))\n\n assert a == b\n assert a == c\n\n with a as f:\n _validate_flow(f)\n\n\n@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])\ndef test_flow_no_container(protocol):\n f = Flow(protocol=protocol).add(\n name='dummyEncoder',\n uses=os.path.join(cur_dir, 'mwu-encoder/mwu_encoder.yml'),\n )\n\n with f:\n f.index(inputs=random_docs(10))\n\n\n@pytest.fixture\ndef docpb_workspace(tmpdir):\n os.environ['TEST_DOCSHARD_WORKSPACE'] = str(tmpdir)\n yield\n del os.environ['TEST_DOCSHARD_WORKSPACE']\n\n\n@pytest.mark.slow\ndef test_py_client():\n f = (\n Flow()\n .add(name='r1')\n .add(name='r2')\n .add(name='r3', needs='r1')\n .add(name='r4', needs='r2')\n .add(name='r5', needs='r3')\n .add(name='r6', needs='r4')\n .add(name='r8', needs='r6')\n .add(name='r9', needs='r5')\n .add(name='r10', needs=['r9', 'r8'])\n )\n\n with f:\n _validate_flow(f)\n\n\ndef test_dry_run_with_two_pathways_diverging_at_gateway():\n f = Flow().add(name='r2').add(name='r3', needs='gateway').join(['r2', 'r3'])\n\n with f:\n _validate_flow(f)\n\n\ndef test_dry_run_with_two_pathways_diverging_at_non_gateway():\n f = (\n Flow()\n .add(name='r1')\n .add(name='r2')\n .add(name='r3', needs='r1')\n .join(['r2', 'r3'])\n )\n\n with f:\n _validate_flow(f)\n\n\ndef test_refactor_num_part():\n f = (\n Flow()\n .add(name='r1', needs='gateway')\n .add(name='r2', needs='gateway')\n .join(['r1', 'r2'])\n )\n\n 
with f:\n _validate_flow(f)\n\n\ndef test_refactor_num_part_proxy():\n f = (\n Flow()\n .add(name='r1')\n .add(name='r2', needs='r1')\n .add(name='r3', needs='r1')\n .join(['r2', 'r3'])\n )\n\n with f:\n _validate_flow(f)\n\n\n@pytest.mark.slow\n@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])\ndef test_refactor_num_part_proxy_2(protocol):\n f = (\n Flow(protocol=protocol)\n .add(name='r1')\n .add(name='r2', needs='r1', shards=2)\n .add(name='r3', needs='r1', shards=3, polling='ALL')\n .needs(['r2', 'r3'])\n )\n\n with f:\n f.index([Document(text='abbcs'), Document(text='efgh')])\n _validate_flow(f)\n\n\n@pytest.mark.slow\n@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])\ndef test_refactor_num_part_2(protocol):\n f = Flow(protocol=protocol).add(name='r1', needs='gateway', shards=3, polling='ALL')\n\n with f:\n f.index([Document(text='abbcs'), Document(text='efgh')])\n\n f = Flow(protocol=protocol).add(name='r1', needs='gateway', shards=3)\n\n with f:\n f.index([Document(text='abbcs'), Document(text='efgh')])\n\n\n@pytest.fixture()\ndef datauri_workspace(tmpdir):\n os.environ['TEST_DATAURIINDEX_WORKSPACE'] = str(tmpdir)\n yield\n del os.environ['TEST_DATAURIINDEX_WORKSPACE']\n\n\nclass DummyOneHotTextEncoder(Executor):\n @requests\n def foo(self, docs, **kwargs):\n for d in docs:\n d.embedding = np.array([1, 2, 3])\n\n\n@pytest.mark.slow\n@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])\ndef test_flow_with_publish_driver(protocol):\n def validate(da):\n for d in da:\n assert d.embedding is not None\n\n f = (\n Flow(protocol=protocol)\n .add(name='r2', uses=DummyOneHotTextEncoder)\n .add(name='r3', uses=DummyOneHotTextEncoder, needs='gateway')\n .join(needs=['r2', 'r3'])\n )\n\n with f:\n da = f.index(\n [Document(text='text_1'), Document(text='text_2')], return_results=True\n )\n _validate_flow(f)\n\n validate(da)\n\n\n@pytest.mark.slow\n@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])\ndef test_flow_arbitrary_needs(protocol):\n f = (\n Flow(protocol=protocol)\n .add(name='p1')\n .add(name='p2', needs='gateway')\n .add(name='p3', needs='gateway')\n .add(name='p4', needs='gateway')\n .add(name='p5', needs='gateway')\n .needs(['p2', 'p4'], name='r1')\n .needs(['p3', 'p5'], name='r2')\n .needs(['p1', 'r1'], name='r3')\n .needs(['r2', 'r3'], name='r4')\n )\n\n with f:\n f.index([Document(text='abbcs'), Document(text='efgh')])\n _validate_flow(f)\n\n\n@pytest.mark.slow\n@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])\ndef test_flow_needs_all(protocol):\n f = Flow(protocol=protocol).add(name='p1', needs='gateway').needs_all(name='r1')\n assert f._pod_nodes['r1'].needs == {'p1'}\n\n f = (\n Flow(protocol=protocol)\n .add(name='p1', needs='gateway')\n .add(name='p2', needs='gateway')\n .add(name='p3', needs='gateway')\n .needs(needs=['p1', 'p2'], name='r1')\n .needs_all(name='r2')\n )\n assert f._pod_nodes['r2'].needs == {'p3', 'r1'}\n\n with f:\n f.index(from_ndarray(np.random.random([10, 10])))\n\n f = (\n Flow(protocol=protocol)\n .add(name='p1', needs='gateway')\n .add(name='p2', needs='gateway')\n .add(name='p3', needs='gateway')\n .needs(needs=['p1', 'p2'], name='r1')\n .needs_all(name='r2')\n .add(name='p4', needs='r2')\n )\n assert f._pod_nodes['r2'].needs == {'p3', 'r1'}\n assert f._pod_nodes['p4'].needs == {'r2'}\n\n with f:\n f.index(from_ndarray(np.random.random([10, 10])))\n _validate_flow(f)\n\n\nclass EnvChecker1(BaseExecutor):\n \"\"\"Class used in Flow YAML\"\"\"\n\n def __init__(self, 
*args, **kwargs):\n super().__init__(*args, **kwargs)\n # pea/pod-specific\n assert os.environ['key1'] == 'value1'\n assert os.environ['key2'] == 'value2'\n # inherit from parent process\n assert os.environ['key_parent'] == 'value3'\n\n\nclass EnvChecker2(BaseExecutor):\n \"\"\"Class used in Flow YAML\"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n # pea/pod-specific\n assert 'key1' not in os.environ\n assert 'key2' not in os.environ\n # inherit from parent process\n assert os.environ['key_parent'] == 'value3'\n\n\ndef test_flow_with_pod_envs():\n f = Flow.load_config(os.path.join(cur_dir, 'yaml/flow-with-envs.yml'))\n with f:\n _validate_flow(f)\n\n\n@pytest.mark.slow\n@pytest.mark.parametrize('return_results', [False, True])\n@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])\n@pytest.mark.parametrize('on_done', [None, lambda x: x])\ndef test_return_results_sync_flow(return_results, protocol, on_done):\n with Flow(protocol=protocol).add() as f:\n da = f.index(\n from_ndarray(np.random.random([10, 2])),\n return_results=return_results,\n on_done=on_done,\n )\n if return_results or on_done is None:\n assert isinstance(da, DocumentArray)\n assert len(da) == 10\n for doc in da:\n assert isinstance(doc, Document)\n\n else:\n assert da is None\n _validate_flow(f)\n\n\n@pytest.mark.parametrize(\n 'input, expect_host, expect_port',\n [\n ('0.0.0.0', '0.0.0.0', None),\n ('0.0.0.0:12345', '0.0.0.0', 12345),\n ('123.124.125.0:45678', '123.124.125.0', 45678),\n ('api.jina.ai:45678', 'api.jina.ai', 45678),\n ],\n)\ndef test_flow_host_expose_shortcut(input, expect_host, expect_port):\n f = Flow().add(host=input).build()\n assert f['executor0'].args.host == expect_host\n if expect_port is not None:\n assert f['executor0'].args.port_jinad == expect_port\n\n\ndef test_flow_workspace_id():\n f = Flow().add().add().add().build()\n assert len(f.workspace_id) == 4\n assert len(set(f.workspace_id.values())) == 4\n\n with pytest.raises(ValueError):\n f.workspace_id = 'hello'\n\n new_id = random_identity()\n f.workspace_id = new_id\n assert len(set(f.workspace_id.values())) == 1\n assert list(f.workspace_id.values())[0] == new_id\n\n\ndef test_flow_identity():\n f = Flow().add().add().add().build()\n assert len(f.identity) == 4\n assert len(set(f.identity.values())) == 4\n\n with pytest.raises(ValueError):\n f.identity = 'hello'\n\n new_id = random_identity()\n f.identity = new_id\n assert len(set(f.identity.values())) == 1\n assert list(f.identity.values())[0] == new_id\n\n\n@pytest.mark.slow\ndef test_flow_identity_override():\n f = Flow().add().add(shards=2).add(shards=2)\n\n with f:\n assert len(set(p.args.identity for _, p in f)) == f.num_pods\n\n f = Flow(identity='123456').add().add(shards=2).add(shards=2)\n\n with f:\n assert len(set(p.args.identity for _, p in f)) == 1\n\n y = '''\n!Flow\nversion: '1.0'\nexecutors:\n - name: hello\n - name: world\n shards: 3\n '''\n\n f = Flow.load_config(y)\n for _, p in f:\n p.args.identity = '1234'\n\n with f:\n assert len(set(p.args.identity for _, p in f)) == 2\n for _, p in f:\n if p.args.identity != '1234':\n assert p.name == 'gateway'\n\n\n@pytest.mark.slow\ndef test_bad_pod_graceful_termination():\n def asset_bad_flow(f):\n with pytest.raises(RuntimeFailToStart):\n with f:\n assert f._build_level == FlowBuildLevel.EMPTY\n\n # bad remote pod\n asset_bad_flow(Flow().add(name='exec1', host='hello-there'))\n\n # bad local pod\n asset_bad_flow(Flow().add(name='exec2', uses='hello-there'))\n\n # bad local pod at 
second\n asset_bad_flow(Flow().add().add(name='exec3', uses='hello-there'))\n\n # bad remote pod at second\n asset_bad_flow(Flow().add().add(name='exec4', host='hello-there'))\n\n # bad local pod at second, with correct pod at last\n asset_bad_flow(Flow().add().add(name='exec5', uses='hello-there').add())\n\n # bad remote pod at second, with correct pod at last\n asset_bad_flow(Flow().add().add(name='exec6', host='hello-there').add())\n\n\ndef test_socket_types_2_remote_one_local():\n f = (\n Flow()\n .add(name='executor1', host='0.0.0.1')\n .add(name='executor2', shards=2, host='0.0.0.2')\n .add(name='executor3', shards=2, host='1.2.3.4', needs=['gateway'])\n .join(name='join', needs=['executor2', 'executor3'])\n )\n\n f.build()\n\n _validate_flow(f)\n\n\ndef test_socket_types_2_remote_one_local_input_socket_pull_connect_from_remote():\n f = (\n Flow()\n .add(name='executor1', host='0.0.0.1')\n .add(name='executor2', shards=2, host='0.0.0.2')\n .add(name='executor3', shards=2, host='1.2.3.4', needs=['gateway'])\n .join(name='join', needs=['executor2', 'executor3'])\n )\n\n f.build()\n\n _validate_flow(f)\n\n\ndef test_single_document_flow_index():\n d = Document()\n with Flow().add() as f:\n f.index(d)\n f.index(lambda: d)\n\n\ndef test_flow_equalities():\n f1 = (\n Flow()\n .add(name='executor0')\n .add(name='executor1', needs='gateway')\n .needs_all(name='joiner')\n )\n f2 = (\n Flow()\n .add(name='executor0')\n .add(name='executor1', needs='gateway')\n .add(name='joiner', needs=['executor0', 'executor1'])\n )\n assert f1 == f2\n\n f2 = f2.add(name='executor0')\n assert f1 != f2\n\n\ndef test_flow_get_item():\n f1 = Flow().add().add(needs='gateway').needs_all(name='joiner')\n assert isinstance(f1[1], BasePod)\n assert isinstance(f1['executor0'], BasePod)\n\n\nclass CustomizedExecutor(BaseExecutor):\n pass\n\n\ndef test_flow_add_class():\n f = Flow().add(uses=BaseExecutor).add(uses=CustomizedExecutor)\n\n with f:\n pass\n\n\n@pytest.mark.slow\ndef test_flow_allinone_yaml():\n f = Flow.load_config(os.path.join(cur_dir, 'yaml/flow-allinone.yml'))\n with f:\n pass\n\n f = Flow.load_config(os.path.join(cur_dir, 'yaml/flow-allinone-oldstyle.yml'))\n with f:\n pass\n\n\nclass MyExec(Executor):\n @requests\n def foo(self, parameters, **kwargs):\n assert parameters['hello'] == 'world'\n\n\ndef test_flow_empty_data_request(mocker):\n f = Flow().add(uses=MyExec)\n\n mock = mocker.Mock()\n\n with f:\n f.post('/hello', parameters={'hello': 'world'}, on_done=mock)\n\n mock.assert_called()\n\n\ndef test_flow_common_kwargs():\n\n with Flow(name='hello', something_random=True).add() as f:\n assert f._common_kwargs == {'something_random': True}\n\n\n@pytest.mark.parametrize('is_async', [True, False])\ndef test_flow_set_asyncio_switch_post(is_async):\n f = Flow(asyncio=is_async)\n assert inspect.isasyncgenfunction(f.post) == is_async\n\n\n@pytest.mark.skipif(__windows__, reason='timing comparison is broken for 2nd Flow')\ndef test_flow_routes_list():\n def _time(time: str):\n return datetime.datetime.strptime(time, '%Y-%m-%dT%H:%M:%S.%fZ')\n\n def my_cb_one(resp: Response):\n gateway_entry, pod1_entry = json.loads(resp.json())['routes']\n assert gateway_entry['executor'] == 'gateway'\n assert pod1_entry['executor'].startswith('executor1')\n assert (\n _time(gateway_entry['end_time'])\n > _time(pod1_entry['end_time'])\n > _time(pod1_entry['start_time'])\n > _time(gateway_entry['start_time'])\n )\n\n def my_cb_two(resp: Response):\n routes = json.loads(resp.json())['routes']\n gateway_entry, *pods = 
routes\n (\n a1_entry,\n a2_entry,\n b1_entry,\n gateway_entry,\n merge_entry,\n ) = _extract_route_entries(gateway_entry, routes)\n assert gateway_entry['executor'] == 'gateway'\n assert a1_entry['executor'].startswith('a1')\n assert a2_entry['executor'].startswith('a2')\n assert b1_entry['executor'].startswith('b1')\n assert merge_entry['executor'].startswith('merge')\n assert (\n _time(gateway_entry['end_time'])\n > _time(merge_entry['end_time'])\n > _time(merge_entry['start_time'])\n > _time(a2_entry['end_time'])\n > _time(a2_entry['start_time'])\n > _time(a1_entry['start_time'])\n > _time(gateway_entry['start_time'])\n )\n\n with Flow().add(name='executor1') as simple_flow:\n simple_flow.index(inputs=Document(), return_results=True, on_done=my_cb_one)\n\n with Flow().add(name='a1').add(name='a2').add(name='b1', needs='gateway').add(\n name='merge', needs=['a2', 'b1']\n ) as shards_flow:\n shards_flow.index(inputs=Document(), return_results=True, on_done=my_cb_two)\n\n\ndef _extract_route_entries(gateway_entry, routes):\n for route in routes:\n if route['executor'] == 'gateway':\n gateway_entry = route\n elif route['executor'] == 'a1':\n a1_entry = route\n elif route['executor'] == 'a2':\n a2_entry = route\n elif route['executor'] == 'b1':\n b1_entry = route\n elif route['executor'] == 'merge':\n merge_entry = route\n return a1_entry, a2_entry, b1_entry, gateway_entry, merge_entry\n\n\ndef test_flow_change_parameters():\n class MyExec(Executor):\n @requests\n def foo(self, **kwargs):\n return {'a': 1}\n\n def my_cb(resp: Response):\n assert resp.parameters['a'] == 1.0\n\n f = Flow().add(uses=MyExec)\n with f:\n f.post('/', parameters={'a': 2}, on_done=my_cb)\n f.post('/', parameters={}, on_done=my_cb)\n\n\ndef test_flow_load_executor_yaml_extra_search_paths():\n f = Flow(extra_search_paths=[os.path.join(cur_dir, 'executor')]).add(\n uses='config.yml'\n )\n with f:\n da = f.post('/', inputs=Document(), return_results=True)\n assert da[0].text == 'done'\n\n\ndef test_flow_load_yaml_extra_search_paths():\n f = Flow.load_config(os.path.join(cur_dir, 'flow/flow.yml'))\n with f:\n da = f.post('/', inputs=Document(), return_results=True)\n assert da[0].text == 'done'\n\n\n@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])\ndef test_gateway_only_flows_no_error(capsys, protocol):\n f = Flow(protocol=protocol)\n with f:\n pass\n captured = capsys.readouterr()\n assert not captured.err\n\n\ndef _validate_flow(f):\n graph_dict = f._get_graph_representation()\n adresses = f._get_pod_addresses()\n for name, pod in f:\n if name != 'gateway':\n assert adresses[name][0] == f'{pod.host}:{pod.head_port_in}'\n for n in pod.needs:\n assert name in graph_dict[n if n != 'gateway' else 'start-gateway']\n else:\n for n in pod.needs:\n assert 'end-gateway' in graph_dict[n]\n"
] |
[
[
"numpy.array",
"numpy.random.random"
]
] |
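The Flow topologies exercised in the test_flow.py row are built entirely from `.add()` / `.needs()` / `.join()` chains. As a usage illustration, assuming the same jina version the tests target (executor names here are made up), a two-branch flow that diverges at the gateway and re-joins looks like:

```python
from jina import Flow, Document

# two branches that both hang off the gateway and are merged by a final pod,
# mirroring test_dry_run_with_two_pathways_diverging_at_gateway above
f = (
    Flow()
    .add(name='branch_a')
    .add(name='branch_b', needs='gateway')
    .needs(['branch_a', 'branch_b'], name='joiner')
)

with f:
    f.index([Document(text='hello'), Document(text='world')])
```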
petrapoklukar/disentanglement_sample_efficiency
|
[
"27b4bac6ee61973a728e48230c6eb449b167c46e"
] |
[
"disentanglement_lib/preprocessing/methods.py"
] |
[
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jun 5 16:41:20 2020\n\n@author: petrapoklukar\n\"\"\"\n\nimport numpy as np\nimport h5py\nimport os\nimport gin.tf\n\n@gin.configurable(\"split_train_and_validation_per_model\", \n blacklist=[\"dataset_name\", \"model_name\"])\ndef create_split_train_and_validation_per_model(dataset_name, \n model_name, \n random_seed=gin.REQUIRED, \n unit_labels=False):\n \"\"\" Randomly splits the model split into smaller datasets of different\n sizes.\n \n Args: \n filename: name of the file to split further\n \"\"\"\n if model_name:\n model_name = '_{0}_{1}'.format(model_name, str(random_seed))\n random_state = np.random.RandomState(random_seed)\n SHAPES3D_PATH = os.path.join(\n os.environ.get(\"DISENTANGLEMENT_LIB_DATA\", \".\"), \"3dshapes\", \n dataset_name + \".h5\")\n dataset_split = h5py.File(SHAPES3D_PATH, 'r')\n print(dataset_split.keys())\n images_split = dataset_split['images'][()]\n labels_split = dataset_split['labels'][()]\n indices_split = dataset_split['indices'][()]\n dataset_size = len(images_split)\n \n ims = np.array(images_split)\n labs = np.array(labels_split)\n inds = np.array(indices_split)\n \n if unit_labels:\n labels_min = np.array([0., 0., 0., 0.75, 0., -30.])\n labels_max = np.array([0.9, 0.9, 0.9, 1.25, 3., 30.])\n labels_split = (labels_split - labels_min)/(labels_max - labels_min)\n assert(np.min(labels_split) == 0 and np.max(labels_split) == 1)\n \n all_local_indices = random_state.choice(dataset_size, dataset_size, replace=False)\n random_state.shuffle(all_local_indices)\n splitratio = int(dataset_size * 0.85)\n\n train_local_indices = all_local_indices[:splitratio]\n test_local_indices = all_local_indices[splitratio:]\n \n print('Writing files')\n for indices, split in list(zip([train_local_indices, test_local_indices], \n ['_train', '_valid'])):\n \n SPLIT_SHAPES3D_PATH = os.path.join(\n os.environ.get(\"DISENTANGLEMENT_LIB_DATA\", \".\"), \"3dshapes\", \n dataset_name + model_name + split + \".h5\")\n assert(ims[indices].shape[0] == indices.shape[0])\n assert(labs[indices].shape[0] == indices.shape[0])\n assert(inds[indices].shape[0] == indices.shape[0])\n hf = h5py.File(SPLIT_SHAPES3D_PATH, 'w')\n hf.create_dataset('images', data=ims[indices])\n hf.create_dataset('labels', data=labs[indices])\n hf.create_dataset('indices', data=inds[indices])\n hf.close()\n \n dataset_split.close()\n\n \n@gin.configurable(\"split_train_and_validation\", \n blacklist=[\"dataset_name\", \"model_name\"])\ndef create_split_train_and_validation(dataset_name, \n model_name,\n random_seed=gin.REQUIRED, \n unit_labels=False):\n \"\"\" Randomly splits the dataset split into train and validation\n splits.\n \n Args: \n filename: name of the file to split further\n \"\"\"\n del model_name\n \n random_state = np.random.RandomState(random_seed)\n SHAPES3D_PATH = os.path.join(\n os.environ.get(\"DISENTANGLEMENT_LIB_DATA\", \".\"), \"3dshapes\", \n dataset_name + \".h5\")\n dataset_split = h5py.File(SHAPES3D_PATH, 'r')\n print(dataset_split.keys())\n images_split = dataset_split['images'][()]\n labels_split = dataset_split['labels'][()]\n indices_split = dataset_split['indices'][()]\n dataset_size = len(images_split)\n \n ims = np.array(images_split)\n labs = np.array(labels_split)\n inds = np.array(indices_split)\n \n if unit_labels:\n labels_min = np.array([0., 0., 0., 0.75, 0., -30.])\n labels_max = np.array([0.9, 0.9, 0.9, 1.25, 3., 30.])\n labels_split = (labels_split - labels_min)/(labels_max - labels_min)\n 
assert(np.min(labels_split) == 0 and np.max(labels_split) == 1)\n \n all_local_indices = random_state.choice(dataset_size, dataset_size, replace=False)\n random_state.shuffle(all_local_indices)\n splitratio = int(dataset_size * 0.85)\n\n train_local_indices = all_local_indices[:splitratio]\n test_local_indices = all_local_indices[splitratio:]\n \n dataset_name += '_' + str(random_seed)\n print('Writing files')\n for indices, split in list(zip([train_local_indices, test_local_indices], \n ['_train', '_valid'])):\n \n SPLIT_SHAPES3D_PATH = os.path.join(\n os.environ.get(\"DISENTANGLEMENT_LIB_DATA\", \".\"), \"3dshapes\", \n dataset_name + split + \".h5\")\n print(SPLIT_SHAPES3D_PATH)\n assert(ims[indices].shape[0] == indices.shape[0])\n assert(labs[indices].shape[0] == indices.shape[0])\n assert(inds[indices].shape[0] == indices.shape[0])\n hf = h5py.File(SPLIT_SHAPES3D_PATH, 'w')\n hf.create_dataset('images', data=ims[indices])\n hf.create_dataset('labels', data=labs[indices])\n hf.create_dataset('indices', data=inds[indices])\n hf.close()\n \n dataset_split.close()\n \n\n@gin.configurable(\"pca_split_holdout\", \n blacklist=[\"dataset_name\", \"model_name\"])\ndef create_pca_split_holdout(dataset_name, \n model_name,\n random_seed=gin.REQUIRED, \n split_size=gin.REQUIRED,\n unit_labels=False):\n \"\"\" Randomly splits the dataset split into train and validation\n splits.\n \n Args: \n filename: name of the file to split further\n \"\"\"\n del model_name\n \n random_state = np.random.RandomState(random_seed)\n SHAPES3D_PATH = os.path.join(\n os.environ.get(\"DISENTANGLEMENT_LIB_DATA\", \".\"), \"3dshapes\", \n dataset_name + \".h5\")\n dataset_split = h5py.File(SHAPES3D_PATH, 'r')\n print(dataset_split.keys())\n images_split = dataset_split['images'][()]\n labels_split = dataset_split['labels'][()]\n indices_split = dataset_split['indices'][()]\n dataset_size = len(images_split)\n \n ims = np.array(images_split)\n labs = np.array(labels_split)\n inds = np.array(indices_split)\n \n if unit_labels:\n labels_min = np.array([0., 0., 0., 0.75, 0., -30.])\n labels_max = np.array([0.9, 0.9, 0.9, 1.25, 3., 30.])\n labels_split = (labels_split - labels_min)/(labels_max - labels_min)\n assert(np.min(labels_split) == 0 and np.max(labels_split) == 1)\n \n holdout_local_indices = random_state.choice(dataset_size, split_size, replace=False)\n random_state.shuffle(holdout_local_indices)\n \n dataset_name = \"3dshapes_pca_holdout_s{0}\".format(split_size)\n print('Writing files')\n\n SPLIT_SHAPES3D_PATH = os.path.join(\n os.environ.get(\"DISENTANGLEMENT_LIB_DATA\", \".\"), \"3dshapes\", \n dataset_name + \".h5\")\n print(SPLIT_SHAPES3D_PATH)\n print(holdout_local_indices.shape)\n assert(ims[holdout_local_indices].shape[0] == holdout_local_indices.shape[0])\n assert(labs[holdout_local_indices].shape[0] == holdout_local_indices.shape[0])\n assert(inds[holdout_local_indices].shape[0] == holdout_local_indices.shape[0])\n hf = h5py.File(SPLIT_SHAPES3D_PATH, 'w')\n hf.create_dataset('images', data=ims[holdout_local_indices])\n hf.create_dataset('labels', data=labs[holdout_local_indices])\n hf.create_dataset('indices', data=inds[holdout_local_indices])\n hf.close()\n \n dataset_split.close()"
] |
[
[
"numpy.max",
"numpy.array",
"numpy.random.RandomState",
"numpy.min"
]
] |
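The preprocessing file above repeats one recipe in every function: open an .h5 file, shuffle indices with a seeded numpy RandomState, and write train/validation subsets back out with h5py. Below is a minimal, self-contained sketch of that pattern, not part of the dataset row; the file name data.h5, the output directory, and the 'images'/'labels' keys are assumptions for illustration only.

import os
import h5py
import numpy as np

def split_h5(path, out_dir, seed=42, train_ratio=0.85):
    """Split an .h5 file with 'images' and 'labels' into train/valid files."""
    rng = np.random.RandomState(seed)
    with h5py.File(path, "r") as f:
        images = f["images"][()]
        labels = f["labels"][()]
    n = len(images)
    order = rng.permutation(n)                      # seeded shuffle of all indices
    cut = int(n * train_ratio)
    splits = {"_train": order[:cut], "_valid": order[cut:]}
    base = os.path.splitext(os.path.basename(path))[0]
    for suffix, idx in splits.items():
        out_path = os.path.join(out_dir, base + suffix + ".h5")
        with h5py.File(out_path, "w") as hf:
            hf.create_dataset("images", data=images[idx])
            hf.create_dataset("labels", data=labels[idx])

# split_h5("data.h5", ".", seed=0)  # hypothetical call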
jakegrigsby/cc-afbc
|
[
"3fc587aff7d33fde89da9b4bfa39e2c5aa3866fc"
] |
[
"estimator.py"
] |
[
"from torch import nn\nimport torch.nn.functional as F\nimport torch\nimport random\n\n\nclass AdvantageEstimator(nn.Module):\n def __init__(self, actor, critics, popart=False, method=\"mean\", ensembling=\"min\"):\n super().__init__()\n assert method in [\"mean\", \"max\", \"expectation\"]\n assert ensembling in [\"min\", \"mean\"]\n self.actor = actor\n self.critics = critics\n self.method = method\n self.ensembling = ensembling\n self.val_s = None\n self.popart = popart\n\n def pop(self, q, s, a):\n if self.popart:\n return self.popart(q(s, a))\n else:\n return q(s, a)\n\n def get_hparams(self):\n return {\"adv_method\": self.method, \"adv_ensembling_method\": self.method}\n\n def estimate_value(self, state, n=10):\n # get an action distribution from the policy\n act_dist = self.actor(state)\n actions = [act_dist.sample() for _ in range(n)]\n\n # get the q value for each of the n actions\n qs = []\n for act in actions:\n q_preds = torch.stack(\n [self.pop(critic, state, act) for critic in self.critics], dim=0\n )\n if self.ensembling == \"min\":\n q_preds = q_preds.min(0).values\n elif self.ensembling == \"mean\":\n q_preds = q_preds.mean(0)\n qs.append(q_preds)\n\n if self.method == \"mean\":\n # V(s) = E_{a ~ \\pi(s)} [Q(s, a)]\n value = torch.stack(qs, dim=0).mean(0)\n elif self.method == \"max\":\n # Optimisitc value estimate: V(s) = max_{a1, a2, a3, ..., aN}(Q(s, a))\n value = torch.stack(qs, dim=0).max(0).values\n elif self.method == \"expectation\":\n probs = F.softmax(\n torch.stack(\n [\n act_dist.log_prob(act).sum(-1, keepdim=True).exp()\n for act in actions\n ],\n dim=0,\n ),\n dim=0,\n )\n value = (probs * torch.stack(qs, dim=0)).sum(0)\n\n self.val_s = value\n return value\n\n def stochastic_qval(self, s, a, n=10):\n with torch.no_grad():\n qs = [random.choice(self.critics)(s, a) for _ in range(n)]\n return torch.stack(qs, dim=1)\n\n def stochastic_val(self, s, a, n=10):\n with torch.no_grad():\n dist = self.actor(s)\n vs = [random.choice(self.critics)(s, dist.sample()) for _ in range(n)]\n return torch.stack(vs, dim=1)\n\n def forward(self, state, action, use_computed_val=False, n=10):\n with torch.no_grad():\n q_preds = torch.stack(\n [self.pop(critic, state, action) for critic in self.critics], dim=0\n )\n if self.ensembling == \"min\":\n q_preds = q_preds.min(0).values\n elif self.ensembling == \"mean\":\n q_preds = q_preds.mean(0)\n # reuse the expensive value computation if it has already been done\n if use_computed_val:\n assert self.val_s is not None\n else:\n # do the value computation\n self.estimate_value(state, n=n)\n # A(s, a) = Q(s, a) - V(s)\n adv = q_preds - self.val_s\n return adv\n"
] |
[
[
"torch.stack",
"torch.no_grad"
]
] |
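estimator.py estimates V(s) by sampling several actions from the actor, ensembling critic outputs with torch.stack, and then forming A(s, a) = Q(s, a) - V(s). The sketch below isolates the min-ensemble and Monte-Carlo value steps with toy linear critics and a fixed Gaussian stand-in for the actor; none of these module names come from the repository.

import torch
from torch import nn
from torch.distributions import Normal

state_dim, act_dim, n_samples = 4, 2, 10
critics = [nn.Linear(state_dim + act_dim, 1) for _ in range(2)]  # toy Q-networks

def q_min(state, action):
    # min over the critic ensemble, as in ensembling="min"
    qs = torch.stack([c(torch.cat([state, action], dim=-1)) for c in critics], dim=0)
    return qs.min(0).values

with torch.no_grad():
    state = torch.randn(8, state_dim)
    dist = Normal(torch.zeros(8, act_dim), torch.ones(8, act_dim))  # stand-in actor
    # V(s) ~= E_{a ~ pi}[Q(s, a)] via Monte-Carlo sampling
    value = torch.stack([q_min(state, dist.sample()) for _ in range(n_samples)], dim=0).mean(0)
    action = dist.sample()
    advantage = q_min(state, action) - value     # A(s, a) = Q(s, a) - V(s)
print(advantage.shape)  # torch.Size([8, 1])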
abhinav042/COMP3297
|
[
"3c37a4330d94a45cdabaa26bc17147ae73118cfb"
] |
[
"tutor_app/.~c9_invoke_ZPZ8Qk.py"
] |
[
"from django.db import models\nfrom django.contrib.auth.models import User\nfrom course_app.models import Course\nimport numpy as np\nfrom datetime import datetime\n\n# Create your models here.\nclass Timeslot(models.Model):\n \n date_of_slot = models.DateTimeField(null=True);\n #time_of_slot = models.TimeField(null=True);\n\nclass Tutor(models.Model):\n\n user = models.OneToOneField(User,null=True)\n first_name = models.CharField(max_length=30,null=True);\n last_name = models.CharField(max_length=30,null=True);\n wallet = models.FloatField(null=True)\n profile_pic = models.ImageField(upload_to='profile_pics',blank=True)\n bio = models.TextField(null=True)\n contracted = models.BooleanField(default=False)\n salary = models.FloatField(default=0)\n courses = models.ManyToManyField(Course)\n subject_tag = models.CharField(max_length=30,null=True)\n \n #CPPS = \n #CMVS = Computer Vision\n #MATH = Mathematic, Statistic and Algebra\n #AIML = AI and Machine Learning\n #WWNT = World-wide WEB and Networking\n #APPS = Application Development and Java\n #ARVR = Augmented and Virtual Reality\n #SWEG = Software Engineering\n #STDS = System and Design\n\n university = models.CharField(max_length=30,null=True)\n active = models.BooleanField(default=True)\n #average_rating = models.FloatField(default=2)\n #blocked_timeslots = models.ManyToManyField(Timeslot, null=True)\n def __str__(self):\n return self.user.username\n \n def average_rating(self):\n all_ratings = map(lambda x: x.rating, self.review_set.all())\n return np.mean(all_ratings)\n \nclass Review(models.Model):\n \n RATING_CHOICES = (\n (1, '1'),\n (2, '2'),\n (3, '3'),\n (4, '4'),\n (5, '5'),\n (6, '6'),\n )\n \n tutor = models.ForeignKey(Tutor)\n user_name = models.CharField(max_length = 100)\n comment = models.CharField(max_length = 200)\n pub_date = models.DateTimeField(default=datetime.now(), blank=True)\n rating = models.IntegerField(choices = RATING_CHOICES)\n \nclass Transaction_(models.Model):\n tutor = models.ForeignKey(Tutor, on_delete=models.CASCADE)\n amount_added = models.FloatField(null = True)\n "
] |
[
[
"numpy.mean"
]
] |
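Tutor.average_rating above passes a map object straight to numpy.mean; under Python 3, map is a lazy iterator, so materializing the ratings into a list first (and guarding the empty case) is the safer form of the same computation. A small standalone sketch with a stand-in Review class rather than the Django model:

import numpy as np
from dataclasses import dataclass

@dataclass
class Review:            # stand-in for the Django Review model
    rating: int

def average_rating(reviews):
    ratings = [r.rating for r in reviews]          # materialize before np.mean
    return float(np.mean(ratings)) if ratings else 0.0

print(average_rating([Review(4), Review(5), Review(3)]))  # 4.0
print(average_rating([]))                                 # 0.0 instead of a NaN warning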
rucka/NeuralNetworkPlayground
|
[
"b1c9398ee3b0831de4982fdfef34892faa04440d"
] |
[
"notebook/slim_people_classification/eval_image_classifier.py"
] |
[
"\"\"\"Generic evaluation script that evaluates a model using a given dataset.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport math\nimport tensorflow as tf\n\nfrom datasets import dataset_factory\nfrom nets import nets_factory\nfrom preprocessing import preprocessing_factory\n\nslim = tf.contrib.slim\n\ntf.app.flags.DEFINE_integer(\n 'batch_size', 100, 'The number of samples in each batch.')\n\ntf.app.flags.DEFINE_integer(\n 'max_num_batches', None,\n 'Max number of batches to evaluate by default use all.')\n\ntf.app.flags.DEFINE_string(\n 'master', '', 'The address of the TensorFlow master to use.')\n\ntf.app.flags.DEFINE_string(\n 'checkpoint_path', '/tmp/tfmodel/',\n 'The directory where the model was written to or an absolute path to a '\n 'checkpoint file.')\n\ntf.app.flags.DEFINE_string(\n 'eval_dir', '/tmp/tfmodel/', 'Directory where the results are saved to.')\n\ntf.app.flags.DEFINE_integer(\n 'num_preprocessing_threads', 4,\n 'The number of threads used to create the batches.')\n\ntf.app.flags.DEFINE_string(\n 'dataset_name', 'gender', 'The name of the dataset to load.')\n\ntf.app.flags.DEFINE_string(\n 'dataset_split_name', 'test', 'The name of the train/test split.')\n\ntf.app.flags.DEFINE_string(\n 'dataset_dir', None, 'The directory where the dataset files are stored.')\n\ntf.app.flags.DEFINE_integer(\n 'labels_offset', 0,\n 'An offset for the labels in the dataset. This flag is primarily used to '\n 'evaluate the VGG and ResNet architectures which do not use a background '\n 'class for the ImageNet dataset.')\n\ntf.app.flags.DEFINE_string(\n 'model_name', 'inception_v3', 'The name of the architecture to evaluate.')\n\ntf.app.flags.DEFINE_string(\n 'preprocessing_name', None, 'The name of the preprocessing to use. 
If left '\n 'as `None`, then the model_name flag is used.')\n\ntf.app.flags.DEFINE_float(\n 'moving_average_decay', None,\n 'The decay to use for the moving average.'\n 'If left as None, then moving averages are not used.')\n\ntf.app.flags.DEFINE_integer(\n 'eval_image_size', None, 'Eval image size')\n\nFLAGS = tf.app.flags.FLAGS\n\n\ndef main(_):\n if not FLAGS.dataset_dir:\n raise ValueError('You must supply the dataset directory with --dataset_dir')\n\n tf.logging.set_verbosity(tf.logging.INFO)\n with tf.Graph().as_default():\n tf_global_step = slim.get_or_create_global_step()\n\n ######################\n # Select the dataset #\n ######################\n dataset = dataset_factory.get_dataset(\n FLAGS.dataset_name, FLAGS.dataset_split_name, FLAGS.dataset_dir)\n\n ####################\n # Select the model #\n ####################\n network_fn = nets_factory.get_network_fn(\n FLAGS.model_name,\n num_classes=(dataset.num_classes - FLAGS.labels_offset),\n is_training=False)\n\n ##############################################################\n # Create a dataset provider that loads data from the dataset #\n ##############################################################\n provider = slim.dataset_data_provider.DatasetDataProvider(\n dataset,\n shuffle=False,\n common_queue_capacity=2 * FLAGS.batch_size,\n common_queue_min=FLAGS.batch_size)\n [image, label] = provider.get(['image', 'label'])\n label -= FLAGS.labels_offset\n\n #####################################\n # Select the preprocessing function #\n #####################################\n preprocessing_name = FLAGS.preprocessing_name or FLAGS.model_name\n image_preprocessing_fn = preprocessing_factory.get_preprocessing(\n preprocessing_name,\n is_training=False)\n\n eval_image_size = FLAGS.eval_image_size or network_fn.default_image_size\n\n image = image_preprocessing_fn(image, eval_image_size, eval_image_size)\n\n images, labels = tf.train.batch(\n [image, label],\n batch_size=FLAGS.batch_size,\n num_threads=FLAGS.num_preprocessing_threads,\n capacity=5 * FLAGS.batch_size)\n\n ####################\n # Define the model #\n ####################\n logits, _ = network_fn(images)\n\n if FLAGS.moving_average_decay:\n variable_averages = tf.train.ExponentialMovingAverage(\n FLAGS.moving_average_decay, tf_global_step)\n variables_to_restore = variable_averages.variables_to_restore(\n slim.get_model_variables())\n variables_to_restore[tf_global_step.op.name] = tf_global_step\n else:\n variables_to_restore = slim.get_variables_to_restore()\n\n predictions = tf.argmax(logits, 1)\n labels = tf.squeeze(labels)\n\n # Define the metrics:\n names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({\n 'Accuracy': slim.metrics.streaming_accuracy(predictions, labels),\n 'Recall_5': slim.metrics.streaming_sparse_recall_at_k(\n logits, labels, 5),\n })\n\n # Print the summaries to screen.\n for name, value in names_to_values.items():\n summary_name = 'eval/%s' % name\n op = tf.summary.scalar(summary_name, value, collections=[])\n op = tf.Print(op, [value], summary_name)\n tf.add_to_collection(tf.GraphKeys.SUMMARIES, op)\n\n # TODO(sguada) use num_epochs=1\n if FLAGS.max_num_batches:\n num_batches = FLAGS.max_num_batches\n else:\n # This ensures that we make a single pass over all of the data.\n num_batches = math.ceil(dataset.num_samples / float(FLAGS.batch_size))\n\n if tf.gfile.IsDirectory(FLAGS.checkpoint_path):\n checkpoint_path = tf.train.latest_checkpoint(FLAGS.checkpoint_path)\n else:\n checkpoint_path = FLAGS.checkpoint_path\n\n 
tf.logging.info('Evaluating %s' % checkpoint_path)\n\n slim.evaluation.evaluate_once(\n master=FLAGS.master,\n checkpoint_path=checkpoint_path,\n logdir=FLAGS.eval_dir,\n num_evals=num_batches,\n eval_op=list(names_to_updates.values()),\n variables_to_restore=variables_to_restore)\n\n\nif __name__ == '__main__':\n tf.app.run()\n"
] |
[
[
"tensorflow.Graph",
"tensorflow.Print",
"tensorflow.train.latest_checkpoint",
"tensorflow.app.flags.DEFINE_integer",
"tensorflow.squeeze",
"tensorflow.train.ExponentialMovingAverage",
"tensorflow.gfile.IsDirectory",
"tensorflow.app.flags.DEFINE_string",
"tensorflow.logging.set_verbosity",
"tensorflow.app.flags.DEFINE_float",
"tensorflow.logging.info",
"tensorflow.add_to_collection",
"tensorflow.train.batch",
"tensorflow.argmax",
"tensorflow.summary.scalar",
"tensorflow.app.run"
]
] |
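One reusable detail of the evaluation script above is the checkpoint-path resolution: when --checkpoint_path is a directory, the latest checkpoint inside it is evaluated, otherwise the path is used as-is. A hedged sketch of just that step against tf.compat.v1-era TensorFlow (the script itself targets TF1 slim); the example path is hypothetical.

import tensorflow.compat.v1 as tf

def resolve_checkpoint(checkpoint_path):
    """Return a concrete checkpoint prefix, mirroring the script's branching."""
    if tf.gfile.IsDirectory(checkpoint_path):
        # directory: pick the newest checkpoint recorded in its 'checkpoint' file
        resolved = tf.train.latest_checkpoint(checkpoint_path)
        if resolved is None:
            raise ValueError("no checkpoint found in %s" % checkpoint_path)
        return resolved
    return checkpoint_path

# resolve_checkpoint("/tmp/tfmodel/")  # hypothetical directory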
Ali-Sahili/Background-Subtraction-Unsupervised-Learning
|
[
"445b2cf8736a4a28cff2b074a32afe8fe6986d53",
"445b2cf8736a4a28cff2b074a32afe8fe6986d53",
"445b2cf8736a4a28cff2b074a32afe8fe6986d53"
] |
[
"AE/train_complex.py",
"WDCGAN/models.py",
"VAE/train.py"
] |
[
"import torch\nfrom torch import nn\nimport torchvision.utils as vutils\nimport pytorch_ssim\n\nimport numpy as np\n\nfrom AE.Complex_Attention import *\n\nfrom Param import *\nfrom utils import weights_init\n\nfrom Losses import *\n\nfrom PIL import ImageFile\nImageFile.LOAD_TRUNCATED_IMAGES = True\n\n\n\n\ndef train(dataloader, print_epoch=batch_size, verbose=False):\n\n assert image_size == 256 or image_size == 128\n\n nc = 3\n h = image_size \n w = image_size\n dim_encoding = 6\n nheads = 8\n nce = 8\n nzf = 50 \n nze = 2 \n ncg = 8\n ncl = 8\n mask_head_flag = False\n masked_head = None\n model = Complex_Attention_Autoencoder(nze,nzf,nc, nce,nheads, h, w, dim_encoding,ncg,ncl, \n mask_head_flag,masked_head).to(device)\n\n\n\n \n if initialize_weights:\n model.apply(weights_init)\n\n criterion = nn.MSELoss() \n\n optimizer = torch.optim.Adam(model.parameters(), lr=lr)\n #optimizer = torch.optim.AdamW(model.parameters(), lr=lr)\n\n print(\"Starting Training Loop...\")\n\n\n AE_losses = []\n img_list = []\n att_enc_list = []\n att_dec_list = []\n max_norm = 0\n # For each epoch\n for epoch in range(num_epochs):\n torch.cuda.empty_cache()\n \n # For each batch in the dataloader\n for i, data in enumerate(dataloader, 0):\n \n if verbose: print(data[0].shape)\n if verbose: print(data[1].shape)\n\n img = data[0].to(device)\n \n encod_out, attention_maps_encoder = model(img)\n output, attention_maps_decoder = model(encod_out)\n\n loss = criterion(output, img)\n\n\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n loss.detach_()\n\n if max_norm > 0:\n torch.nn.utils.clip_grad_norm_(Encoder.parameters(), max_norm)\n torch.nn.utils.clip_grad_norm_(Decoder.parameters(), max_norm)\n\n # Output training stats\n if i % print_epoch == 0:\n print('[%d/%d][%d/%d]\\tLoss_AE: %.4f'\n % (epoch+1, num_epochs, i, len(dataloader), loss.item()))\n\n # Save Losses for plotting later\n AE_losses.append(loss.item())\n \n # Check how the generator is doing by saving G's output on fixed_noise\n if (i % 500 == 0) or ((epoch == num_epochs-1) and (i == len(dataloader)-1)):\n with torch.no_grad():\n enc_out, att_enc = model(data[0].to(device))\n img_out, att_dec = model(enc_out)\n img_list.append(vutils.make_grid(img_out.detach().cpu()[0:10,:3], nrow=5, normalize=True))\n att_enc_list.append(vutils.make_grid(att_enc.detach().cpu()[0:10,:3], nrow=5, normalize=True))\n att_dec_list.append(vutils.make_grid(att_dec.detach().cpu()[0:10,:3], nrow=5, normalize=True))\n \n\n return AE_losses, img_list, att_enc_list, att_dec_list, model\n\n\n\n\n",
"\"\"\" models of Generators, Endoders and Discriminators at various image sizes\nfollowing deep convolutionnal model of DCGAN\ncf https://pytorch.org/tutorials/beginner/dcgan_faces_tutorial.html\nand https://github.com/pytorch/examples/tree/master/dcgan\n\nthe final non linearity of the generator should be tanh ( positive and negative values, centered at zero) for GAN, but sigmoid for VAE,\nwhere image pixel values are coded as probabilities between 0 and 1\n\"\"\"\n\n\nfrom __future__ import print_function\nimport torch\nimport torch.nn as nn\n\nfrom Param import nz, nc\n\nclass Generator64(nn.Module):\n\n def __init__(self,nz=nz,ngf=64,nc=nc):\n super(Generator64, self).__init__()\n self.nz=nz\n self.nc=nc\n self.main = nn.Sequential(\n # input is z, going into a convolution\n # input shape bachsize x nz\n nn.Conv2d(nz, ngf * 16, 1, 1, 0, bias=False),\n nn.BatchNorm2d(ngf * 16),\n nn.ReLU(True),\n nn.ConvTranspose2d(ngf * 16, ngf * 8, 4, 1, 0, bias=False),\n nn.BatchNorm2d(ngf * 8),\n nn.ReLU(True),\n # state size. (ngf*8) x 4 x 4\n nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 4),\n nn.ReLU(True),\n # state size. (ngf*4) x 8 x 8\n nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 2),\n nn.ReLU(True),\n # state size. (ngf*2) x 16 x 16\n nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf),\n nn.ReLU(True),\n # state size. (ngf) x 32 x 32\n nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),\n nn.Tanh()\n #nn.Sigmoid() for VAE\n # state size. (nc) x 64 x 64\n )\n\n def forward(self, input):\n output = self.main(input.reshape(-1, self.nz, 1, 1))\n return output\n\n\n\nclass Generator128(nn.Module):\n def __init__(self,nz=nz,ngf=32,nc=nc):\n super(Generator128, self).__init__()\n self.nz = nz\n self.nc = nc\n self.main = nn.Sequential(\n nn.ConvTranspose2d(nz, ngf *32 , 2, 1, 0, bias=False),\n nn.BatchNorm2d(ngf * 32),\n nn.ReLU(True),\n # size ngf*32 x2 x2\n nn.ConvTranspose2d(ngf*32, ngf * 16, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 16),\n nn.ReLU(True),\n # size ngf*16 x4 x4\n nn.ConvTranspose2d(ngf * 16, ngf * 8, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 8),\n nn.ReLU(True),\n # state size. (ngf*8) x 8 x 8\n nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 4),\n nn.ReLU(True),\n # state size. (ngf*4) x 16 x16\n nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 2),\n nn.ReLU(True),\n # state size. (ngf*2) x 32 x 32\n nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf),\n nn.ReLU(True),\n # state size. (ngf) x 64 x 64\n nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),\n nn.Tanh()\n # nn.Sigmoid() for VAE\n # state size. (nc) x 128 x 128\n )\n\n def forward(self, input):\n output = self.main(input.reshape(-1, self.nz, 1, 1))\n return output\n\n\nclass Generator256(nn.Module):\n def __init__(self,nz=nz,ngf=16,nc=nc):\n super(Generator256, self).__init__()\n self.nz=nz\n self.nc=nc\n self.main = nn.Sequential(\n nn.ConvTranspose2d(nz, ngf *64 , 2, 1, 0, bias=False),\n nn.BatchNorm2d(ngf * 64),\n nn.ReLU(True),\n # size ngf*64 x2 x2\n nn.ConvTranspose2d(ngf * 64, ngf * 32, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 32),\n nn.ReLU(True),\n # size ngf*32 x4 x4\n nn.ConvTranspose2d(ngf*32, ngf * 16, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 16),\n nn.ReLU(True),\n # state size. 
(ngf*8) x 8 x 8\n nn.ConvTranspose2d(ngf * 16, ngf * 8, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 8),\n nn.ReLU(True),\n # state size. (ngf*4) x 16 x16\n nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 4),\n nn.ReLU(True),\n # state size. (ngf*2) x 32 x 32\n nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 2),\n nn.ReLU(True),\n # state size. (ngf) x 64 x 64\n nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf),\n nn.ReLU(True),\n # state size. (nc) x 128 x 128\n nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),\n nn.Tanh()\n # nn.Sigmoid() for VAE\n # state size. (nc) x 256 x 256\n\n )\n\n def forward(self, input):\n output = self.main(input.reshape(-1, self.nz, 1, 1))\n return output\n\n\n\nclass Generator512(nn.Module):\n def __init__(self,nz=nz,ngf=8,nc=nc):\n super(Generator512, self).__init__()\n self.nz=nz\n self.nc=nc\n self.main = nn.Sequential(\n nn.ConvTranspose2d(nz, ngf *128 , 2, 1, 0, bias=False),\n nn.BatchNorm2d(ngf * 128),\n nn.ReLU(True),\n # size ngf*128 x2 x2\n nn.ConvTranspose2d(ngf * 128, ngf * 64, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 64),\n nn.ReLU(True),\n # size ngf*64 x4 x4\n nn.ConvTranspose2d(ngf * 64, ngf * 32, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 32),\n nn.ReLU(True),\n # size ngf*32 x8 x8\n nn.ConvTranspose2d(ngf*32, ngf * 16, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 16),\n nn.ReLU(True),\n # state size. (ngf*16) x 16 x16\n nn.ConvTranspose2d(ngf * 16, ngf * 8, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 8),\n nn.ReLU(True),\n # state size. (ngf*8) x 32 x 32\n nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 4),\n nn.ReLU(True),\n # state size. (ngf*4) x 64 x 64\n nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 2),\n nn.ReLU(True),\n # state size. (ngf*2) x 128 x 128\n nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf),\n nn.ReLU(True),\n # state size. (ngf) x 256 x 256\n nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),\n nn.Tanh()\n #nn.Sigmoid() for VAE\n # state size. (nc) x 256 x 256\n\n )\n\n def forward(self, input):\n output = self.main(input.reshape(-1, self.nz, 1, 1))\n return output\n\n\n\n\n\nclass Discriminator64(nn.Module):\n def __init__(self,nef=64,nc=nc):\n super(Discriminator64, self).__init__()\n self.nef=nef\n self.main = nn.Sequential(\n # input is (nc) x 64 x 64\n nn.Conv2d(nc, nef, 4, 2, 1, bias=False),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (nef) x 32 x 32\n nn.Conv2d(nef, nef * 2, 4, 2, 1, bias=False),\n nn.BatchNorm2d(nef * 2),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (nef*2) x 16 x 16\n nn.Conv2d(nef * 2, nef * 4, 4, 2, 1, bias=False),\n nn.BatchNorm2d(nef * 4),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (nef*4) x 8 x 8\n nn.Conv2d(nef * 4, nef * 8, 4, 2, 1, bias=False),\n nn.BatchNorm2d(nef * 8),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (nef*8) x 4 x 4\n nn.Conv2d(nef * 8, nef * 16, 4, 1, 0, bias=False),\n nn.Conv2d(nef * 16, 1, 1, 1, 0, bias=True),\n nn.Sigmoid()\n )\n\n def forward(self, input):\n output = self.main(input)\n return output.reshape(-1, 1)\n\n\n\nclass Discriminator128(nn.Module):\n def __init__(self,nef=32,nc=nc):\n super(Discriminator128, self).__init__()\n self.nc=nc\n self.main = nn.Sequential(\n # input is (nc) x 128 x 128\n nn.Conv2d(nc, nef, 4, 2, 1, bias=False),\n nn.BatchNorm2d(nef),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. 
(nef) x 64 x 64\n nn.Conv2d(nef, nef * 2, 4, 2, 1, bias=False),\n nn.BatchNorm2d(nef * 2),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (nef) x 32 x 32\n nn.Conv2d(nef*2, nef * 4, 4, 2, 1, bias=False),\n nn.BatchNorm2d(nef * 4),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (nef*2) x 16 x 16\n nn.Conv2d(nef * 4, nef * 8, 4, 2, 1, bias=False),\n nn.BatchNorm2d(nef * 8),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (nef*4) x 8 x 8\n nn.Conv2d(nef * 8, nef * 16, 4, 2, 1, bias=False),\n nn.BatchNorm2d(nef * 16),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (nef*8) x 4 x 4\n nn.Conv2d(nef * 16, nef * 32, 4, 1, 0, bias=False),\n nn.BatchNorm2d(nef * 32),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Conv2d(nef * 32, 1, 1, 1, 0, bias=True),\n nn.Sigmoid()\n )\n\n def forward(self, input):\n output = self.main(input)\n return output.reshape(-1, 1)\n\n\nclass Discriminator256(nn.Module):\n def __init__(self,nef=16,nc=nc):\n super(Discriminator256, self).__init__()\n self.nc=nc\n self.main = nn.Sequential(\n # input is (nc) x 258 x 256\n nn.Conv2d(nc, nef, 4, 2, 1, bias=False),\n nn.BatchNorm2d(nef),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (nef) x 128 x 128\n nn.Conv2d(nef, nef * 2, 4, 2, 1, bias=False),\n nn.BatchNorm2d(nef * 2),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (nef) x 64 x 64\n nn.Conv2d(nef * 2, nef * 4, 4, 2, 1, bias=False),\n nn.BatchNorm2d(nef * 4),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (nef) x 32 x 32\n nn.Conv2d(nef*4, nef * 8, 4, 2, 1, bias=False),\n nn.BatchNorm2d(nef * 8),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (nef*2) x 16 x 16\n nn.Conv2d(nef * 8, nef * 16, 4, 2, 1, bias=False),\n nn.BatchNorm2d(nef * 16),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (nef*4) x 8 x 8\n nn.Conv2d(nef * 16, nef * 32, 4, 2, 1, bias=False),\n nn.BatchNorm2d(nef * 32),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (nef*8) x 4 x 4\n nn.Conv2d(nef * 32, nef * 64, 4, 1, 0, bias=False),\n nn.BatchNorm2d(nef * 64),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Conv2d(nef * 64, 1, 1, 1, 0, bias=True),\n nn.Sigmoid()\n )\n\n def forward(self, input):\n output = self.main(input)\n return output.reshape(-1, 1)\n\n\n\n\nclass Discriminator512(nn.Module):\n def __init__(self,nef=8,nc=nc):\n super(Discriminator512, self).__init__()\n self.nc=nc\n self.main = nn.Sequential(\n # input is (nc) x 512 x 512\n nn.Conv2d(nc, nef, 4, 2, 1, bias=False),\n nn.BatchNorm2d(nef),\n nn.LeakyReLU(0.2, inplace=True),\n # state size is (nef) x 256 x 256\n nn.Conv2d(nef, nef * 2, 4, 2, 1, bias=False),\n nn.BatchNorm2d(nef * 2),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (nef*2) x 128 x 128\n nn.Conv2d(nef*2, nef * 4, 4, 2, 1, bias=False),\n nn.BatchNorm2d(nef * 4),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (nef*4) x 64 x 64\n nn.Conv2d(nef * 4, nef * 8, 4, 2, 1, bias=False),\n nn.BatchNorm2d(nef * 8),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (nef*8) x 32 x 32\n nn.Conv2d(nef*8, nef * 16, 4, 2, 1, bias=False),\n nn.BatchNorm2d(nef * 16),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (nef*2) x 16 x 16\n nn.Conv2d(nef * 16, nef * 32, 4, 2, 1, bias=False),\n nn.BatchNorm2d(nef * 32),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (nef*32) x 8 x 8\n nn.Conv2d(nef * 32, nef * 64, 4, 2, 1, bias=False),\n nn.BatchNorm2d(nef * 64),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. 
(nef*64) x 4 x 4\n nn.Conv2d(nef * 64, nef * 128, 4, 1, 0, bias=False),\n nn.BatchNorm2d(nef * 128),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Conv2d(nef * 128, 1, 1, 1, 0, bias=True),\n nn.Sigmoid()\n )\n\n def forward(self, input):\n output = self.main(input)\n return output.reshape(-1, 1)\n\n\n# Discriminator based on U-net architecture\nclass netD512(nn.Module):\n\n def __init__(self, nef=16, nc=nc):\n super(netD512, self).__init__()\n self.nc = nc\n\n # 3*512*512\n self.down1 = nn.Sequential(\n nn.Conv2d(self.nc, nef, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(nef),\n nn.ReLU(inplace=True),\n nn.Conv2d(nef, nef, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(nef),\n nn.ReLU(inplace=True))\n self.down1_pool = nn.Sequential(nn.MaxPool2d(kernel_size=2, stride=2))\n\n # 16*256*256\n self.down2 = nn.Sequential(\n nn.Conv2d(nef, nef*2, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(nef*2),\n nn.ReLU(inplace=True),\n nn.Conv2d(nef*2, nef*2, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(nef*2),\n nn.ReLU(inplace=True),)\n self.down2_pool = nn.Sequential(nn.MaxPool2d(kernel_size=2, stride=2))\n\n # 32*128*128\n self.down3 = nn.Sequential(\n nn.Conv2d(nef*2, nef*4, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(nef*4),\n nn.ReLU(inplace=True),\n nn.Conv2d(nef*4, nef*4, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(nef*4),\n nn.ReLU(inplace=True),)\n self.down3_pool = nn.Sequential(nn.MaxPool2d(kernel_size=2, stride=2))\n\n # 64*64*64\n self.down4 = nn.Sequential(\n nn.Conv2d(nef*4, nef*8, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(nef*8),\n nn.ReLU(inplace=True),\n nn.Conv2d(nef*8, nef*8, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(nef*8),\n nn.ReLU(inplace=True))\n self.down4_pool = nn.Sequential(nn.MaxPool2d(kernel_size=2, stride=2))\n\n # 128*32*32\n self.down5 = nn.Sequential(\n nn.Conv2d(nef*8, nef*16, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(nef*16),\n nn.ReLU(inplace=True),\n nn.Conv2d(nef*16, nef*16, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(nef*16),\n nn.ReLU(inplace=True))\n self.down5_pool = nn.Sequential(nn.MaxPool2d(kernel_size=2, stride=2))\n\n # 256*16*16\n self.down6 = nn.Sequential(\n nn.Conv2d(nef*16, nef*32, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(nef*32),\n nn.ReLU(inplace=True),\n nn.Conv2d(nef*32, nef*32, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(nef*32),\n nn.ReLU(inplace=True))\n self.down6_pool = nn.Sequential(nn.MaxPool2d(kernel_size=2, stride=2))\n\n # 512*8*8\n self.center = nn.Sequential(\n nn.Conv2d(nef*32, nef*64, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(nef*64),\n nn.ReLU(inplace=True),\n nn.Conv2d(nef*64, nef*64, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(nef*64),\n nn.ReLU(inplace=True),)\n\n # 1024*8*8\n self.upsample6 = nn.Sequential(nn.Upsample(scale_factor=2, mode='bilinear'))\n self.up6 = nn.Sequential(\n nn.Conv2d(nef*64+nef*32, nef*32, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(nef*32),\n nn.ReLU(inplace=True),\n nn.Conv2d(nef*32, nef*32, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(nef*32),\n nn.ReLU(inplace=True),\n nn.Conv2d(nef*32, nef*32, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(nef*32),\n nn.ReLU(inplace=True),)\n\n # 512*16*16\n self.upsample5 = nn.Sequential(nn.Upsample(scale_factor=2, mode='bilinear'))\n self.up5 = nn.Sequential(\n nn.Conv2d(nef*32+nef*16, nef*16, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(nef*16),\n nn.ReLU(inplace=True),\n nn.Conv2d(nef*16, 
nef*16, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(nef*16),\n nn.ReLU(inplace=True),\n nn.Conv2d(nef*16, nef*16, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(nef*16),\n nn.ReLU(inplace=True),)\n\n # 256*32*32\n self.upsample4 = nn.Sequential(nn.Upsample(scale_factor=2, mode='bilinear'))\n self.up4 = nn.Sequential(\n nn.Conv2d(nef*16+nef*8, nef*8, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(nef*8),\n nn.ReLU(inplace=True),\n nn.Conv2d(nef*8, nef*8, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(nef*8),\n nn.ReLU(inplace=True),\n nn.Conv2d(nef*8, nef*8, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(nef*8),\n nn.ReLU(inplace=True),)\n\n # 128*64*64\n self.upsample3 = nn.Sequential(nn.Upsample(scale_factor=2, mode='bilinear'))\n self.up3 = nn.Sequential(\n nn.Conv2d(nef*8+nef*4, nef*4, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(nef*4),\n nn.ReLU(inplace=True),\n nn.Conv2d(nef*4, nef*4, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(nef*4),\n nn.ReLU(inplace=True),\n nn.Conv2d(nef*4, nef*4, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(nef*4),\n nn.ReLU(inplace=True),\n )\n\n # 64*128*128\n self.upsample2 = nn.Sequential(nn.Upsample(scale_factor=2, mode='bilinear'))\n self.up2 = nn.Sequential(\n nn.Conv2d(nef*4+nef*2, nef*2, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(nef*2),\n nn.ReLU(inplace=True),\n nn.Conv2d(nef*2, nef*2, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(nef*2),\n nn.ReLU(inplace=True),\n nn.Conv2d(nef*2, nef*2, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(nef*2),\n nn.ReLU(inplace=True),\n )\n\n # 32*256*256\n self.upsample1 = nn.Sequential(nn.Upsample(scale_factor=2, mode='bilinear'))\n self.up1 = nn.Sequential(\n nn.Conv2d(nef*2+nef, nef, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(nef),\n nn.ReLU(inplace=True),\n nn.Conv2d(nef, nef, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(nef),\n nn.ReLU(inplace=True),\n nn.Conv2d(nef, nef, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(nef),\n nn.ReLU(inplace=True),\n )\n # 16*512*512\n self.classifier = nn.Sequential(\n nn.Conv2d(nef, self.nc, kernel_size=3, stride=1, padding=1),\n nn.Sigmoid(),\n )\n # 3*512*512\n\n # state size. (nef*32) x 8 x 8\n self.out = nn.Sequential(\n nn.Conv2d(nef * 32, nef * 64, 4, 2, 1, bias=False),\n nn.BatchNorm2d(nef * 64),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. 
(nef*64) x 4 x 4\n nn.Conv2d(nef * 64, nef * 128, 4, 1, 0, bias=False),\n nn.BatchNorm2d(nef * 128),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Conv2d(nef * 128, 1, 1, 1, 0, bias=True),\n nn.Sigmoid())\n\n\n def forward(self, img):\n # 3*512*512\n down1 = self.down1(img)\n down1_pool = self.down1_pool(down1)\n\n # 16*256*256\n down2 = self.down2(down1_pool)\n down2_pool = self.down2_pool(down2)\n\n # 32*128*128\n down3 = self.down3(down2_pool)\n down3_pool = self.down3_pool(down3)\n\n # 64*64*64\n down4 = self.down4(down3_pool)\n down4_pool = self.down4_pool(down4)\n\n # 128*32*32\n down5 = self.down5(down4_pool)\n down5_pool = self.down5_pool(down5)\n\n # 256*16*16\n down6 = self.down6(down5_pool)\n down6_pool = self.down6_pool(down6)\n\n # 512*8*8\n center = self.center(down6_pool)\n # 1024*8*8\n\n up6 = self.upsample6(center)\n # 1024*16*16\n\n up6 = torch.cat((down6,up6), 1)\n up6 = self.up6(up6)\n\n # 512*16*16\n up5 = self.upsample5(up6)\n up5 = torch.cat((down5,up5), 1)\n up5 = self.up5(up5)\n\n # 256*32*32\n up4 = self.upsample4(up5)\n up4 = torch.cat((down4,up4), 1)\n up4 = self.up4(up4)\n\n # 128*64*64\n up3 = self.upsample3(up4)\n up3 = torch.cat((down3,up3), 1)\n up3 = self.up3(up3)\n\n # 64*128*128\n up2 = self.upsample2(up3)\n up2 = torch.cat((down2,up2), 1)\n up2 = self.up2(up2)\n\n # 32*256*256\n up1 = self.upsample1(up2)\n up1 = torch.cat((down1,up1), 1)\n up1 = self.up1(up1)\n\n # 16*512*512\n prob = self.classifier(up1)\n # 3*512*512\n\n out = self.out(down6_pool)\n #return prob\n return out\n\n",
"import torch\nfrom torch.autograd import Variable\nimport torchvision.utils as vutils\n\nfrom VAE.model import VAE, Encoder, Decoder, VAE256\n\nimport os\npath = os.getcwd()\nos.chdir(\"../\")\nfrom Param import *\nos.chdir(path)\n\n\nfrom VAE.Loss_functions import *\n\nfrom PIL import ImageFile\nImageFile.LOAD_TRUNCATED_IMAGES = True\n\ndef trainVAE(dataloader, print_epoch = 32, verbose = False):\n\n assert image_size == 512 or image_size == 256\n #\n\n torch.cuda.empty_cache()\n torch.manual_seed(10)\n torch.backends.cudnn.enabled = True\n torch.backends.cudnn.benchmark = True\n\n if image_size == 256:\n model = VAE256().to(device)\n elif image_size == 512:\n model = VAE().to(device)\n #Encoder_model = Encoder().to(device)\n #Decoder_model = Decoder().to(device)\n\n optimizer = torch.optim.Adam(model.parameters(), lr=lr)\n #optimizer = torch.optim.Adam(list(Encoder_model.parameters())+list(Decoder_model.parameters()), lr=lr) \n\n img_list = []\n losses = []\n \n mu_list = []\n logvar_list = []\n\n print(\"Starting Training Loop...\")\n #Encoder_model.train()\n #Decoder_model.train()\n for epoch in range(num_epochs):\n torch.cuda.empty_cache()\n #Encoder_model.train()\n #Decoder_model.train()\n model.train()\n\n for i, data in enumerate(dataloader, 0):\n img = Variable(data[0]).to(device)\n\n output, mu, logvar = model(img)\n\n #mu, logvar = Encoder_model(img)\n \n \"\"\"\n if verbose: \n mu_mean = torch.mean(mu)\n logvar_mean = torch.mean(logvar)\n\n print('means: ',mu_mean.item(), logvar_mean.item())\n\n std = logvar.mul(0.5).exp_()\n condition1 = (mu > (mu + 0.1*std))\n condition2 = (mu < (mu - 0.1*std)) \n\n outliers = condition1 * condition2\n\n mu = mu[outliers == False] # = 0 # mu_mean\n logvar = logvar[outliers == False] #= 0 # logvar_mean\n\n if verbose:\n nb1 = mu.numel() - (mu == 0).sum()\n nb2 = logvar.numel() - (logvar == 0).sum()\n print('number of zeros: ', nb1.item(), nb2.item())\n \"\"\"\n\n #output = Decoder_model(mu, logvar)\n \n\n loss = loss_function2(output, img, mu, logvar)\n losses.append(loss)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n loss.detach_()\n\n mu_list.append(mu)\n logvar_list.append(logvar)\n\n if i % print_epoch == 0:\n print('[%d/%d][%d/%d]\\tLoss: %.4f'\n % (epoch+1, num_epochs, i, len(dataloader), loss.item()))\n\n # Check how the generator is doing by saving G's output on fixed_noise\n if (epoch == num_epochs-1) and (i == len(dataloader)-1):\n img_list.append(vutils.make_grid(output.detach().cpu()[:8], padding=2, normalize=True))\n\n\n return img_list, losses, model, mu_list, logvar_list\n"
] |
[
[
"torch.no_grad",
"torch.cuda.empty_cache",
"torch.nn.MSELoss"
],
[
"torch.nn.ConvTranspose2d",
"torch.cat",
"torch.nn.Conv2d",
"torch.nn.Tanh",
"torch.nn.Sigmoid",
"torch.nn.MaxPool2d",
"torch.nn.Upsample",
"torch.nn.LeakyReLU",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU"
],
[
"torch.manual_seed",
"torch.cuda.empty_cache",
"torch.autograd.Variable"
]
] |
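The generators in WDCGAN/models.py rely on one piece of shape arithmetic: nn.ConvTranspose2d with kernel 4, stride 2, padding 1 exactly doubles the spatial resolution, which is how the stacked blocks climb from a 1x1 latent map to 64/128/256/512-pixel images. A tiny sketch that verifies the doubling (channel counts here are arbitrary, not the repository's):

import torch
from torch import nn

# H_out = (H_in - 1)*stride - 2*padding + kernel = 2*H_in for k=4, s=2, p=1
up = nn.Sequential(
    nn.ConvTranspose2d(16, 8, 4, 2, 1, bias=False),
    nn.BatchNorm2d(8),
    nn.ReLU(True),
    nn.ConvTranspose2d(8, 3, 4, 2, 1, bias=False),
    nn.Tanh(),
)
x = torch.randn(1, 16, 8, 8)
print(up(x).shape)  # torch.Size([1, 3, 32, 32]) -- each block doubles 8 -> 16 -> 32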
jiagengliu/small-caps
|
[
"2c112da11f6dded5e56568a60cc44ba96a3c9a38"
] |
[
"K-line pattern/backtest.py"
] |
[
"\"\"\"\nBacktest on K-line patterns\n\"\"\"\n# coding: utf-8\n\n# # Initialization\n\nimport talib as tl\nimport tushare as ts\nimport pandas as pd\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom matplotlib import gridspec\nimport os\ncodes = pd.read_csv('index/code_50')\nexample = str(codes['code'].get_values()[5])\n\ndef price(FuncName,code,interval = '15',plot=False):\n #print('The example stock is %s and the code is %d with market size as %d' %(code['name'][0],code['code'][0],code['mktcap'][0]) )\n data = ts.get_k_data(code = example, start='2018-05-25', end = '2018-05-29',ktype= interval, autype='qfq',retry_count=5)\n #plt.Line2D(range(len(data['close'])),data['close'])\n #่ก็ฅจ้็จๆฏๆฅๆถ็ไปท็ปๅพ\n price = data['close']\n \n if plot:\n #็ปๅถ่กไปท่ตฐๅฟ\n plt.figure(dpi=64,figsize=(12,8))\n plt.plot(price,'k-',\n markerfacecolor='blue',markersize=12) \n plt.show()\n close = data['close']\n high = data['high']\n low = data['low']\n open = data['open']\n test = np.array(getattr(tl,FuncName)(open,high,low,close))\n \n return (close,high,low,open,test,data)\n\n\n# # Strategy Implementation\n\n# ้่ฟ็ญ็ฅ๏ผๅพๅฐ๏ผ100ๅ่ฎคไธบๅบ่ฏฅไนฐๅ
ฅ๏ผๅพๅฐ100ๅ่ฎคไธบๅๅบ๏ผๅจๅผไธบ0็ๆถๅ๏ผไฟๆๆไป้ไธๅ\n\n\ndef mock(Funcname,interval,start,end,code,close,high,low,open,test):\n #ๅๅง้ถๆฎต๏ผๅๅฎ็ฐ้ไธบ1000ไบบๆฐๅธ๏ผไธๆไป้ไธบ0\n cash = [1000]\n hold_position = [0]\n for i in range(1,len(test)):\n BuyorSell = test[i]\n #ๅฆๆๆๆฏๅๆๆพ็คบไนฐๅ
ฅไธๆไป้ไธบ0๏ผๅๅ
จไปไนฐๅ
ฅ\n if BuyorSell == 100 and hold_position[i-1]==0:\n #่ฎก็ฎๅฏไปฅไนฐๅ
ฅๅคๅฐ่ก็ฅจ\n num_of_shares = int(cash[i-1]/close[i-1])\n hold_position.append(num_of_shares)\n print('%s Buying %s shares of stock' %(data['date'][i],num_of_shares))\n cash.append(cash[i-1]-num_of_shares*close[i-1])\n #ๅฆๆๅๅบไธๆไป้ๅคงไบ0๏ผๅ็ฉบไปๅๅบ\n elif BuyorSell == -100 and hold_position[i-1]>0:\n print('%s Selling %s shares of stock' %(data['date'][i],hold_position[i-1]))\n cash_0 = hold_position[i-1]*close[i-1]\n cash.append(cash_0+cash[i-1])\n hold_position.append(0)\n else:\n cash.append(cash[i-1])\n hold_position.append(hold_position[i-1])\n #็ปๅถ่ตไบง็ไบๅพ๏ผไบคๆ็ญ็ฅๅพ๏ผๆถ็็ๆฏ่พๅพ\n stock = np.multiply(hold_position,close)\n asset = cash + stock\n #่ตไบง็ไบๅพ\n #plt.figure(dpi=64,figsize=(12,8))\n fig = plt.figure(figsize=(12,9))\n fig.suptitle('Stock Code: %s Type: %s \\nFunction:%s\\nPeriod: %s-%s' %(code,interval,Funcname,start,end),fontsize=15,x=0.53,y=0.96)\n gs = gridspec.GridSpec(3, 1, height_ratios=[3,1,3]) \n #plt.subplot(311)\n \n ax0 = plt.subplot(gs[0])\n ax0.plot(asset,'b-',markerfacecolor='blue',markersize=12)\n #plt.show()\n #ไบคๆ็ญ็ฅๅพ\n #plt.figure(dpi=64,figsize=(12,2))\n #plt.subplot(312)\n ax1 = plt.subplot(gs[1])\n ax1.plot(test/100,'b-',markerfacecolor='blue',markersize=12)\n #plt.show()\n #ๆถ็็ๆฏ่พๅพ()\n ret_0 = tl.ROC(close,timeperiod=5)\n ret = tl.ROC(asset,timeperiod=5)\n #plt.figure(dpi=64,figsize=(12,6))\n #plt.subplot(313)\n \n ax2 = plt.subplot(gs[2])\n ax2.plot(ret_0,'b--',markerfacecolor='blue',markersize=12,label='market return')\n ax2.plot(ret,'k-',markerfacecolor='blue',markersize=12,label='strategy return')\n ax2.legend(fontsize='large',loc='best')\n #plt.show()\n #ๅๆๅฝขๆ็ญ็ฅ็ไบๆนๅทฎ\n \n return fig\n\n\n# # ๆน้ๆต่ฏ\n\n# ้ๆบ้ๅๅคๆฏๅฐๅธๅผ่ก็ฅจ๏ผๅฏนๅคไธชๅฝขๆ่ฟ่กๅ็ฌๅๅซๅๆ\n\n\nstocks = np.random.choice(codes['code'],size=10,replace=True)\nvalid_names = []\nintervals = ['5','15','60']\nfor stock in stocks:\n for interval in intervals:\n print('interval is %s' %interval)\n for FuncName in FuncList:\n try:\n if not os.path.exists('single'):\n os.mkdir('single')\n (close,high,low,open,test,data) = price(FuncName,stock,interval=interval)\n if np.sum(test==100)>5:\n start = np.array(data['date'])[0]\n end = np.array(data['date'])[-1]\n fig=mock(FuncName,interval,start,end,stock,close,high,low,open,test)\n fig.savefig('single/%s_%s_%s.jpg' %(FuncName,interval,stock))\n if FuncName not in valid_names:\n valid_names.append(FuncName)\n except:\n raise ValueError('ktype is wrong')\n\n\n# ็ปๅๅคไธชๅฝขๆๅๆๅฝขๆไบคๆ็ญ็ฅ๏ผ็ฒ็ฅๅฐ่ฟ่ก็ฎๅ็ปๅ๏ผ๏ผ็ปๅๆนๆณๅฎไธบ้ๆบ้ๅไธไธชๆๆๅฝขๆ่ฟ่ก็ญ็ฅๅ ๅ ๏ผๅบ็จๅฐๆฐๆไปๅคๆฐๅๅ๏ผๅฆๆๅๆถๅบ็ฐๅ็ฉบ๏ผไนฐๅ
ฅๅไฟๆๆ
ๅต๏ผๅฎไธบ้็จไฟๆ็ญ็ฅใ\n\nfor i in range(15):\n choices = np.random.choice(valid_names,size=3,replace=True)\n if not os.path.exists('compound'):\n os.mkdir('compound')\n for stock in stocks:\n for interval in ['5']:\n try:\n (close,high,low,open,test0,data) = price(choices[0],stock,interval=interval)\n (close,high,low,open,test1,data) = price(choices[1],stock,interval=interval)\n (close,high,low,open,test2,data) = price(choices[2],stock,interval=interval)\n start = np.array(data['date'])[0]\n end = np.array(data['date'])[-1]\n test = test0+test1+test2\n for i in range(len(test)):\n if test[i] == 300:\n test[i] = 100\n elif test[i] == -300:\n test[i] = -100\n FuncName = '%s+%s+%s' %(choices[0],choices[1],choices[2])\n fig = mock(FuncName,interval,start,end,stock,close,high,low,open,test)\n fig.savefig('compound/%s_%s_%s.jpg' %(FuncName,interval,stock))\n except:\n raise ValueError('something goes wrong')\n\n\nFuncList =[\n 'CDL2CROWS'\n , 'CDL3BLACKCROWS'\n , 'CDL3INSIDE'\n , 'CDL3LINESTRIKE'\n , 'CDL3OUTSIDE'\n , 'CDL3STARSINSOUTH'\n , 'CDL3WHITESOLDIERS'\n , 'CDLABANDONEDBABY'\n , 'CDLADVANCEBLOCK'\n , 'CDLBELTHOLD'\n , 'CDLBREAKAWAY'\n , 'CDLCLOSINGMARUBOZU'\n , 'CDLCONCEALBABYSWALL'\n , 'CDLCOUNTERATTACK'\n , 'CDLDARKCLOUDCOVER'\n , 'CDLDOJI'\n , 'CDLDOJISTAR'\n , 'CDLDRAGONFLYDOJI'\n , 'CDLENGULFING'\n , 'CDLEVENINGDOJISTAR'\n , 'CDLEVENINGSTAR'\n , 'CDLGAPSIDESIDEWHITE'\n , 'CDLGRAVESTONEDOJI'\n , 'CDLHAMMER'\n , 'CDLHANGINGMAN'\n , 'CDLHARAMI'\n , 'CDLHARAMICROSS'\n , 'CDLHIGHWAVE'\n , 'CDLHIKKAKE'\n , 'CDLHIKKAKEMOD'\n , 'CDLHOMINGPIGEON'\n , 'CDLIDENTICAL3CROWS'\n , 'CDLINNECK'\n , 'CDLINVERTEDHAMMER'\n , 'CDLKICKING'\n , 'CDLKICKINGBYLENGTH'\n , 'CDLLADDERBOTTOM'\n , 'CDLLONGLEGGEDDOJI'\n , 'CDLLONGLINE'\n , 'CDLMARUBOZU'\n , 'CDLMATCHINGLOW'\n , 'CDLMATHOLD'\n , 'CDLMORNINGDOJISTAR'\n , 'CDLMORNINGSTAR'\n , 'CDLONNECK'\n , 'CDLPIERCING'\n , 'CDLRICKSHAWMAN'\n , 'CDLRISEFALL3METHODS'\n , 'CDLSEPARATINGLINES'\n , 'CDLSHOOTINGSTAR'\n , 'CDLSHORTLINE'\n , 'CDLSPINNINGTOP'\n , 'CDLSTALLEDPATTERN'\n , 'CDLSTICKSANDWICH'\n , 'CDLTAKURI'\n , 'CDLTASUKIGAP'\n , 'CDLTHRUSTING'\n , 'CDLTRISTAR'\n , 'CDLUNIQUE3RIVER'\n , 'CDLUPSIDEGAP2CROWS'\n , 'CDLXSIDEGAP3METHODS'\n]\n\n"
] |
[
[
"pandas.read_csv",
"numpy.multiply",
"numpy.random.choice",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.subplot",
"matplotlib.gridspec.GridSpec",
"numpy.array",
"matplotlib.pyplot.show",
"numpy.sum",
"matplotlib.pyplot.figure"
]
] |
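The core of mock() in backtest.py is the bookkeeping loop that turns a +100/-100/0 pattern signal into cash and share positions. The sketch below isolates that loop on synthetic prices and signals; the arrays are made up for illustration, and no tushare or TA-Lib call is involved.

import numpy as np

def run_signals(signal, close, start_cash=1000.0):
    """All-in on +100 when flat, sell everything on -100, otherwise hold."""
    cash, shares = start_cash, 0
    equity = []
    for sig, price in zip(signal, close):
        if sig == 100 and shares == 0:
            shares = int(cash // price)
            cash -= shares * price
        elif sig == -100 and shares > 0:
            cash += shares * price
            shares = 0
        equity.append(cash + shares * price)
    return np.array(equity)

close = np.array([10.0, 10.5, 11.0, 10.8, 11.5])
signal = np.array([0, 100, 0, -100, 0])
print(run_signals(signal, close))  # per-bar equity; the run ends fully in cash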
joaomh/Coding-Challenge-Py
|
[
"615eab0309fbefddebfcf6216dcccbafcf986543"
] |
[
"CC_001_PiValueLeibniz.py"
] |
[
"# Coding Challenge #001\n# Pi Approximation with Leibniz Series\n# Joao Pinheiro\n# YouTube Channel - ExataMenteS\n\n# Import some packages\nimport numpy as np \nimport matplotlib.pyplot as plt \n\n# User input ot 'n'\nInteraction = input('Insert the number of interactions: ')\nprint('The number of interactions is ' + Interaction)\n\n# Init the variables\nn = np.linspace(0, int(Interaction), int(Interaction)+1)\npi = np.zeros(len(n))\nerror = np.zeros(len(n))\n\n# Loop\nfor i in range(int(Interaction)+1):\n pi[i] = pi[i-1] + 4*( (-1)**i / (2*i+1) ) # Leibniz Formula Aprox\n error[i] = pi[i] - np.pi # Comparing with the real value of Pi\n plt.plot(n[i],pi[i],'ro',markersize=2) # Plot the pi[i] for each 'n'\n plt.plot(n[i],error[i],'bo',markersize=2) # Plot the error[i] for each 'n'\n plt.pause(0.001)\n\n# Plot and see if the series converg\nplt.plot(n,np.pi*np.ones(len(n))) # Plot a constant line with value of Pi\nplt.plot(n,np.zeros(len(n))) # Plot a constant line with value of 0\nplt.show()\n"
] |
[
[
"matplotlib.pyplot.plot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.pause"
]
] |
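CC_001 accumulates the Leibniz series term by term inside a plotting loop. The same partial sums can be produced in one shot with numpy's cumsum, which is handy when only the numbers are needed rather than the animation; a short sketch (variable names are mine, not the script's):

import numpy as np

n = 100000
i = np.arange(n + 1)
terms = 4.0 * (-1.0) ** i / (2 * i + 1)   # Leibniz terms 4*(-1)^i / (2i+1)
partial = np.cumsum(terms)                # pi approximation after each term
error = partial - np.pi

print(partial[-1])       # ~3.14160...
print(abs(error[-1]))    # roughly 1e-5 after 1e5 terms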
vishalbelsare/TFNumPy
|
[
"577838cb6781207d561e389e9cd02aba02c25a1b"
] |
[
"test/test_khatrirao_product.py"
] |
[
"\nimport unittest\nimport numpy as np\nimport tensorflow.compat.v1 as tf\n\nfrom tfnumpy.tensor import khatrirao_product\n\n\nclass test_KJ(unittest.TestCase):\n def setUp(self):\n x1 = np.array([[1, 4, 7, 10], [2, 5, 8, 11], [3, 6, 9, 12]], dtype=np.float32)\n x2 = np.array([[13, 16, 19, 22], [14, 17, 20, 23], [15, 18, 21, 24]], dtype=np.float32)\n x = np.zeros((3, 4, 2))\n x[:, :, 0] = x1\n x[:, :, 1] = x2\n self.x = x\n\n self.a1 = np.array([[2., 3., 3.], [1., 2., -1.]])\n self.a2 = np.array([[2., 3., 1.], [1., 2., 3.]])\n\n self.answer = np.array([[ 4., 9., 3.],\n [ 2., 6., 9.],\n [ 2., 6., -1.],\n [ 1., 4., -3.]], dtype=np.float32)\n\n def test_nopredefinedsession(self):\n result = khatrirao_product(self.a1, self.a2)\n self.assertTrue(np.allclose(self.answer, result, rtol=1e-5))\n\n def test_predefinedsession(self):\n sess = tf.Session()\n result = khatrirao_product(self.a1, self.a2, tfsess=sess)\n self.assertTrue(np.allclose(self.answer, result, rtol=1e-5))\n"
] |
[
[
"tensorflow.compat.v1.Session",
"numpy.array",
"numpy.zeros",
"numpy.allclose"
]
] |
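For reference alongside the test above: the Khatri-Rao product of A (I x K) and B (J x K) is the column-wise Kronecker product, so column k of the result is kron(A[:, k], B[:, k]) and the output is (I*J) x K. A plain-numpy sketch (independent of tfnumpy) that reproduces the expected answer used in the test:

import numpy as np

def khatrirao_np(a, b):
    # C[i*J + j, k] = a[i, k] * b[j, k]  (column-wise Kronecker product)
    i, k = a.shape
    j, k2 = b.shape
    assert k == k2
    return np.einsum("ik,jk->ijk", a, b).reshape(i * j, k)

a1 = np.array([[2., 3., 3.], [1., 2., -1.]])
a2 = np.array([[2., 3., 1.], [1., 2., 3.]])
expected = np.array([[4., 9., 3.],
                     [2., 6., 9.],
                     [2., 6., -1.],
                     [1., 4., -3.]])
print(np.allclose(khatrirao_np(a1, a2), expected))  # True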
kyugorithm/pytorch-CycleGAN-Unet-D
|
[
"bca69aa1bd6329a0186188c53d501522d9f24fef"
] |
[
"util/image_pool.py"
] |
[
"import random\nimport torch\n\n\nclass ImagePool():\n \"\"\"This class implements an image buffer that stores previously generated images.\n\n This buffer enables us to update discriminators using a history of generated images\n rather than the ones produced by the latest generators.\n \"\"\"\n\n def __init__(self, pool_size):\n \"\"\"Initialize the ImagePool class\n\n Parameters:\n pool_size (int) -- the size of image buffer, if pool_size=0, no buffer will be created\n \"\"\"\n self.pool_size = pool_size\n if self.pool_size > 0: # create an empty pool\n self.num_imgs = 0\n self.images = []\n\n def query(self, images):\n \"\"\"Return an image from the pool.\n\n Parameters:\n images: the latest generated images from the generator\n\n Returns images from the buffer.\n\n By 50/100, the buffer will return input images.\n By 50/100, the buffer will return images previously stored in the buffer,\n and insert the current images to the buffer.\n \"\"\"\n if self.pool_size == 0: # if the buffer size is 0, do nothing\n return images\n return_images = []\n for image in images:\n \n image = torch.unsqueeze(image.data, 0)\n if self.num_imgs < self.pool_size: # if the buffer is not full; keep inserting current images to the buffer\n self.num_imgs = self.num_imgs + 1\n self.images.append(image)\n return_images.append(image)\n else:\n p = random.uniform(0, 1)\n if p > 0.5: # by 50% chance, the buffer will return a previously stored image, and insert the current image into the buffer\n random_id = random.randint(0, self.pool_size - 1) # randint is inclusive\n tmp = self.images[random_id].clone()\n self.images[random_id] = image\n return_images.append(tmp)\n else: # by another 50% chance, the buffer will return the current image\n return_images.append(image)\n \n return_images = torch.cat(return_images, 0) # collect all the images and return\n return return_images\n"
] |
[
[
"torch.unsqueeze",
"torch.cat"
]
] |
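image_pool.py leans on a small idiom: each (C, H, W) image tensor is promoted to (1, C, H, W) with torch.unsqueeze before being stored, so torch.cat along dim 0 can later stitch the buffered images back into a batch. A two-line illustration of that idiom with arbitrary shapes:

import torch

images = [torch.randn(3, 8, 8) for _ in range(4)]                 # four (C, H, W) images
batched = torch.cat([torch.unsqueeze(im, 0) for im in images], 0) # -> (N, C, H, W)
print(batched.shape)  # torch.Size([4, 3, 8, 8])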
Se-Hun/TIKITAKA
|
[
"e795945af71f9a66b9756bbe4062f6840e3ce70e"
] |
[
"onmt/decoders/transformer.py"
] |
[
"\"\"\"\nImplementation of \"Attention is All You Need\"\n\"\"\"\n\nimport torch\nimport torch.nn as nn\n\nfrom onmt.decoders.decoder import DecoderBase\nfrom onmt.modules import MultiHeadedAttention, AverageAttention\nfrom onmt.modules.position_ffn import PositionwiseFeedForward\nfrom onmt.utils.misc import sequence_mask\n\n\nclass TransformerDecoderLayer(nn.Module):\n \"\"\"Transformer Decoder layer block in Pre-Norm style.\n Pre-Norm style is an improvement w.r.t. Original paper's Post-Norm style,\n providing better converge speed and performance. This is also the actual\n implementation in tensor2tensor and also avalable in fairseq.\n See https://tunz.kr/post/4 and :cite:`DeeperTransformer`.\n\n .. mermaid::\n\n graph LR\n %% \"*SubLayer\" can be self-attn, src-attn or feed forward block\n A(input) --> B[Norm]\n B --> C[\"*SubLayer\"]\n C --> D[Drop]\n D --> E((+))\n A --> E\n E --> F(out)\n\n\n Args:\n d_model (int): the dimension of keys/values/queries in\n :class:`MultiHeadedAttention`, also the input size of\n the first-layer of the :class:`PositionwiseFeedForward`.\n heads (int): the number of heads for MultiHeadedAttention.\n d_ff (int): the second-layer of the :class:`PositionwiseFeedForward`.\n dropout (float): dropout in residual, self-attn(dot) and feed-forward\n attention_dropout (float): dropout in context_attn (and self-attn(avg))\n self_attn_type (string): type of self-attention scaled-dot, average\n max_relative_positions (int):\n Max distance between inputs in relative positions representations\n aan_useffn (bool): Turn on the FFN layer in the AAN decoder\n full_context_alignment (bool):\n whether enable an extra full context decoder forward for alignment\n alignment_heads (int):\n N. of cross attention heads to use for alignment guiding\n \"\"\"\n\n def __init__(self, d_model, heads, d_ff, dropout, attention_dropout,\n self_attn_type=\"scaled-dot\", max_relative_positions=0,\n aan_useffn=False, full_context_alignment=False,\n alignment_heads=0):\n super(TransformerDecoderLayer, self).__init__()\n\n if self_attn_type == \"scaled-dot\":\n self.self_attn = MultiHeadedAttention(\n heads, d_model, dropout=dropout,\n max_relative_positions=max_relative_positions)\n elif self_attn_type == \"average\":\n self.self_attn = AverageAttention(d_model,\n dropout=attention_dropout,\n aan_useffn=aan_useffn)\n\n self.context_attn = MultiHeadedAttention(\n heads, d_model, dropout=attention_dropout)\n self.feed_forward = PositionwiseFeedForward(d_model, d_ff, dropout)\n self.layer_norm_1 = nn.LayerNorm(d_model, eps=1e-6)\n self.layer_norm_2 = nn.LayerNorm(d_model, eps=1e-6)\n self.drop = nn.Dropout(dropout)\n self.full_context_alignment = full_context_alignment\n self.alignment_heads = alignment_heads\n\n def forward(self, *args, **kwargs):\n \"\"\" Extend `_forward` for (possibly) multiple decoder pass:\n Always a default (future masked) decoder forward pass,\n Possibly a second future aware decoder pass for joint learn\n full context alignement, :cite:`garg2019jointly`.\n\n Args:\n * All arguments of _forward.\n with_align (bool): whether return alignment attention.\n\n Returns:\n (FloatTensor, FloatTensor, FloatTensor or None):\n\n * output ``(batch_size, T, model_dim)``\n * top_attn ``(batch_size, T, src_len)``\n * attn_align ``(batch_size, T, src_len)`` or None\n \"\"\"\n with_align = kwargs.pop('with_align', False)\n output, attns = self._forward(*args, **kwargs)\n top_attn = attns[:, 0, :, :].contiguous()\n attn_align = None\n if with_align:\n if self.full_context_alignment:\n # 
return _, (B, Q_len, K_len)\n _, attns = self._forward(*args, **kwargs, future=True)\n\n if self.alignment_heads > 0:\n attns = attns[:, :self.alignment_heads, :, :].contiguous()\n # layer average attention across heads, get ``(B, Q, K)``\n # Case 1: no full_context, no align heads -> layer avg baseline\n # Case 2: no full_context, 1 align heads -> guided align\n # Case 3: full_context, 1 align heads -> full cte guided align\n attn_align = attns.mean(dim=1)\n return output, top_attn, attn_align\n\n def _forward(self, inputs, memory_bank, src_pad_mask, tgt_pad_mask,\n layer_cache=None, step=None, future=False):\n \"\"\" A naive forward pass for transformer decoder.\n\n # T: could be 1 in the case of stepwise decoding or tgt_len\n\n Args:\n inputs (FloatTensor): ``(batch_size, T, model_dim)``\n memory_bank (FloatTensor): ``(batch_size, src_len, model_dim)``\n src_pad_mask (LongTensor): ``(batch_size, 1, src_len)``\n tgt_pad_mask (LongTensor): ``(batch_size, 1, T)``\n layer_cache (dict or None): cached layer info when stepwise decode\n step (int or None): stepwise decoding counter\n future (bool): If set True, do not apply future_mask.\n\n Returns:\n (FloatTensor, FloatTensor):\n\n * output ``(batch_size, T, model_dim)``\n * attns ``(batch_size, head, T, src_len)``\n\n \"\"\"\n dec_mask = None\n\n if step is None:\n tgt_len = tgt_pad_mask.size(-1)\n if not future: # apply future_mask, result mask in (B, T, T)\n future_mask = torch.ones(\n [tgt_len, tgt_len],\n device=tgt_pad_mask.device,\n dtype=torch.uint8)\n future_mask = future_mask.triu_(1).view(1, tgt_len, tgt_len)\n # BoolTensor was introduced in pytorch 1.2\n try:\n future_mask = future_mask.bool()\n except AttributeError:\n pass\n dec_mask = torch.gt(tgt_pad_mask + future_mask, 0)\n else: # only mask padding, result mask in (B, 1, T)\n dec_mask = tgt_pad_mask\n\n input_norm = self.layer_norm_1(inputs)\n\n if isinstance(self.self_attn, MultiHeadedAttention):\n query, _ = self.self_attn(input_norm, input_norm, input_norm,\n mask=dec_mask,\n layer_cache=layer_cache,\n attn_type=\"self\")\n elif isinstance(self.self_attn, AverageAttention):\n query, _ = self.self_attn(input_norm, mask=dec_mask,\n layer_cache=layer_cache, step=step)\n\n query = self.drop(query) + inputs\n\n query_norm = self.layer_norm_2(query)\n mid, attns = self.context_attn(memory_bank, memory_bank, query_norm,\n mask=src_pad_mask,\n layer_cache=layer_cache,\n attn_type=\"context\")\n output = self.feed_forward(self.drop(mid) + query)\n\n return output, attns\n\n def update_dropout(self, dropout, attention_dropout):\n self.self_attn.update_dropout(attention_dropout)\n self.context_attn.update_dropout(attention_dropout)\n self.feed_forward.update_dropout(dropout)\n self.drop.p = dropout\n\n\nclass TransformerDecoder(DecoderBase):\n \"\"\"The Transformer decoder from \"Attention is All You Need\".\n :cite:`DBLP:journals/corr/VaswaniSPUJGKP17`\n\n .. 
mermaid::\n\n graph BT\n A[input]\n B[multi-head self-attn]\n BB[multi-head src-attn]\n C[feed forward]\n O[output]\n A --> B\n B --> BB\n BB --> C\n C --> O\n\n\n Args:\n num_layers (int): number of encoder layers.\n d_model (int): size of the model\n heads (int): number of heads\n d_ff (int): size of the inner FF layer\n copy_attn (bool): if using a separate copy attention\n self_attn_type (str): type of self-attention scaled-dot, average\n dropout (float): dropout in residual, self-attn(dot) and feed-forward\n attention_dropout (float): dropout in context_attn (and self-attn(avg))\n embeddings (onmt.modules.Embeddings):\n embeddings to use, should have positional encodings\n max_relative_positions (int):\n Max distance between inputs in relative positions representations\n aan_useffn (bool): Turn on the FFN layer in the AAN decoder\n full_context_alignment (bool):\n whether enable an extra full context decoder forward for alignment\n alignment_layer (int): Nยฐ Layer to supervise with for alignment guiding\n alignment_heads (int):\n N. of cross attention heads to use for alignment guiding\n \"\"\"\n\n def __init__(self, num_layers, d_model, heads, d_ff,\n copy_attn, self_attn_type, dropout, attention_dropout,\n embeddings, max_relative_positions, aan_useffn,\n full_context_alignment, alignment_layer,\n alignment_heads):\n super(TransformerDecoder, self).__init__()\n\n self.embeddings = embeddings\n\n # Decoder State\n self.state = {}\n\n self.transformer_layers = nn.ModuleList(\n [TransformerDecoderLayer(d_model, heads, d_ff, dropout,\n attention_dropout, self_attn_type=self_attn_type,\n max_relative_positions=max_relative_positions,\n aan_useffn=aan_useffn,\n full_context_alignment=full_context_alignment,\n alignment_heads=alignment_heads)\n for i in range(num_layers)])\n\n # previously, there was a GlobalAttention module here for copy\n # attention. 
But it was never actually used -- the \"copy\" attention\n # just reuses the context attention.\n self._copy = copy_attn\n self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)\n\n self.alignment_layer = alignment_layer\n\n @classmethod\n def from_opt(cls, opt, embeddings):\n \"\"\"Alternate constructor.\"\"\"\n return cls(\n opt.dec_layers,\n opt.dec_rnn_size,\n opt.heads,\n opt.transformer_ff,\n opt.copy_attn,\n opt.self_attn_type,\n opt.dropout[0] if type(opt.dropout) is list else opt.dropout,\n opt.attention_dropout[0] if type(opt.attention_dropout)\n is list else opt.dropout,\n embeddings,\n opt.max_relative_positions,\n opt.aan_useffn,\n opt.full_context_alignment,\n opt.alignment_layer,\n alignment_heads=opt.alignment_heads)\n\n def init_state(self, src, memory_bank, enc_hidden):\n \"\"\"Initialize decoder state.\"\"\"\n self.state[\"src\"] = src\n self.state[\"cache\"] = None\n\n def map_state(self, fn):\n def _recursive_map(struct, batch_dim=0):\n for k, v in struct.items():\n if v is not None:\n if isinstance(v, dict):\n _recursive_map(v)\n else:\n struct[k] = fn(v, batch_dim)\n\n self.state[\"src\"] = fn(self.state[\"src\"], 1)\n if self.state[\"cache\"] is not None:\n _recursive_map(self.state[\"cache\"])\n\n def detach_state(self):\n self.state[\"src\"] = self.state[\"src\"].detach()\n\n def forward(self, tgt, memory_bank, step=None, **kwargs):\n \"\"\"Decode, possibly stepwise.\"\"\"\n if step == 0:\n self._init_cache(memory_bank)\n\n tgt_words = tgt[:, :, 0].transpose(0, 1)\n\n emb = self.embeddings(tgt, step=step)\n assert emb.dim() == 3 # len x batch x embedding_dim\n\n output = emb.transpose(0, 1).contiguous()\n src_memory_bank = memory_bank.transpose(0, 1).contiguous()\n\n pad_idx = self.embeddings.word_padding_idx\n src_lens = kwargs[\"memory_lengths\"]\n src_max_len = self.state[\"src\"].shape[0]\n src_pad_mask = ~sequence_mask(src_lens, src_max_len).unsqueeze(1)\n tgt_pad_mask = tgt_words.data.eq(pad_idx).unsqueeze(1) # [B, 1, T_tgt]\n\n with_align = kwargs.pop('with_align', False)\n attn_aligns = []\n\n for i, layer in enumerate(self.transformer_layers):\n layer_cache = self.state[\"cache\"][\"layer_{}\".format(i)] \\\n if step is not None else None\n output, attn, attn_align = layer(\n output,\n src_memory_bank,\n src_pad_mask,\n tgt_pad_mask,\n layer_cache=layer_cache,\n step=step,\n with_align=with_align)\n if attn_align is not None:\n attn_aligns.append(attn_align)\n\n output = self.layer_norm(output)\n dec_outs = output.transpose(0, 1).contiguous()\n attn = attn.transpose(0, 1).contiguous()\n\n attns = {\"std\": attn}\n if self._copy:\n attns[\"copy\"] = attn\n if with_align:\n attns[\"align\"] = attn_aligns[self.alignment_layer] # `(B, Q, K)`\n # attns[\"align\"] = torch.stack(attn_aligns, 0).mean(0) # All avg\n\n # TODO change the way attns is returned dict => list or tuple (onnx)\n return dec_outs, attns\n\n def _init_cache(self, memory_bank):\n self.state[\"cache\"] = {}\n batch_size = memory_bank.size(1)\n depth = memory_bank.size(-1)\n\n for i, layer in enumerate(self.transformer_layers):\n layer_cache = {\"memory_keys\": None, \"memory_values\": None}\n if isinstance(layer.self_attn, AverageAttention):\n layer_cache[\"prev_g\"] = torch.zeros((batch_size, 1, depth),\n device=memory_bank.device)\n else:\n layer_cache[\"self_keys\"] = None\n layer_cache[\"self_values\"] = None\n self.state[\"cache\"][\"layer_{}\".format(i)] = layer_cache\n\n def update_dropout(self, dropout, attention_dropout):\n self.embeddings.update_dropout(dropout)\n for layer in 
self.transformer_layers:\n layer.update_dropout(dropout, attention_dropout)\n"
] |
[
[
"torch.nn.Dropout",
"torch.ones",
"torch.zeros",
"torch.nn.LayerNorm",
"torch.gt"
]
] |
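The `_forward` pass in the decoder code above combines a padding mask with a strictly upper-triangular "future" mask before self-attention. A minimal standalone sketch of that mask construction (not OpenNMT-py's API; the shapes and the PyTorch >= 1.2 `bool()` cast are assumptions carried over from the snippet):

```python
import torch

def decoder_mask(tgt_pad_mask: torch.Tensor) -> torch.Tensor:
    """tgt_pad_mask: (batch, 1, T) bool, True at padding positions.
    Returns a (batch, T, T) bool mask, True where attention is blocked."""
    tgt_len = tgt_pad_mask.size(-1)
    # strictly-upper triangle marks the "future" positions
    future = torch.ones(tgt_len, tgt_len,
                        device=tgt_pad_mask.device,
                        dtype=torch.uint8).triu_(1).bool()
    future = future.view(1, tgt_len, tgt_len)
    # same effect as torch.gt(tgt_pad_mask + future_mask, 0) in the snippet
    return tgt_pad_mask | future

pad = torch.tensor([[[False, False, True]]])  # batch of 1, T=3, last token is padding
print(decoder_mask(pad).int())
```

During stepwise decoding (`step is not None`) the snippet skips the future mask entirely, since only the newest query position attends back over the cached keys and values.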
IdahoLabResearch/SFWDSubsurface
|
[
"6b926e90963cd24805c3eed12a2fddbb0a259835"
] |
[
"GeoprocessingServiceScriptResources/StressFieldsRoseDiagram.py"
] |
[
"\r\nimport arcpy\r\nfrom arcpy import env\r\nfrom arcpy.sa import *\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\n\r\nimport os\r\nimport time\r\nimport sys\r\n\r\narcpy.env.parallelProcessingFactor = \"100%\"\r\n\r\nt0 = time.clock() \r\n\r\n# Set the environment:\r\narcpy.env.overwriteOutput = True\r\n\r\nscriptPath = arcpy.GetParameter(0)\r\nscriptPath = sys.path[0]\r\n\r\n\r\n#Set variables for script:\r\n\r\nStressFieldsUSA_WM = \"StressFieldsUSA_WM\"\r\n\r\nSelectionPolygon = \"SelectionPolygon\"\r\n\r\nSelectionPolygon = arcpy.GetParameter(1)\r\n\r\n\r\nTableFromSelection = os.path.join(\"in_memory\", \"TableFromSelection\")\r\n\r\n\r\n# make vents and polygon feature layers:\r\n\r\narcpy.MakeFeatureLayer_management(StressFieldsUSA_WM,\"StressFieldsFeatLyr\")\r\narcpy.AddMessage(\"after make vents feat lyr: Elapsed time: {0} seconds\".format(int(time.clock() - t0)))\r\n\r\narcpy.MakeFeatureLayer_management(SelectionPolygon,\"PolyFeatLyr\")\r\narcpy.AddMessage(\"after make poly feat lyr: Elapsed time: {0} seconds\".format(int(time.clock() - t0)))\r\n\r\n# Select vents with polygon:\r\narcpy.SelectLayerByLocation_management(\"StressFieldsFeatLyr\",\"COMPLETELY_WITHIN\",\"PolyFeatLyr\")\r\n\r\narcpy.AddMessage(\"after selection: Elapsed time: {0} seconds\".format(int(time.clock() - t0)))\r\n\r\n# Create table from selcted records:\r\narcpy.CopyRows_management (\"StressFieldsFeatLyr\", TableFromSelection)\r\narcpy.AddMessage(\"after create selection table: Elapsed time: {0} seconds\".format(int(time.clock() - t0)))\r\n\r\n# table to data frame: \r\n\r\n##### credit: https://gist.github.com/d-wasserman/e9c98be1d0caebc2935afecf0ba239a0 #### \r\n\r\n\r\ndef arcgis_table_to_dataframe(in_fc, input_fields, query=\"\", skip_nulls=False, null_values=None):\r\n \"\"\"Function will convert an arcgis table into a pandas dataframe with an object ID index, and the selected\r\n input fields. 
Uses TableToNumPyArray to get initial data.\r\n :param - in_fc - input feature class or table to convert\r\n :param - input_fields - fields to input into a da numpy converter function\r\n :param - query - sql like query to filter out records returned\r\n :param - skip_nulls - skip rows with null values\r\n :param - null_values - values to replace null values with.\r\n :returns - pandas dataframe\"\"\"\r\n OIDFieldName = arcpy.Describe(in_fc).OIDFieldName\r\n if input_fields:\r\n final_fields = [OIDFieldName] + input_fields\r\n else:\r\n final_fields = [field.name for field in arcpy.ListFields(in_fc)]\r\n np_array = arcpy.da.TableToNumPyArray(in_fc, final_fields, query, skip_nulls, null_values)\r\n object_id_index = np_array[OIDFieldName]\r\n fc_dataframe = pd.DataFrame(np_array, index=object_id_index, columns=input_fields)\r\n return fc_dataframe\r\n\t\r\nfc_dataframe = arcgis_table_to_dataframe(TableFromSelection,['AZI'])\t\r\n\t\r\narcpy.AddMessage(\"after converting selection table to data frame: Elapsed time: {0} seconds\".format(int(time.clock() - t0)))\r\narcpy.AddMessage(fc_dataframe)\r\n\r\n#dataframe to histogram array\r\n\t\r\n## run numpy.histogram\r\n##https://numpy.org/doc/stable/reference/generated/numpy.histogram.html ### Just return the array of bin counts as final\r\n\r\nAzimuthArray = fc_dataframe\r\n\r\n# Creating histogram \r\nnp.histogram(AzimuthArray, bins = [0,5,10,15,20,25,30,35,40,45,50,55,60,65,70,75,80,85,90,95,100,105,110,115,120,125,130,135,140,145,150,155,160,165,170,175,180,185,190,195,200,205,210,215,220,225,230,235,240,245,250,255,260,265,270,275,280,285,290,295,300,305,310,315,320,325,330,335,340,345,350,355,360]) \r\n\r\nhist, bins = np.histogram(AzimuthArray, bins = [0,5,10,15,20,25,30,35,40,45,50,55,60,65,70,75,80,85,90,95,100,105,110,115,120,125,130,135,140,145,150,155,160,165,170,175,180,185,190,195,200,205,210,215,220,225,230,235,240,245,250,255,260,265,270,275,280,285,290,295,300,305,310,315,320,325,330,335,340,345,350,355,360]) \r\n\r\narcpy.AddMessage(hist)\r\n\r\nazimuthList = list(hist)\r\narcpy.AddMessage(azimuthList)\r\n\r\n# return the hitogram array as the final array, make parameter: \r\n\r\narcpy.AddMessage(\"after creating bins for rose diagram: Elapsed time: {0} seconds\".format(int(time.clock() - t0)))\r\n\r\narcpy.SetParameterAsText(2, azimuthList)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n"
] |
[
[
"numpy.histogram",
"pandas.DataFrame"
]
] |
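Once the arcpy selection is done, the core of StressFieldsRoseDiagram.py is just binning azimuth values into 5-degree sectors with `numpy.histogram`. That step in isolation, with synthetic azimuths and `np.arange` standing in for the hand-written bin list:

```python
import numpy as np

azimuths = np.array([2.0, 7.5, 12.0, 179.0, 181.0, 359.9])  # degrees, stand-in data
bins = np.arange(0, 365, 5)          # 0, 5, ..., 360 -> 72 five-degree sectors
hist, edges = np.histogram(azimuths, bins=bins)
print(list(hist))                    # sector counts fed to the rose diagram
```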
goyal07nidhi/Stock-Market-Analysis-Pipeline-AWS
|
[
"24bbf629dbc296b002fb443741c8102bf5200e45"
] |
[
"LSTM_model/main.py"
] |
[
"import pandas as pd\r\nimport numpy as np\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nfrom tensorflow.keras.models import Sequential\r\nfrom tensorflow.keras.layers import Dense\r\nfrom tensorflow.keras.layers import LSTM\r\nimport tensorflow as tf\r\nimport boto3\r\nfrom io import StringIO\r\n\r\nACCESS_KEY = \"aws-access-key\"\r\nSECRET_KEY = \"aws-secret-access-key\"\r\nBUCKET = 'aws-bucket'\r\nREGION_NAME = 'aws-region'\r\n\r\n# To print all filenames in a certain directory in a bucket\r\ns3 = boto3.client('s3', region_name=REGION_NAME, aws_access_key_id=ACCESS_KEY, aws_secret_access_key=SECRET_KEY)\r\n\r\nfilename = []\r\n\r\n# Get a list of keys in an S3 bucket.\r\nresp = s3.list_objects_v2(Bucket=BUCKET, Prefix='Batchprocess')\r\nfor obj in resp['Contents']:\r\n files = obj['Key']\r\n filename.append(files)\r\n\r\nfor x in filename:\r\n try:\r\n if x.split('/')[1] != '':\r\n history = s3.get_object(Bucket=BUCKET, Key=x)\r\n df = pd.read_csv(history['Body'], sep=',')\r\n\r\n # considering Close column\r\n df1 = df.reset_index()['Close']\r\n\r\n # Scaling the feartures\r\n scaler = MinMaxScaler(feature_range=(0, 1))\r\n df1 = scaler.fit_transform(np.array(df1).reshape(-1, 1))\r\n\r\n # splitting dataset into train and test split\r\n training_size = int(len(df1) * 0.65)\r\n test_size = len(df1) - training_size\r\n train_data, test_data = df1[0:training_size, :], df1[training_size:len(df1), :1]\r\n\r\n # convert an array of values into a dataset matrix\r\n def create_dataset(dataset, time_step=1):\r\n dataX, dataY = [], []\r\n for i in range(len(dataset) - time_step - 1):\r\n a = dataset[i:(i + time_step), 0]\r\n dataX.append(a)\r\n dataY.append(dataset[i + time_step, 0])\r\n return np.array(dataX), np.array(dataY)\r\n\r\n\r\n # reshape into X=t,t+1,t+2,t+3 and Y=t+4\r\n time_step = 40\r\n X_train, y_train = create_dataset(train_data, time_step)\r\n X_test, ytest = create_dataset(test_data, time_step)\r\n\r\n print(test_size)\r\n\r\n # reshape input to be [samples, time steps, features] which is required for LSTM\r\n X_train = X_train.reshape(X_train.shape[0], X_train.shape[1], 1)\r\n X_test = X_test.reshape(X_test.shape[0], X_test.shape[1], 1)\r\n\r\n # Model\r\n model = Sequential()\r\n model.add(LSTM(50, return_sequences=True, input_shape=(40, 1)))\r\n model.add(LSTM(50, return_sequences=True))\r\n model.add(LSTM(50))\r\n model.add(Dense(1))\r\n model.compile(loss='mean_squared_error', optimizer='adam')\r\n\r\n # Fitting the data\r\n model.fit(X_train, y_train, validation_data=(X_test, ytest), epochs=100, batch_size=64, verbose=1)\r\n\r\n # Prediction for next 90 days\r\n x_input = test_data[len(test_data) - 40:].reshape(1, -1)\r\n\r\n temp_input = list(x_input)\r\n temp_input = temp_input[0].tolist()\r\n\r\n lst_output = []\r\n n_steps = 40\r\n i = 0\r\n\r\n while i < 90:\r\n if len(temp_input) > 40:\r\n x_input = np.array(temp_input[1:])\r\n x_input = x_input.reshape(1, -1)\r\n x_input = x_input.reshape((1, n_steps, 1))\r\n yhat = model.predict(x_input, verbose=0)\r\n temp_input.extend(yhat[0].tolist())\r\n temp_input = temp_input[1:]\r\n lst_output.extend(yhat.tolist())\r\n i = i + 1\r\n else:\r\n x_input = x_input.reshape((1, n_steps, 1))\r\n yhat = model.predict(x_input, verbose=0)\r\n temp_input.extend(yhat[0].tolist())\r\n lst_output.extend(yhat.tolist())\r\n i = i + 1\r\n\r\n datelist = pd.date_range(pd.to_datetime(df['Date']).dt.date.max(), periods=90).tolist()\r\n\r\n predict_df = pd.DataFrame()\r\n predict_df['Date'] = datelist\r\n 
predict_df['Close_Price_Prediction'] = scaler.inverse_transform(lst_output)\r\n\r\n csv_buffer = StringIO()\r\n predict_df.to_csv(csv_buffer)\r\n s3_resource = boto3.resource('s3', region_name=REGION_NAME, aws_access_key_id='aws-access-key',\r\n aws_secret_access_key='aws-secret-access-key')\r\n s3_resource.Object('aws-bucket', 'prediction/' + x.split('/')[1]).put(Body=csv_buffer.getvalue())\r\n except:\r\n pass\r\n"
] |
[
[
"pandas.read_csv",
"pandas.to_datetime",
"tensorflow.keras.layers.Dense",
"pandas.DataFrame",
"tensorflow.keras.layers.LSTM",
"numpy.array",
"tensorflow.keras.models.Sequential",
"sklearn.preprocessing.MinMaxScaler"
]
] |
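The windowing helper in main.py turns the scaled price series into supervised pairs (40 past values predict the next one). The same logic isolated from the AWS and Keras plumbing, run on synthetic data:

```python
import numpy as np

def create_dataset(series: np.ndarray, time_step: int = 40):
    """series: (N, 1) array; returns X of shape (samples, time_step) and y of shape (samples,)."""
    X, y = [], []
    for i in range(len(series) - time_step - 1):
        X.append(series[i:i + time_step, 0])
        y.append(series[i + time_step, 0])
    return np.array(X), np.array(y)

series = np.linspace(0.0, 1.0, 200).reshape(-1, 1)  # stand-in for the scaled Close column
X, y = create_dataset(series, time_step=40)
X = X.reshape(X.shape[0], X.shape[1], 1)            # (samples, time steps, features) for the LSTM
print(X.shape, y.shape)                             # (159, 40, 1) (159,)
```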
floatingstarZ/loc_cls_exp
|
[
"8b971db671753d3571914aaa760cc13ac47018e8"
] |
[
"commonlibs/math_tools/coco_match_bbox.py"
] |
[
"import torch\nfrom commonlibs.math_tools.IOU import IOU\n\ndef _match_DT_GT(dts, dt_scores, dt_labels,\n gts, gt_labels,\n cat_label, threshold):\n \"\"\"\n \n :param dts: M x 4, det bboxes, left top right down\n :param dt_labels: M, det bbox labels\n :param dt_scores: M, det bbox scores\n \n :param gts: N x 4, ground truth\n :param gt_labels: N, ground truth labels\n :param cat_label: int, category labels\n :param threshold: float, iou threshold\n :return: \n matched dt bboxes, matched dt scores\n matched gt bboxes\n matched dt index: int number\n matched gt index: int number\n \"\"\"\n # label == category\n # .._indices: index of .. in origin .. order\n gt_indices = torch.arange(0, len(gt_labels))[gt_labels == cat_label].long()\n gt = gts[gt_indices]\n\n dt_indices = torch.arange(0, len(dt_labels))[dt_labels == cat_label].long()\n dt = dts[dt_indices]\n scores = dt_scores[dt_indices]\n\n scores, ids = torch.sort(scores, descending=True)\n dt_indices = dt_indices[ids]\n dt = dt[ids]\n\n # gtm: gt matched dts' id(in input order)\n gtm = -gt_labels.new_ones(len(gt)).long()\n dtm = -gt_labels.new_ones(len(dt)).long()\n\n if len(gt) == 0 or len(dt) == 0:\n e = gt.new_tensor([]).long()\n return e, e, e, e, e, e, e\n\n ious = IOU(dt, gt)\n for dind, d in enumerate(dt):\n iou = min([threshold, 1 - 1e-10])\n m = -1\n # match d with gts\n for gind, g in enumerate(gt):\n # if g already matched\n if gtm[gind] != -1:\n continue\n # continue to next gt unless better match made\n if ious[dind, gind] < iou:\n continue\n # save matched gind and iou\n iou = ious[dind, gind]\n m = gind\n # if d doesn't matched any gts\n if m == -1:\n continue\n # save matched ids(in input order)\n gtm[m] = dt_indices[dind]\n dtm[dind] = gt_indices[m]\n\n matched_dt = dt[dtm > -1]\n matched_dt_score = scores[dtm > -1]\n matched_dt_ids = dt_indices[dtm > -1]\n dtm = dtm[dtm > -1]\n\n matched_gt = gt[gtm > -1]\n matched_gt_ids = gt_indices[gtm > -1]\n gtm = gtm[gtm>-1]\n\n return matched_dt, matched_dt_score, matched_dt_ids, \\\n matched_gt, matched_gt_ids, dtm, gtm\n\ndef match_dt_with_gt(dts, dt_scores, dt_labels,\n gts, gt_labels,\n cat_label_list, threshold):\n \"\"\"\n\n :param dts: M x 4, det bboxes, left top right down\n :param dt_labels: M, det bbox labels\n :param dt_scores: M, det bbox scores\n\n :param gts: N x 4, ground truth\n :param gt_labels: N, ground truth labels\n :param cat_label_list: [int], possible category label list\n :param threshold: float, iou threshold\n :return: \n gtmatched: matched dt idx in dt_labels order, int, -1 -> unmatched \n matched gt index: bool\n \"\"\"\n gtmatched = -gts.new_ones(len(gt_labels)).long()\n dtmatched = -gts.new_ones(len(dt_labels)).long()\n if len(gt_labels) == 0 or len(gt_labels) == 0:\n e = gt_labels.new_tensor([]).long()\n return e, e\n\n for cat_label in cat_label_list:\n matched_dt, matched_dt_score, matched_dt_ids, \\\n matched_gt, matched_gt_ids, dtm, gtm = \\\n _match_DT_GT(dts, dt_scores, dt_labels,\n gts, gt_labels, cat_label, threshold)\n gtmatched[matched_gt_ids] = gtm\n dtmatched[matched_dt_ids] = dtm\n return gtmatched, dtmatched\n\nif __name__ == '__main__':\n gts = torch.Tensor([[0, 0, 5, 5],\n [20, 20, 25, 25]])\n gt_labels = torch.Tensor([0, 1])\n dts = torch.Tensor([[0, 0, 3, 3],\n [20, 20, 23, 23],\n [0, 0, 5, 3],\n [20, 20, 25, 23],\n [20, 20, 25, 25]])\n dt_score = torch.Tensor([1, 0.5, 1.2, 0.7, 10])\n dt_labels = torch.Tensor([0, 1, 0, 1, 0])\n cat_label_list = torch.Tensor([0, 1])\n\n threshold = 0.1\n\n print(match_dt_with_gt(dts, 
dt_score,dt_labels,\n gts, gt_labels,\n cat_label_list, threshold))\n\n\n\n\n\n\n\n\n"
] |
[
[
"torch.sort",
"torch.Tensor"
]
] |
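`_match_DT_GT` above depends on an `IOU(dt, gt)` helper imported from `commonlibs` that is not shown. A plausible stand-in (an assumption, not the repo's implementation): pairwise IoU for `(left, top, right, bottom)` boxes, which makes the matching script self-contained:

```python
import torch

def pairwise_iou(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
    """a: (M, 4), b: (N, 4) boxes in xyxy order; returns an (M, N) IoU matrix."""
    lt = torch.max(a[:, None, :2], b[None, :, :2])   # intersection top-left
    rb = torch.min(a[:, None, 2:], b[None, :, 2:])   # intersection bottom-right
    wh = (rb - lt).clamp(min=0)
    inter = wh[..., 0] * wh[..., 1]
    area_a = (a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1])
    area_b = (b[:, 2] - b[:, 0]) * (b[:, 3] - b[:, 1])
    return inter / (area_a[:, None] + area_b[None, :] - inter)

dt = torch.tensor([[0., 0., 3., 3.], [20., 20., 23., 23.]])
gt = torch.tensor([[0., 0., 5., 5.], [20., 20., 25., 25.]])
print(pairwise_iou(dt, gt))   # diagonal entries are 9/25, off-diagonals are 0
```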
RockawayIBMHackathonPrague2017/bumblebees
|
[
"efa8184f06e677a343713a7f66e4fdaed4969f66"
] |
[
"distance.py"
] |
[
"#!/usr/bin/python\n# -*- coding: utf8 -*-\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.preprocessing import MinMaxScaler\nfrom math import *\nfrom decimal import Decimal\nfrom scipy.spatial import distance\n\ndef get_all_params(items):\n all_params = {}\n for item in items:\n params = item['PARAMS']\n for param in params:\n if all_params.get(param) == None:\n all_params[param] = []\n all_params[param].append(params[param])\n return all_params\n\ndef create_DataFrame(items):\n params = get_all_params(items)\n dataset = []\n ids = []\n for item in items:\n item_params = item['PARAMS']\n observation = []\n for prm in params:\n observation.append(item_params.get(prm))\n dataset.append(observation)\n ids.append(item['ITEM_ID'])\n\n dataset = np.array(dataset)\n return pd.DataFrame(dataset, columns=params), ids\n\nclass Distance():\n def __init__(self, items, drop=[]):\n self._items = items\n self.df, self.ids = create_DataFrame(items)\n self._drop_columns(drop)\n print('Distance obj: Creting DataFrame')\n self._remove_unsignificant_columns()\n print('Distance obj: Removing unsignificant columns')\n self._encode_color()\n print('Distance obj: Encoding special columns')\n self._fit()\n print('Distance obj: Fitting')\n\n def _drop_columns(self, columns):\n [x.decode('utf-8') for x in columns]\n for item in columns:\n self.df = self.df.drop(item.decode('utf-8'), 1)\n\n def _remove_unsignificant_columns(self):\n for col in self.df:\n v = self.df[col]\n percentage = sum(x == None for x in v) / len(v)\n if percentage > 0.9:\n self.df = self.df.drop(col, 1)\n\n def _encode_color(self):\n try:\n index = self.df.columns.get_loc('Barva')\n print(index)\n color_column = []\n for item in self.df.iloc[:, index]:\n if item == None:\n color_column.append(0)\n else:\n color_column.append(int('0x{}'.format(COLORS[item]), 16))\n self.df.iloc[:, index] = np.array(color_column)\n except KeyError:\n print('No color in DataFrame')\n\n def _fit(self):\n dummy_df = pd.get_dummies(self.df, drop_first=True)\n # add price\n product_price = [item['PRICE_VAT'] for item in self._items]\n dummy_df['PRICE_VAT'] = pd.Series(product_price, index=dummy_df.index)\n\n self.dummy_df = dummy_df\n X = self.dummy_df.iloc[:, :].values\n X = X.astype(np.float64)\n\n sc_X = MinMaxScaler(feature_range=(0, 1))\n # sc_X = StandardScaler()\n self.X = sc_X.fit_transform(X)\n\n def train_euclidean(self, observation):\n y = []\n for xi in self.X:\n y.append(np.sqrt(np.sum((self.X[observation, :] - xi) ** 2)))\n return y\n\n def train_cosine(self, observation):\n y = []\n for xi in self.X:\n y.append(distance.cosine(self.X[observation, :], xi))\n return y\n\n def train_manhattan(self, observation):\n y = []\n for xi in self.X:\n y.append(sum(abs(a - b) for a, b in zip(xi, self.X[observation, :])))\n return y\n\n def nth_root(self, value, n_root):\n root_value = 1 / float(n_root)\n return round(Decimal(value) ** Decimal(root_value), 3)\n\n def train_minkowski(self, observation, p_value):\n y = []\n for xi in self.X:\n y.append(self.nth_root(sum(pow(abs(a - b), p_value) for a, b in zip(xi, self.X[observation, :])), p_value))\n return y\n\n def get_df(self, observation):\n df = pd.DataFrame(self.df)\n\n #df['Distance_eucl'] = pd.Series(self.train_euclidean(observation), index=df.index)\n df['Distance_cos'] = pd.Series(self.train_cosine(observation), index=df.index)\n #df['Distance_manh'] = pd.Series(self.train_manhattan(observation), index=df.index)\n #df['Distance_mink'] = pd.Series(self.train_minkowski(observation, 3), 
index=df.index)\n\n product_names = [item['PRODUCTNAME'] for item in self._items]\n product_desc = [item['DESCRIPTION'] for item in self._items]\n product_price = [item['PRICE_VAT'] for item in self._items]\n df['PRODUCTNAME'] = pd.Series(product_names, index=df.index)\n df['DESCRIPTION'] = pd.Series(product_desc, index=df.index)\n df['PRICE_VAT'] = pd.Series(product_price, index=df.index)\n return df\n def get_items(self, observation):\n print('get items', observation)\n items = self._items\n distances = self.train_cosine(observation)\n stacked = np.column_stack((distances, items))\n sorted = stacked[stacked[:,0].argsort()]\n return sorted[:,1]\n\n\nCOLORS = {\n 'None': '0',\n 'nerozliลกuje se': '0',\n 'vรญcebarevnรก': '0',\n\n 'azurovรก': '00FFFF',\n 'bรฉลพovรก': 'F5F5DC',\n 'bรญlรก': 'FFFFFF',\n 'bรญlรก/hnฤdรก': 'FFFFFF',\n 'bรญlรก/rลฏลพovรก': 'FFFFFF',\n 'bรญlรก/stลรญbrnรก': 'FFFFFF',\n 'bรญlรก/zlatรก': 'FFFFFF',\n 'bรญlรก/ฤernรก': 'FFFFFF',\n 'bรญlรก/ฤervenรก': 'FFFFFF',\n 'bรญlรก/ลกedรก': 'FFFFFF',\n 'chrom': '808080',\n 'cihlovรก': 'B22222',\n 'dub': 'A52A2A',\n 'fialovรก': 'EE82EE',\n 'grafitovฤ ลกedรก': '808080',\n 'hnฤdรก': 'A52A2A',\n 'hnฤdรก/zelenรก': 'A52A2A',\n 'khaki': 'F0E68C',\n 'kรกvovรก/ลพula': 'A52A2A',\n 'matnรก': '0000FF',\n 'modrรก': '0000FF',\n 'modrรก/oranลพovรก': '0000FF',\n 'modrรก/tmavฤ modrรก': '0000FF',\n 'modrรก/zelenรก': '0000FF',\n 'modrรก/ฤernรก': '0000FF',\n 'mฤฤ': 'A52A2A',\n 'nรกmoลnรญ modrรก': '0000FF',\n 'oranลพovรก': 'FFA500',\n 'purpurovรก svฤtlรก': '9370DB',\n 'rลฏลพovรก': 'FFC0CB',\n 'rลฏลพovรก/fialovรก': 'FFC0CB',\n 'stลรญbrnรก': 'C0C0C0',\n 'stลรญbrnรก/modrรก': 'C0C0C0',\n 'stลรญbrnรก/rลฏลพovรก': 'C0C0C0',\n 'stลรญbrnรก/ฤernรก': 'C0C0C0',\n 'stลรญbrnรก/ลกedรก': 'C0C0C0',\n 'svฤtle hnฤdรก': 'A52A2A',\n 'svฤtle modrรก': '0000FF',\n 'svฤtle rลฏลพovรก': 'FFC0CB',\n 'svฤtle zelenรก': '008000',\n 'svฤtle ลกedรก': '808080',\n 'titan': 'C0C0C0',\n 'tmavฤ fialovรก': 'EE82EE',\n 'tmavฤ modrรก': '0000FF',\n 'tmavฤ ลกedรก': '808080',\n 'tyrkysovรก': '0000FF',\n 'vรญnovรก': 'FF0000',\n 'zelenรก': '008000',\n 'zlatรก': 'FFD700',\n 'zlatรก/hnฤdรก': 'FFD700',\n 'ฤernรก': '000000',\n 'ฤernรก/bรญlรก': '000000',\n 'ฤernรก/lesk': '000000',\n 'ฤernรก/mat': '000000',\n 'ฤernรก/modrรก': '000000',\n 'ฤernรก/oranลพovรก': '000000',\n 'ฤernรก/stลรญbrnรก': '000000',\n 'ฤernรก/tmavฤ ลกedรก': '000000',\n 'ฤernรก/zelenรก': '000000',\n 'ฤernรก/zlatรก': '000000',\n 'ฤernรก/ฤervenรก': '000000',\n 'ฤernรก/ลกedรก': '000000',\n 'ฤernรก/ลพlutรก': '000000',\n 'ฤervenรก': 'FF0000',\n 'ฤervenรก/modrรก': 'FF0000',\n 'ฤervenรก/ฤernรก': 'FF0000',\n 'ฤirรก': '808080',\n 'ลกedรก': '808080',\n 'ลกedรก/zelenรก': '808080',\n 'ลกedรก/ฤernรก': '808080',\n 'ลพlutรก': 'FFFF00',\n 'ลพlutรก/modrรก': 'FFFF00',\n 'ลพlutรก/ฤernรก': 'FFFF00',\n}\n\n"
] |
[
[
"pandas.Series",
"scipy.spatial.distance.cosine",
"pandas.DataFrame",
"numpy.column_stack",
"numpy.array",
"numpy.sum",
"sklearn.preprocessing.MinMaxScaler",
"pandas.get_dummies"
]
] |
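distance.py boils down to: encode the product parameters, min-max scale them, then rank items by cosine distance to a reference row. The ranking idea condensed to a few lines on a synthetic feature matrix (the column meanings are invented):

```python
import numpy as np
from scipy.spatial import distance
from sklearn.preprocessing import MinMaxScaler

X = np.array([[1.0, 200.0],      # e.g. encoded colour, price
              [2.0, 180.0],
              [9.0,  20.0]])
X = MinMaxScaler(feature_range=(0, 1)).fit_transform(X)

query = 0                                        # index of the reference item
dists = [distance.cosine(X[query], row) for row in X]
print(dists, np.argsort(dists))                  # most similar items first
```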
bsmrvl/DS-Unit-3-Sprint-2-SQL-and-Databases
|
[
"d04faad599946d98dccc9d0f3a6adfc72db136e6"
] |
[
"module2-sql-for-analysis/insert_titanic.py"
] |
[
"\"\"\"\nExecuting this module on its own will connect to a postgreSQL\ndatabase and insert the Titanic data into a new table called \n'titanic'. First create a file called creds.txt with the \nfollowing:\n\nhost\ndbname\nuser\npassword\n\nNo extra lines at the beginning or end. Put this file in the same\ndirectory as insert_titanic.py. \n\nOnce run, you may import elephant_connect() and query() from\nthe module to reconnect to the database and explore through \nsimple queries. \n\"\"\"\n\nimport psycopg2\nimport pandas as pd \n\n\n# These three functions are used by create_titanic_table\n# to turn DataFrame rows into one long string so we only\n# need one INSERT statement. \n# \n# _fix_apos is necessary because the tuple() method decides \n# whether to use '' or \"\" depending on the presence of \n# apostrophes in given strings. This leads to the presense of \n# both '' strings and \"\" strings in the INSERT statement, which\n# causes an error in psycopg2. Replacing apostrophes with ticks\n# resolves this by ensuring all strings are contained by ''.\n\ndef _fix_apos(val):\n if isinstance(val, str) and \"'\" in val:\n return val.replace(\"'\", '`')\n else:\n return val\n\ndef _row_to_string(row):\n lis = [_fix_apos(val) for val in list(row)]\n return str(tuple(lis))\n\ndef _whole_string(df):\n whole_string = ''\n for i in range(df.shape[0]):\n whole_string = whole_string + _row_to_string(df.iloc[i]) + ', '\n return whole_string[:-2]\n\n# ---------------------------------\n\ndef elephant_connect():\n \"\"\"Connects to postgreSQL database with credentials from creds.txt.\"\"\"\n f = open('creds.txt', 'r')\n host = f.readline()[:-1]\n dbname = f.readline()[:-1]\n user = f.readline()[:-1]\n password = f.readline()\n f.close()\n\n conn = psycopg2.connect(host=host,\n dbname=dbname,\n user=user,\n password=password)\n curs = conn.cursor()\n return conn, curs\n\n\ndef create_titanic_table(df, conn, curs):\n create_table = '''\n DROP TABLE IF EXISTS titanic;\n DROP TYPE IF EXISTS sex;\n CREATE TYPE sex AS ENUM ('male', 'female');\n CREATE TABLE titanic (\n id SERIAL PRIMARY KEY,\n survived SMALLINT,\n pclass SMALLINT,\n name VARCHAR(100),\n sex SEX,\n age REAL,\n siblings_spouses_aboard SMALLINT,\n parents_children_aboard SMALLINT,\n fare REAL\n );\n '''\n curs.execute(create_table)\n conn.commit()\n\n add_data = '''\n INSERT INTO titanic\n (survived, pclass, name, sex, age, siblings_spouses_aboard, parents_children_aboard, fare)\n VALUES\n ''' + _whole_string(df) + ';'\n curs.execute(add_data)\n conn.commit()\n\n\ndef query(curs, qry):\n \"\"\"A simple query function which immediately prints nice results.\"\"\"\n curs.execute(qry)\n print(pd.DataFrame(curs.fetchall()))\n\n\nif __name__ == '__main__':\n df = pd.read_csv('titanic.csv')\n\n print('\\nConnecting to Elephant...')\n conn, curs = elephant_connect()\n\n print('Creating table and inserting Titanic data...')\n create_titanic_table(df, conn, curs)\n\n print('''\nDone! The Titanic dataset is now in your postgreSQL database.\nTo make queries from a repl:\n - from insert_titanic import elephant_connect, query\n - conn, curs = elephant_connect()\n - query(curs, \"SELECT...\")\n - ...or any standard connection/cursor methods\n ''')\n\n conn.close()"
] |
[
[
"pandas.read_csv"
]
] |
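insert_titanic.py builds one long VALUES string by hand and replaces apostrophes with backticks to keep the quoting consistent. Not what the module does, but a common alternative worth noting: let psycopg2 do the quoting with a parametrized `executemany` (the connection details and rows below are placeholders):

```python
import psycopg2

rows = [
    (0, 3, "Braund, Mr. Owen Harris", "male", 22.0, 1, 0, 7.25),
    (1, 3, "O'Brien, Mr. Thomas", "male", 27.0, 0, 0, 7.83),   # apostrophe handled for us
]

conn = psycopg2.connect(host="localhost", dbname="mydb", user="me", password="...")
curs = conn.cursor()
curs.executemany(
    """INSERT INTO titanic
       (survived, pclass, name, sex, age,
        siblings_spouses_aboard, parents_children_aboard, fare)
       VALUES (%s, %s, %s, %s, %s, %s, %s, %s)""",
    rows,
)
conn.commit()
conn.close()
```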
atidem/fuzzyAnalyticHierarchyProcess
|
[
"9ffe29d08f8a464dea8fc494ca77bfc2fe6c66c6"
] |
[
"ahp.py"
] |
[
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu May 9 11:38:48 2019\r\n\r\n@author: atidem\r\n\"\"\"\r\n\r\n\"\"\"\r\nBu proje Atakan Demir(2016280050) ve Kaan Karavar(2016280022) tarafฤฑndan yapฤฑlmฤฑลtฤฑr.\r\nBu program anaconda framework iรงindeki spyder editรถrรผ kullanฤฑlarak hazฤฑrlandฤฑ.\r\nKรผtรผphaneler komut satฤฑrฤฑna (cmd) pip install kรผtรผphane adฤฑ yazฤฑlarak indirilebilir.\r\nKullanฤฑlan kรผtรผphaneler kullandฤฑฤฤฑmฤฑz framework ile default olarak gelmektedir.\r\n\r\n\"\"\"\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\ntext = open(\"Dagdeviren.prj\",\"r\") ### HESAPLAMAK ฤฐSTEDฤฐฤฤฐNฤฐZ DOSYA ADINI YAZINIZ.. // open file from text\r\ntxt = text.read()\r\ntxt = txt.replace(';',' ')\r\nlst = pd.DataFrame(data=txt.split(\"\\n\"))\r\nproductCount = int(lst.iloc[0,0]) #รrรผn Sayฤฑmฤฑz // count of product\r\nproducts = list(lst.iloc[1:productCount+1,0]) #รrรผn isimlerimiz // name of products\r\nahp = [] # prj dosyasฤฑnฤฑn bize geliลi bu \r\nresult = [] # en son elde edilecek sonuclar\r\n\r\n#Dosyayฤฑ iลlemler iรงin dataframe haline รงeviriyoruz // convert to dataframe from file\r\nfor i in range(productCount+1,len(lst.iloc[:,0])):\r\n ahp.append(lst.iloc[i,0].split())\r\nahp=pd.DataFrame(data=ahp)\r\n\r\n#Elimizdeki verilerin satฤฑr toplamlarฤฑnฤฑ bulup geri dรถndรผrรผyoruz. // return row sum\r\ndef rowSum(matrix):\r\n matrix = pd.DataFrame(data=matrix)\r\n rowSum = []\r\n for i in range(len(matrix.iloc[:,1])):\r\n l = 0.0\r\n m = 0.0\r\n u = 0.0\r\n for j in range(len(matrix.iloc[1,:])): \r\n if((j+1) % 3 == 0):\r\n u = u + float(matrix.iloc[i,j])\r\n elif((j+1) % 3 == 1):\r\n l = l + float(matrix.iloc[i,j])\r\n elif((j+1) % 3 == 2):\r\n m = m + float(matrix.iloc[i,j])\r\n rowSum.append([l,m,u]) \r\n return(rowSum)\r\n\r\n#Verilerin isimleri bir listenin iรงine yazฤฑp dรถndรผrรผyoruz. // get name of criteria\r\ndef getNode(matrix):\r\n liste = [] \r\n for i in range(len(matrix.iloc[:,1])):\r\n if(matrix.iloc[i,0].isalpha()):\r\n print(matrix.iloc[i,0])\r\n liste.append([matrix.iloc[i,0],matrix.iloc[i,1]]) \r\n \r\n return liste\r\n\r\n#Her bir matris iรงin S deฤerini hesaplฤฑyoruz. // s value calculate\r\ndef sVaules(rowSum = []):\r\n s = []\r\n for i in range(len(rowSum)):\r\n x = 0\r\n z = 0\r\n q = 0\r\n for j in range(len(rowSum)):\r\n if(i != j):\r\n x = x + rowSum[j][2]\r\n z = z + rowSum[j][0]\r\n q = q + rowSum[j][1]\r\n l = float(rowSum[i][0]) / (float(rowSum[i][0])+ x )\r\n u = float(rowSum[i][2]) / (float(rowSum[i][2])+ z )\r\n m = float(rowSum[i][1]) / q\r\n s.append([l,m,u])\r\n \r\n return s \r\n\r\n#Elde ettiฤimiz s'ler ve satฤฑr toplamlarฤฑnฤฑ kullanarak ilgili aฤฤฑrlฤฑklarฤฑ buluyoruz. // find weight with s values\r\ndef weightFirst(sValues = []):\r\n w= []\r\n V = []\r\n tmp = []\r\n for i in range(len(sValues)):\r\n for j in range(len(sValues)):\r\n if(i != j):\r\n if(sValues[i][1] >= sValues[j][1]):\r\n tmp.append(1)\r\n elif(sValues[i][2] >= sValues[j][0]):\r\n tmp.append((sValues[i][2]-(sValues[j][0]))/((sValues[i][2]-sValues[i][1])+(sValues[j][1]-(sValues[j][0]))))\r\n else:\r\n tmp.append(0)\r\n V.append(tmp.copy())\r\n tmp.clear()\r\n \r\n for i in range(len(V)):\r\n w.append(min(V[i]))\r\n \r\n summ = sum(w)\r\n for i in range(len(w)):\r\n w[i] = w[i] / summ\r\n return w \r\n\r\n# Dallanan matris iรงinden gidilebilecek yollarฤฑ buluyoruz ama hepsi // find all path \r\n# istediฤimiz yol en uca (yapraฤa kadar giden) gittiฤinde o yol iรงin ilgili deฤerlerle hesaplanฤฑyor. 
// calculate value top to leaf \r\n# dataframe de node lar yanฤฑndaki sayฤฑlar azaltฤฑlarak sadece ilgili alt noda ait aฤฤฑrlฤฑk deฤeri kullanฤฑlarak hesaplandฤฑ. \r\ndef allOfRoad(liste,i):\r\n if(i<len(nodeList)):\r\n nodeList[i][1] = int(nodeList[i][1])\r\n if(len(liste)!= 0):\r\n liste[-1][1] = liste[-1][1] - 1\r\n if(int(nodeList[i][1])!=0):\r\n nodeList[i][1] = int(nodeList[i][1])\r\n liste.append(nodeList[i]) \r\n allOfRoad(liste,i+1)\r\n elif(int(nodeList[i][1])==0):\r\n liste.append(nodeList[i]) \r\n calcRows(liste)\r\n liste.pop()\r\n if(i<(len(nodeList)-1) and int(nodeList[i+1][1]) != 0): \r\n liste.pop()\r\n allOfRoad(liste,i+1)\r\n \r\n else:\r\n liste.pop()\r\n allOfRoad(liste,i+1) \r\n\r\n#Bulduฤumuz her bir yolun deฤerini hesaplฤฑyoruz // calculate all path\r\ndef calcRows(liste):\r\n mult = 1\r\n for i in range(len(liste)-1):\r\n mult = mult * weights[liste[i][0]][liste[i][1]] \r\n result.append(mult*np.array(weights[liste[-1][0]])) \r\n \r\n#Bulduฤum tรผm nodelarฤฑn aฤฤฑrlฤฑklarฤฑnฤฑ hesaplฤฑyoruz. // calculate weigths all node \r\ndef weightCalcAllNodes():\r\n weights = {}\r\n for i in range(len(ahp.iloc[:,0])-1):\r\n if(ahp.iloc[i,0].isalpha()):\r\n if(int(ahp.iloc[i,1])==0):\r\n a = ahp.iloc[i+1:i+1+productCount,:productCount*3]\r\n tmp = weightFirst(sVaules(rowSum(a)))\r\n tmp.reverse()\r\n weights[ahp.iloc[i,0]] = tmp \r\n else:\r\n y = int(ahp.iloc[i,1])\r\n a = ahp.iloc[i+1:i+1+y,:y*3]\r\n tmp = weightFirst(sVaules(rowSum(a)))\r\n tmp.reverse()\r\n weights[ahp.iloc[i,0]] = tmp\r\n \r\n return weights\r\n \r\n#En sonunda tรผm sonuรงlarฤฑnฤฑ ilgili sรผtunlarฤฑnฤฑ toplayฤฑp, sonucu dรถndรผyoruz. // find final result \r\ndef sonuc(result):\r\n result = np.array(result) \r\n return np.sum(result, axis=0)\r\n \r\nweights = weightCalcAllNodes()\r\nnodeList = getNode(ahp)\r\nliste=[]\r\nallOfRoad(liste,0)\r\nproducts.reverse()\r\nresult = pd.DataFrame(data = sonuc(result),index = products)\r\n\r\n\r\nprint(ahp.iloc[6,0].isalpha())\r\nprint(\"aฤฤฑrlฤฑklar ters halinde yazฤฑlmฤฑลtฤฑr.\")\r\nprint(weights)\r\nprint(result)\r\n"
] |
[
[
"numpy.array",
"numpy.sum",
"pandas.DataFrame"
]
] |
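ahp.py's `weightFirst` compares the fuzzy synthetic extents S_i = (l, m, u) pairwise with a degree-of-possibility function and keeps the minimum per criterion. The same comparison written over plain tuples (the numbers below are made-up extents), which may be easier to follow than the dataframe indexing:

```python
def possibility(si, sj):
    """V(si >= sj) for triangular fuzzy numbers si = (l, m, u), sj = (l, m, u)."""
    li, mi, ui = si
    lj, mj, uj = sj
    if mi >= mj:
        return 1.0
    if ui >= lj:
        return (ui - lj) / ((ui - mi) + (mj - lj))
    return 0.0

def defuzzified_weights(s_values):
    v = [min(possibility(si, sj) for j, sj in enumerate(s_values) if j != i)
         for i, si in enumerate(s_values)]
    total = sum(v)
    return [x / total for x in v]

print(defuzzified_weights([(0.2, 0.3, 0.5), (0.1, 0.3, 0.4), (0.2, 0.4, 0.6)]))
```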
zhangganjun87/models
|
[
"58deb0599f10dc5b33570103339fb7fa5bb876c3"
] |
[
"official/resnet/keras/resnet_model.py"
] |
[
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"ResNet50 model for Keras.\n\nAdapted from tf.keras.applications.resnet50.ResNet50().\nThis is ResNet model version 1.5.\n\nRelated papers/blogs:\n- https://arxiv.org/abs/1512.03385\n- https://arxiv.org/pdf/1603.05027v2.pdf\n- http://torch.ch/blog/2016/02/04/resnets.html\n\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport warnings\n\nfrom tensorflow.python.keras import backend\nfrom tensorflow.python.keras import layers\nfrom tensorflow.python.keras import models\nfrom tensorflow.python.keras import regularizers\nfrom tensorflow.python.keras import utils\n\n\nL2_WEIGHT_DECAY = 1e-4\nBATCH_NORM_DECAY = 0.9\nBATCH_NORM_EPSILON = 1e-5\n\n\ndef identity_block(input_tensor, kernel_size, filters, stage, block):\n \"\"\"The identity block is the block that has no conv layer at shortcut.\n\n # Arguments\n input_tensor: input tensor\n kernel_size: default 3, the kernel size of\n middle conv layer at main path\n filters: list of integers, the filters of 3 conv layer at main path\n stage: integer, current stage label, used for generating layer names\n block: 'a','b'..., current block label, used for generating layer names\n\n # Returns\n Output tensor for the block.\n \"\"\"\n filters1, filters2, filters3 = filters\n if backend.image_data_format() == 'channels_last':\n bn_axis = 3\n else:\n bn_axis = 1\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n x = layers.Conv2D(filters1, (1, 1), use_bias=False,\n kernel_initializer='he_normal',\n kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY),\n name=conv_name_base + '2a')(input_tensor)\n x = layers.BatchNormalization(axis=bn_axis,\n scale=False,\n momentum=BATCH_NORM_DECAY,\n epsilon=BATCH_NORM_EPSILON,\n name=bn_name_base + '2a')(x)\n x = layers.Activation('relu')(x)\n\n x = layers.Conv2D(filters2, kernel_size,\n padding='same', use_bias=False,\n kernel_initializer='he_normal',\n kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY),\n name=conv_name_base + '2b')(x)\n x = layers.BatchNormalization(axis=bn_axis,\n scale=False,\n momentum=BATCH_NORM_DECAY,\n epsilon=BATCH_NORM_EPSILON,\n name=bn_name_base + '2b')(x)\n x = layers.Activation('relu')(x)\n\n x = layers.Conv2D(filters3, (1, 1), use_bias=False,\n kernel_initializer='he_normal',\n kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY),\n name=conv_name_base + '2c')(x)\n x = layers.BatchNormalization(axis=bn_axis,\n scale=False,\n momentum=BATCH_NORM_DECAY,\n epsilon=BATCH_NORM_EPSILON,\n name=bn_name_base + '2c')(x)\n\n x = layers.add([x, input_tensor])\n x = layers.Activation('relu')(x)\n return x\n\n\ndef conv_block(input_tensor,\n kernel_size,\n filters,\n stage,\n block,\n strides=(2, 2)):\n \"\"\"A block that has a conv layer at shortcut.\n\n # 
Arguments\n input_tensor: input tensor\n kernel_size: default 3, the kernel size of\n middle conv layer at main path\n filters: list of integers, the filters of 3 conv layer at main path\n stage: integer, current stage label, used for generating layer names\n block: 'a','b'..., current block label, used for generating layer names\n strides: Strides for the second conv layer in the block.\n\n # Returns\n Output tensor for the block.\n\n Note that from stage 3,\n the second conv layer at main path is with strides=(2, 2)\n And the shortcut should have strides=(2, 2) as well\n \"\"\"\n filters1, filters2, filters3 = filters\n if backend.image_data_format() == 'channels_last':\n bn_axis = 3\n else:\n bn_axis = 1\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n x = layers.Conv2D(filters1, (1, 1), use_bias=False,\n kernel_initializer='he_normal',\n kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY),\n name=conv_name_base + '2a')(input_tensor)\n x = layers.BatchNormalization(axis=bn_axis,\n scale=False,\n momentum=BATCH_NORM_DECAY,\n epsilon=BATCH_NORM_EPSILON,\n name=bn_name_base + '2a')(x)\n x = layers.Activation('relu')(x)\n\n x = layers.Conv2D(filters2, kernel_size, strides=strides, padding='same',\n use_bias=False, kernel_initializer='he_normal',\n kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY),\n name=conv_name_base + '2b')(x)\n x = layers.BatchNormalization(axis=bn_axis,\n scale=False,\n momentum=BATCH_NORM_DECAY,\n epsilon=BATCH_NORM_EPSILON,\n name=bn_name_base + '2b')(x)\n x = layers.Activation('relu')(x)\n\n x = layers.Conv2D(filters3, (1, 1), use_bias=False,\n kernel_initializer='he_normal',\n kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY),\n name=conv_name_base + '2c')(x)\n x = layers.BatchNormalization(axis=bn_axis,\n scale=False,\n momentum=BATCH_NORM_DECAY,\n epsilon=BATCH_NORM_EPSILON,\n name=bn_name_base + '2c')(x)\n\n shortcut = layers.Conv2D(filters3, (1, 1), strides=strides, use_bias=False,\n kernel_initializer='he_normal',\n kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY),\n name=conv_name_base + '1')(input_tensor)\n shortcut = layers.BatchNormalization(axis=bn_axis,\n scale=False,\n momentum=BATCH_NORM_DECAY,\n epsilon=BATCH_NORM_EPSILON,\n name=bn_name_base + '1')(shortcut)\n\n x = layers.add([x, shortcut])\n x = layers.Activation('relu')(x)\n return x\n\n\ndef resnet50(num_classes, dtype='float32', batch_size=None):\n # TODO(tfboyd): add training argument, just lik resnet56.\n \"\"\"Instantiates the ResNet50 architecture.\n\n Args:\n num_classes: `int` number of classes for image classification.\n\n Returns:\n A Keras model instance.\n \"\"\"\n input_shape = (224, 224, 3)\n img_input = layers.Input(shape=input_shape, dtype=dtype,\n batch_size=batch_size)\n\n if backend.image_data_format() == 'channels_first':\n x = layers.Lambda(lambda x: backend.permute_dimensions(x, (0, 3, 1, 2)),\n name='transpose')(img_input)\n bn_axis = 1\n else: # channels_last\n x = img_input\n bn_axis = 3\n\n x = layers.ZeroPadding2D(padding=(3, 3), name='conv1_pad')(x)\n x = layers.Conv2D(64, (7, 7),\n strides=(2, 2),\n padding='valid', use_bias=False,\n kernel_initializer='he_normal',\n kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY),\n name='conv1')(x)\n x = layers.BatchNormalization(axis=bn_axis,\n scale=False,\n momentum=BATCH_NORM_DECAY,\n epsilon=BATCH_NORM_EPSILON,\n name='bn_conv1')(x)\n x = layers.Activation('relu')(x)\n x = layers.ZeroPadding2D(padding=(1, 1), name='pool1_pad')(x)\n x = 
layers.MaxPooling2D((3, 3), strides=(2, 2))(x)\n\n x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))\n x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')\n x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')\n\n x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')\n\n x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d')\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e')\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f')\n\n x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')\n x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')\n x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')\n\n x = layers.GlobalAveragePooling2D(name='avg_pool')(x)\n x = layers.Dense(\n num_classes,\n kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY),\n bias_regularizer=regularizers.l2(L2_WEIGHT_DECAY),\n name='fc1000')(x)\n # TODO(reedwm): Remove manual casts once mixed precision can be enabled with a\n # single line of code.\n x = backend.cast(x, 'float32')\n x = layers.Activation('softmax')(x)\n\n # Create model.\n return models.Model(img_input, x, name='resnet50')\n"
] |
[
[
"tensorflow.python.keras.layers.BatchNormalization",
"tensorflow.python.keras.layers.Activation",
"tensorflow.python.keras.backend.image_data_format",
"tensorflow.python.keras.layers.ZeroPadding2D",
"tensorflow.python.keras.layers.MaxPooling2D",
"tensorflow.python.keras.regularizers.l2",
"tensorflow.python.keras.layers.add",
"tensorflow.python.keras.backend.cast",
"tensorflow.python.keras.models.Model",
"tensorflow.python.keras.layers.GlobalAveragePooling2D",
"tensorflow.python.keras.layers.Input",
"tensorflow.python.keras.backend.permute_dimensions"
]
] |
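The ResNet file above repeats the same bottleneck pattern many times; stripped of the L2 regularizers, layer naming and the `tensorflow.python.keras` internals, an identity block is three conv/BN/ReLU stages plus a residual add. A reduced sketch against the public tf.keras API (an assumption; the original deliberately uses the internal modules):

```python
import tensorflow as tf
from tensorflow.keras import layers

def identity_block(x, filters, kernel_size=3):
    f1, f2, f3 = filters
    shortcut = x                                   # no conv on the shortcut path
    x = layers.Conv2D(f1, 1, use_bias=False)(x)
    x = layers.BatchNormalization()(x)
    x = layers.Activation('relu')(x)
    x = layers.Conv2D(f2, kernel_size, padding='same', use_bias=False)(x)
    x = layers.BatchNormalization()(x)
    x = layers.Activation('relu')(x)
    x = layers.Conv2D(f3, 1, use_bias=False)(x)
    x = layers.BatchNormalization()(x)
    x = layers.add([x, shortcut])                  # residual connection
    return layers.Activation('relu')(x)

inp = tf.keras.Input(shape=(56, 56, 256))
out = identity_block(inp, [64, 64, 256])
print(tf.keras.Model(inp, out).count_params())
```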
MTG/jamendo-dataset
|
[
"472a14524bc8f257c9ad921307180a80cc5fd1f3"
] |
[
"scripts/baseline/solver.py"
] |
[
"# -*- coding: utf-8 -*-\nimport os\nimport time\nimport numpy as np\nimport datetime\nimport tqdm\nfrom sklearn import metrics\nimport pickle\nimport csv\n\nimport torch\nimport torch.nn as nn\n\nfrom model import CNN\n\n\nclass Solver(object):\n def __init__(self, data_loader, valid_loader, config):\n # Data loader\n self.data_loader = data_loader\n self.valid_loader = valid_loader\n\n # Training settings\n self.n_epochs = 500\n self.lr = 1e-4\n self.log_step = 10\n self.is_cuda = torch.cuda.is_available()\n self.model_save_path = config.model_save_path\n self.batch_size = config.batch_size\n self.tag_list = self.get_tag_list(config)\n if config.subset == 'all':\n self.num_class = 183\n elif config.subset == 'genre':\n self.num_class = 87\n self.tag_list = self.tag_list[:87]\n elif config.subset == 'instrument':\n self.num_class = 40\n self.tag_list = self.tag_list[87:127]\n elif config.subset == 'moodtheme':\n self.num_class = 56\n self.tag_list = self.tag_list[127:]\n elif config.subset == 'top50tags':\n self.num_class = 50\n self.model_fn = os.path.join(self.model_save_path, 'best_model.pth')\n self.roc_auc_fn = 'roc_auc_'+config.subset+'_'+str(config.split)+'.npy'\n self.pr_auc_fn = 'pr_auc_'+config.subset+'_'+str(config.split)+'.npy'\n\n # Build model\n self.build_model()\n\n def build_model(self):\n # model and optimizer\n model = CNN(num_class=self.num_class)\n\n if self.is_cuda:\n self.model = model\n self.model.cuda()\n self.optimizer = torch.optim.Adam(self.model.parameters(), self.lr)\n\n def load(self, filename):\n S = torch.load(filename)\n self.model.load_state_dict(S)\n\n def save(self, filename):\n model = self.model.state_dict()\n torch.save({'model': model}, filename)\n\n def to_var(self, x):\n if self.is_cuda:\n x = x.cuda()\n return x\n\n def train(self):\n start_t = time.time()\n current_optimizer = 'adam'\n best_roc_auc = 0\n drop_counter = 0\n reconst_loss = nn.BCELoss()\n\n for epoch in range(self.n_epochs):\n drop_counter += 1\n # train\n self.model.train()\n ctr = 0\n for x, y, _ in self.data_loader:\n ctr += 1\n\n # variables to cuda\n x = self.to_var(x)\n y = self.to_var(y)\n\n # predict\n out = self.model(x)\n loss = reconst_loss(out, y)\n\n # back propagation\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n # print log\n if (ctr) % self.log_step == 0:\n print(\"[%s] Epoch [%d/%d] Iter [%d/%d] train loss: %.4f Elapsed: %s\" %\n (datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),\n epoch+1, self.n_epochs, ctr, len(self.data_loader), loss.item(),\n datetime.timedelta(seconds=time.time()-start_t)))\n\n # validation\n roc_auc, _ = self._validation(start_t, epoch)\n\n # save model\n if roc_auc > best_roc_auc:\n print('best model: %4f' % roc_auc)\n best_roc_auc = roc_auc\n torch.save(self.model.state_dict(), os.path.join(self.model_save_path, 'best_model.pth'))\n\n # schedule optimizer\n current_optimizer, drop_counter = self._schedule(current_optimizer, drop_counter)\n\n print(\"[%s] Train finished. 
Elapsed: %s\"\n % (datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),\n datetime.timedelta(seconds=time.time() - start_t)))\n\n def _validation(self, start_t, epoch):\n prd_array = [] # prediction\n gt_array = [] # ground truth\n ctr = 0\n self.model.eval()\n reconst_loss = nn.BCELoss()\n for x, y in self.valid_loader:\n ctr += 1\n\n # variables to cuda\n x = self.to_var(x)\n y = self.to_var(y)\n\n # predict\n out = self.model(x)\n loss = reconst_loss(out, y)\n\n # print log\n if (ctr) % self.log_step == 0:\n print(\"[%s] Epoch [%d/%d], Iter [%d/%d] valid loss: %.4f Elapsed: %s\" %\n (datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),\n epoch+1, self.n_epochs, ctr, len(self.valid_loader), loss.item(),\n datetime.timedelta(seconds=time.time()-start_t)))\n\n # append prediction\n out = out.detach().cpu()\n y = y.detach().cpu()\n for prd in out:\n prd_array.append(list(np.array(prd)))\n for gt in y:\n gt_array.append(list(np.array(gt)))\n\n # get auc\n roc_auc, pr_auc, _, _ = self.get_auc(prd_array, gt_array)\n return roc_auc, pr_auc\n\n def get_tag_list(self, config):\n if config.subset == 'top50tags':\n path = 'tag_list_50.npy'\n else:\n path = 'tag_list.npy'\n tag_list = np.load(path)\n return tag_list\n\n def get_auc(self, prd_array, gt_array):\n prd_array = np.array(prd_array)\n gt_array = np.array(gt_array)\n\n roc_aucs = metrics.roc_auc_score(gt_array, prd_array, average='macro')\n pr_aucs = metrics.average_precision_score(gt_array, prd_array, average='macro')\n\n print('roc_auc: %.4f' % roc_aucs)\n print('pr_auc: %.4f' % pr_aucs)\n\n roc_auc_all = metrics.roc_auc_score(gt_array, prd_array, average=None)\n pr_auc_all = metrics.average_precision_score(gt_array, prd_array, average=None)\n\n for i in range(self.num_class):\n print('%s \\t\\t %.4f , %.4f' % (self.tag_list[i], roc_auc_all[i], pr_auc_all[i]))\n return roc_aucs, pr_aucs, roc_auc_all, pr_auc_all\n\n def _schedule(self, current_optimizer, drop_counter):\n if current_optimizer == 'adam' and drop_counter == 60:\n self.load(os.path.join(self.model_save_path, 'best_model.pth'))\n self.optimizer = torch.optim.SGD(self.model.parameters(), 0.001, momentum=0.9, weight_decay=0.0001, nesterov=True)\n current_optimizer = 'sgd_1'\n drop_counter = 0\n print('sgd 1e-3')\n # first drop\n if current_optimizer == 'sgd_1' and drop_counter == 20:\n self.load(os.path.join(self.model_save_path, 'best_model.pth'))\n for pg in self.optimizer.param_groups:\n pg['lr'] = 0.0001\n current_optimizer = 'sgd_2'\n drop_counter = 0\n print('sgd 1e-4')\n # second drop\n if current_optimizer == 'sgd_2' and drop_counter == 20:\n self.load(os.path.join(self.model_save_path, 'best_model.pth'))\n for pg in self.optimizer.param_groups:\n pg['lr'] = 0.00001\n current_optimizer = 'sgd_3'\n print('sgd 1e-5')\n return current_optimizer, drop_counter\n\n def test(self):\n start_t = time.time()\n reconst_loss = nn.BCELoss()\n epoch = 0\n\n self.load(self.model_fn)\n self.model.eval()\n ctr = 0\n prd_array = [] # prediction\n gt_array = [] # ground truth\n song_array = [] # song array\n for x, y, fn in self.data_loader:\n ctr += 1\n\n # variables to cuda\n x = self.to_var(x)\n y = self.to_var(y)\n\n # predict\n out = self.model(x)\n loss = reconst_loss(out, y)\n\n # print log\n if (ctr) % self.log_step == 0:\n print(\"[%s] Iter [%d/%d] test loss: %.4f Elapsed: %s\" %\n (datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),\n ctr, len(self.data_loader), loss.item(),\n datetime.timedelta(seconds=time.time()-start_t)))\n\n # append prediction\n out = 
out.detach().cpu()\n y = y.detach().cpu()\n for prd in out:\n prd_array.append(list(np.array(prd)))\n for gt in y:\n gt_array.append(list(np.array(gt)))\n for song in fn:\n song_array.append(song)\n\n # get auc\n roc_auc, pr_auc, roc_auc_all, pr_auc_all = self.get_auc(prd_array, gt_array)\n\n # save aucs\n np.save(open(self.roc_auc_fn, 'wb'), roc_auc_all)\n np.save(open(self.pr_auc_fn, 'wb'), pr_auc_all)\n np.save(open('prd.npy', 'wb'), prd_array)\n np.save(open('song_list.npy', 'wb'), song_array)\n"
] |
[
[
"sklearn.metrics.roc_auc_score",
"torch.load",
"torch.nn.BCELoss",
"sklearn.metrics.average_precision_score",
"torch.cuda.is_available",
"numpy.load",
"numpy.array",
"torch.save"
]
] |
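The evaluation in `get_auc` stacks per-tag predictions and ground truth, then takes macro ROC-AUC and PR-AUC. The same call pattern on a tiny synthetic multi-label batch (tags and values invented):

```python
import numpy as np
from sklearn import metrics

gt  = np.array([[1, 0, 1], [0, 1, 0], [1, 1, 0], [0, 0, 1]])      # 4 clips x 3 tags
prd = np.array([[0.9, 0.2, 0.7], [0.1, 0.8, 0.3],
                [0.8, 0.6, 0.2], [0.3, 0.1, 0.9]])

print(metrics.roc_auc_score(gt, prd, average='macro'))
print(metrics.average_precision_score(gt, prd, average='macro'))
print(metrics.roc_auc_score(gt, prd, average=None))               # per-tag scores
```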
em3ndez/open_spiel
|
[
"ce3c25a788319a58076732cdf28c7ef5651b2ae7"
] |
[
"open_spiel/python/algorithms/cfr_test.py"
] |
[
"# Copyright 2019 DeepMind Technologies Ltd. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for open_spiel.python.algorithms.cfr.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport itertools\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nimport numpy as np\n\nfrom open_spiel.python import policy\nfrom open_spiel.python.algorithms import cfr\nfrom open_spiel.python.algorithms import expected_game_score\nfrom open_spiel.python.algorithms import exploitability\nimport pyspiel\n\n_KUHN_GAME = pyspiel.load_game(\"kuhn_poker\")\n_LEDUC_GAME = pyspiel.load_game(\"leduc_poker\")\n\n_KUHN_UNIFORM_POLICY = policy.TabularPolicy(_KUHN_GAME)\n_LEDUC_UNIFORM_POLICY = policy.TabularPolicy(_LEDUC_GAME)\n\n\nclass ModuleLevelFunctionTest(absltest.TestCase):\n\n def test__update_current_policy(self):\n game = pyspiel.load_game(\"kuhn_poker\")\n tabular_policy = policy.TabularPolicy(game)\n\n cumulative_regrets = np.arange(0, 12 * 2).reshape((12, 2))\n expected_policy = cumulative_regrets / np.sum(\n cumulative_regrets, axis=-1, keepdims=True)\n nodes_indices = {\n u\"0\": 0,\n u\"0pb\": 1,\n u\"1\": 2,\n u\"1pb\": 3,\n u\"2\": 4,\n u\"2pb\": 5,\n u\"1p\": 6,\n u\"1b\": 7,\n u\"2p\": 8,\n u\"2b\": 9,\n u\"0p\": 10,\n u\"0b\": 11,\n }\n # pylint: disable=g-complex-comprehension\n info_state_nodes = {\n key: cfr._InfoStateNode(\n legal_actions=[0, 1],\n index_in_tabular_policy=None,\n cumulative_regret=dict(enumerate(cumulative_regrets[index])),\n cumulative_policy=None) for key, index in nodes_indices.items()\n }\n # pylint: enable=g-complex-comprehension\n\n cfr._update_current_policy(tabular_policy, info_state_nodes)\n\n np.testing.assert_array_equal(expected_policy,\n tabular_policy.action_probability_array)\n\n\nclass CFRTest(parameterized.TestCase, absltest.TestCase):\n\n @parameterized.parameters(\n list(itertools.product([True, False], [True, False], [True, False])))\n def test_policy_zero_is_uniform(self, linear_averaging, regret_matching_plus,\n alternating_updates):\n # We use Leduc and not Kuhn, because Leduc has illegal actions and Kuhn does\n # not.\n game = pyspiel.load_game(\"leduc_poker\")\n cfr_solver = cfr._CFRSolver(\n game,\n regret_matching_plus=regret_matching_plus,\n linear_averaging=linear_averaging,\n alternating_updates=alternating_updates)\n\n np.testing.assert_array_equal(\n _LEDUC_UNIFORM_POLICY.action_probability_array,\n cfr_solver.current_policy().action_probability_array)\n np.testing.assert_array_equal(\n _LEDUC_UNIFORM_POLICY.action_probability_array,\n cfr_solver.average_policy().action_probability_array)\n\n def test_cfr_kuhn_poker(self):\n game = pyspiel.load_game(\"kuhn_poker\")\n cfr_solver = cfr.CFRSolver(game)\n for _ in range(300):\n cfr_solver.evaluate_and_update_policy()\n average_policy = cfr_solver.average_policy()\n average_policy_values = expected_game_score.policy_value(\n game.new_initial_state(), [average_policy] 
* 2)\n # 1/18 is the Nash value. See https://en.wikipedia.org/wiki/Kuhn_poker\n np.testing.assert_allclose(\n average_policy_values, [-1 / 18, 1 / 18], atol=1e-3)\n\n def test_cfr_plus_kuhn_poker(self):\n game = pyspiel.load_game(\"kuhn_poker\")\n cfr_solver = cfr.CFRPlusSolver(game)\n for _ in range(200):\n cfr_solver.evaluate_and_update_policy()\n average_policy = cfr_solver.average_policy()\n average_policy_values = expected_game_score.policy_value(\n game.new_initial_state(), [average_policy] * 2)\n # 1/18 is the Nash value. See https://en.wikipedia.org/wiki/Kuhn_poker\n np.testing.assert_allclose(\n average_policy_values, [-1 / 18, 1 / 18], atol=1e-3)\n\n def test_cfr_cce_ce_dist_goofspiel(self):\n \"\"\"Copy of the TestCCEDistCFRGoofSpiel in corr_dist_test.cc.\"\"\"\n game = pyspiel.load_game(\n \"turn_based_simultaneous_game(game=goofspiel(num_cards=3,points_order=\"\n \"descending,returns_type=total_points))\")\n for num_iterations in [1, 10, 100]:\n policies = []\n cfr_solver = cfr.CFRSolver(game)\n for _ in range(num_iterations):\n cfr_solver.evaluate_and_update_policy()\n policies.append(\n policy.python_policy_to_pyspiel_policy(cfr_solver.current_policy()))\n mu = pyspiel.uniform_correlation_device(policies)\n cce_dist_info = pyspiel.cce_dist(game, mu)\n print(\"goofspiel, cce test num_iters: {}, cce_dist: {}, per player: {}\"\n .format(num_iterations, cce_dist_info.dist_value,\n cce_dist_info.deviation_incentives))\n # Try converting one of the BR policies:\n _ = policy.pyspiel_policy_to_python_policy(\n game, cce_dist_info.best_response_policies[0])\n\n # Assemble the same correlation device manually, just as an example for\n # how to do non-uniform distributions of them and to test the python\n # bindings for lists of tuples works properly\n uniform_prob = 1.0 / len(policies)\n mu2 = [(uniform_prob, policy) for policy in policies]\n cce_dist_info2 = pyspiel.cce_dist(game, mu2)\n self.assertAlmostEqual(cce_dist_info2.dist_value,\n sum(cce_dist_info.deviation_incentives))\n # Test the CEDist function too, why not. 
Disable the exact one, as it\n # takes too long for a test.\n # ce_dist_info = pyspiel.ce_dist(game, pyspiel.determinize_corr_dev(mu))\n ce_dist_info = pyspiel.ce_dist(\n game, pyspiel.sampled_determinize_corr_dev(mu, 100))\n print(\"goofspiel, ce test num_iters: {}, ce_dist: {}, per player: {}\"\n .format(num_iterations, ce_dist_info.dist_value,\n ce_dist_info.deviation_incentives))\n print(\"number of conditional best responses per player:\")\n for p in range(game.num_players()):\n print(\" player {}, num: {}\".format(\n p, len(ce_dist_info.conditional_best_response_policies[p])))\n\n @parameterized.parameters(\n list(itertools.product([True, False], [True, False], [True, False])))\n def test_cfr_kuhn_poker_runs_with_multiple_players(self, linear_averaging,\n regret_matching_plus,\n alternating_updates):\n num_players = 3\n\n game = pyspiel.load_game(\"kuhn_poker\", {\"players\": num_players})\n cfr_solver = cfr._CFRSolver(\n game,\n regret_matching_plus=regret_matching_plus,\n linear_averaging=linear_averaging,\n alternating_updates=alternating_updates)\n for _ in range(10):\n cfr_solver.evaluate_and_update_policy()\n average_policy = cfr_solver.average_policy()\n average_policy_values = expected_game_score.policy_value(\n game.new_initial_state(), [average_policy] * num_players)\n del average_policy_values\n\n @parameterized.parameters(list(itertools.product([False, True])))\n def test_simultaneous_two_step_avg_1b_seq_in_kuhn_poker(\n self, regret_matching_plus):\n num_players = 2\n game = pyspiel.load_game(\"kuhn_poker\", {\"players\": num_players})\n cfr_solver = cfr._CFRSolver(\n game,\n regret_matching_plus=regret_matching_plus,\n linear_averaging=False,\n alternating_updates=False)\n\n def check_avg_policy_is_uniform_random():\n avg_policy = cfr_solver.average_policy()\n for player_info_states in avg_policy.states_per_player:\n for info_state in player_info_states:\n state_policy = avg_policy.policy_for_key(info_state)\n np.testing.assert_allclose(state_policy, [1.0 / len(state_policy)] *\n len(state_policy))\n\n check_avg_policy_is_uniform_random()\n\n cfr_solver.evaluate_and_update_policy()\n check_avg_policy_is_uniform_random()\n\n cfr_solver.evaluate_and_update_policy()\n\n # The acting player in 1b is player 1 and they have not acted before, so\n # the probability this player plays to this information state is 1, and\n # the sequence probability of any action is just the probability of that\n # action given the information state. On the first iteration, this\n # probability is 0.5 for both actions. On the second iteration, the\n # current policy is [0, 1], so the average cumulants should be\n # [0.5, 1.5]. 
Normalizing this gives the average policy.\n normalization = 0.5 + 0.5 + 1\n np.testing.assert_allclose(cfr_solver.average_policy().policy_for_key(\"1b\"),\n [0.5 / normalization, (0.5 + 1) / normalization])\n\n def test_policy(self):\n game = pyspiel.load_game(\"kuhn_poker\")\n solver = cfr.CFRPlusSolver(game)\n\n tabular_policy = solver.current_policy()\n self.assertLen(tabular_policy.state_lookup, 12)\n for info_state_str in tabular_policy.state_lookup.keys():\n np.testing.assert_equal(\n np.asarray([0.5, 0.5]), tabular_policy.policy_for_key(info_state_str))\n\n @parameterized.parameters([\n (pyspiel.load_game(\"kuhn_poker\"), pyspiel.CFRSolver, cfr.CFRSolver),\n (pyspiel.load_game(\"leduc_poker\"), pyspiel.CFRSolver, cfr.CFRSolver),\n (pyspiel.load_game(\"kuhn_poker\"), pyspiel.CFRPlusSolver,\n cfr.CFRPlusSolver),\n (pyspiel.load_game(\"leduc_poker\"), pyspiel.CFRPlusSolver,\n cfr.CFRPlusSolver),\n ])\n def test_cpp_algorithms_identical_to_python_algorithm(self, game, cpp_class,\n python_class):\n cpp_solver = cpp_class(game)\n python_solver = python_class(game)\n\n for _ in range(5):\n cpp_solver.evaluate_and_update_policy()\n python_solver.evaluate_and_update_policy()\n\n cpp_avg_policy = cpp_solver.average_policy()\n python_avg_policy = python_solver.average_policy()\n\n # We do not compare the policy directly as we do not have an easy way to\n # convert one to the other, so we use the exploitability as a proxy.\n cpp_expl = pyspiel.nash_conv(game, cpp_avg_policy)\n python_expl = exploitability.nash_conv(game, python_avg_policy)\n self.assertEqual(cpp_expl, python_expl)\n # Then we also check the CurrentPolicy, just to check it is giving the same\n # results too\n cpp_current_policy = cpp_solver.current_policy()\n python_current_policy = python_solver.current_policy()\n cpp_expl = pyspiel.nash_conv(game, cpp_current_policy)\n python_expl = exploitability.nash_conv(game, python_current_policy)\n self.assertEqual(cpp_expl, python_expl)\n\n\nif __name__ == \"__main__\":\n absltest.main()\n"
] |
[
[
"numpy.asarray",
"numpy.arange",
"numpy.testing.assert_array_equal",
"numpy.testing.assert_allclose",
"numpy.sum"
]
] |
Kirill888/datacube-core
|
[
"996b395e15f975decb77c0ca9fa0555177674b2f",
"996b395e15f975decb77c0ca9fa0555177674b2f"
] |
[
"tests/test_utils_docs.py",
"datacube_apps/movie_generator.py"
] |
[
"\"\"\"\nTest utility functions from :module:`datacube.utils`\n\n\n\"\"\"\nimport os\nfrom pathlib import Path\nfrom collections import OrderedDict\nfrom types import SimpleNamespace\nfrom typing import Tuple, Iterable\n\nimport numpy as np\nimport pytest\nimport toolz\n\nfrom datacube.model import MetadataType\nfrom datacube.model.utils import traverse_datasets, flatten_datasets, dedup_lineage, remap_lineage_doc\nfrom datacube.testutils import mk_sample_product, make_graph_abcde, gen_dataset_test_dag, dataset_maker\nfrom datacube.utils import (read_documents, InvalidDocException,\n SimpleDocNav)\nfrom datacube.utils.changes import check_doc_unchanged, get_doc_changes, MISSING, DocumentMismatchError\nfrom datacube.utils.documents import (\n parse_yaml,\n without_lineage_sources,\n _open_from_s3,\n netcdf_extract_string,\n DocReader,\n is_supported_document_type,\n get_doc_offset,\n get_doc_offset_safe,\n _set_doc_offset,\n transform_object_tree,\n)\nfrom datacube.utils.serialise import jsonify_document\nfrom datacube.utils.uris import as_url\n\n\ndoc_changes = [\n (1, 1, []),\n ({}, {}, []),\n ({'a': 1}, {'a': 1}, []),\n ({'a': {'b': 1}}, {'a': {'b': 1}}, []),\n ([1, 2, 3], [1, 2, 3], []),\n ([1, 2, [3, 4, 5]], [1, 2, [3, 4, 5]], []),\n (1, 2, [((), 1, 2)]),\n ([1, 2, 3], [2, 1, 3], [((0,), 1, 2), ((1,), 2, 1)]),\n ([1, 2, [3, 4, 5]], [1, 2, [3, 6, 7]], [((2, 1), 4, 6), ((2, 2), 5, 7)]),\n ({'a': 1}, {'a': 2}, [(('a',), 1, 2)]),\n ({'a': 1}, {'a': 2}, [(('a',), 1, 2)]),\n ({'a': 1}, {'b': 1}, [(('a',), 1, MISSING), (('b',), MISSING, 1)]),\n ({'a': {'b': 1}}, {'a': {'b': 2}}, [(('a', 'b'), 1, 2)]),\n ({}, {'b': 1}, [(('b',), MISSING, 1)]),\n ({'a': {'c': 1}}, {'a': {'b': 1}}, [(('a', 'b'), MISSING, 1), (('a', 'c'), 1, MISSING)])\n]\n\n\n@pytest.mark.parametrize(\"v1, v2, expected\", doc_changes)\ndef test_get_doc_changes(v1, v2, expected):\n rval = get_doc_changes(v1, v2)\n assert rval == expected\n\n\ndef test_get_doc_changes_w_baseprefix():\n rval = get_doc_changes({}, None, base_prefix=('a',))\n assert rval == [(('a',), {}, None)]\n\n\n@pytest.mark.parametrize(\"v1, v2, expected\", doc_changes)\ndef test_check_doc_unchanged(v1, v2, expected):\n if expected != []:\n with pytest.raises(DocumentMismatchError):\n check_doc_unchanged(v1, v2, 'name')\n else:\n # No Error Raised\n check_doc_unchanged(v1, v2, 'name')\n\n\ndef test_more_check_doc_unchanged():\n # No exception raised\n check_doc_unchanged({'a': 1}, {'a': 1}, 'Letters')\n\n with pytest.raises(DocumentMismatchError, match='^Letters differs from stored.*a: 1!=2'):\n check_doc_unchanged({'a': 1}, {'a': 2}, 'Letters')\n\n with pytest.raises(DocumentMismatchError, match='^Letters differs from stored.*a.b: 1!=2'):\n check_doc_unchanged({'a': {'b': 1}}, {'a': {'b': 2}}, 'Letters')\n\n\ndef test_without_lineage_sources():\n def mk_sample(v):\n return dict(lineage={'source_datasets': v, 'a': 'a', 'b': 'b'},\n aa='aa',\n bb=dict(bb='bb'))\n\n spec = mk_sample_product('tt')\n\n x = {'a': 1}\n assert without_lineage_sources(x, spec) == x\n assert without_lineage_sources(x, spec, inplace=True) == x\n\n x = {'a': 1, 'lineage': {}}\n assert without_lineage_sources(x, spec) == x\n assert without_lineage_sources(x, spec, inplace=True) == x\n\n x = mk_sample(1)\n assert without_lineage_sources(x, spec) != x\n assert x['lineage']['source_datasets'] == 1\n\n x = mk_sample(2)\n assert without_lineage_sources(x, spec, inplace=True) == x\n assert x['lineage']['source_datasets'] == {}\n\n assert mk_sample(10) != mk_sample({})\n assert 
without_lineage_sources(mk_sample(10), spec) == mk_sample({})\n assert without_lineage_sources(mk_sample(10), spec, inplace=True) == mk_sample({})\n\n # check behaviour when `sources` is not defined for the type\n no_sources_type = MetadataType({\n 'name': 'eo',\n 'description': 'Sample',\n 'dataset': dict(\n id=['id'],\n label=['ga_label'],\n creation_time=['creation_dt'],\n measurements=['image', 'bands'],\n format=['format', 'name'],\n )\n }, dataset_search_fields={})\n\n assert without_lineage_sources(mk_sample(10), no_sources_type) == mk_sample(10)\n assert without_lineage_sources(mk_sample(10), no_sources_type, inplace=True) == mk_sample(10)\n\n\ndef test_parse_yaml():\n assert parse_yaml('a: 10') == {'a': 10}\n\n\ndef test_read_docs_from_local_path(sample_document_files):\n _test_read_docs_impl(sample_document_files)\n\n\ndef test_read_docs_from_file_uris(sample_document_files):\n uris = [('file://' + doc, ndocs) for doc, ndocs in sample_document_files]\n _test_read_docs_impl(uris)\n\n\ndef test_read_docs_from_s3(sample_document_files, monkeypatch):\n \"\"\"\n Use a mocked S3 bucket to test reading documents from S3\n \"\"\"\n boto3 = pytest.importorskip('boto3')\n moto = pytest.importorskip('moto')\n\n monkeypatch.setenv('AWS_ACCESS_KEY_ID', 'fake')\n monkeypatch.setenv('AWS_SECRET_ACCESS_KEY', 'fake')\n\n with moto.mock_s3():\n s3 = boto3.resource('s3', region_name='us-east-1')\n bucket = s3.create_bucket(Bucket='mybucket')\n\n mocked_s3_objs = []\n for abs_fname, ndocs in sample_document_files:\n if abs_fname.endswith('gz') or abs_fname.endswith('nc'):\n continue\n\n fname = Path(abs_fname).name\n bucket.upload_file(abs_fname, fname)\n\n mocked_s3_objs.append(('s3://mybucket/' + fname, ndocs))\n\n _test_read_docs_impl(mocked_s3_objs)\n\n with pytest.raises(RuntimeError):\n with _open_from_s3(\"https://not-s3.ga/file.txt\"):\n pass\n\n\ndef test_read_docs_from_http(sample_document_files, httpserver):\n http_docs = []\n for abs_fname, ndocs in sample_document_files:\n if abs_fname.endswith('gz') or abs_fname.endswith('nc'):\n continue\n path = \"/\" + Path(abs_fname).name\n\n httpserver.expect_request(path).respond_with_data(open(abs_fname).read())\n http_docs.append((httpserver.url_for(path), ndocs))\n\n _test_read_docs_impl(http_docs)\n\n\ndef _test_read_docs_impl(sample_documents: Iterable[Tuple[str, int]]):\n # Test case for returning URIs pointing to documents\n for doc_url, num_docs in sample_documents:\n all_docs = list(read_documents(doc_url, uri=True))\n assert len(all_docs) == num_docs\n\n for uri, doc in all_docs:\n assert isinstance(doc, dict)\n assert isinstance(uri, str)\n\n url = as_url(doc_url)\n if num_docs > 1:\n expect_uris = [as_url(url) + '#part={}'.format(i) for i in range(num_docs)]\n else:\n expect_uris = [as_url(url)]\n\n assert [f for f, _ in all_docs] == expect_uris\n\n\ndef test_dataset_maker():\n mk = dataset_maker(0)\n assert mk('aa') == mk('aa')\n\n a = SimpleDocNav(mk('A'))\n b = SimpleDocNav(mk('B'))\n\n assert a.id != b.id\n assert a.doc['creation_dt'] == b.doc['creation_dt']\n assert isinstance(a.id, str)\n assert a.sources == {}\n\n a1, a2 = [dataset_maker(i)('A', product_type='eo') for i in (0, 1)]\n assert a1['id'] != a2['id']\n assert a1['creation_dt'] != a2['creation_dt']\n assert a1['product_type'] == 'eo'\n\n c = SimpleDocNav(mk('C', sources=dict(a=a.doc, b=b.doc)))\n assert c.sources['a'].doc is a.doc\n assert c.sources['b'].doc is b.doc\n\n\ndef test_traverse_datasets():\n \"\"\"\n A -> B\n | |\n | v\n +--> C -> D\n |\n +--> E\n 
\"\"\"\n\n def node(name, **kwargs):\n return SimpleNamespace(id=name, sources=kwargs)\n\n A, *_ = make_graph_abcde(node)\n\n def visitor(node, name=None, depth=0, out=None):\n s = '{}:{}:{:d}'.format(node.id, name if name else '..', depth)\n out.append(s)\n\n with pytest.raises(ValueError):\n traverse_datasets(A, visitor, mode='not-a-real-mode')\n\n expect_preorder = '''\nA:..:0\nB:ab:1\nC:bc:2\nD:cd:3\nC:ac:1\nD:cd:2\nE:ae:1\n'''.lstrip().rstrip()\n\n expect_postorder = '''\nD:cd:3\nC:bc:2\nB:ab:1\nD:cd:2\nC:ac:1\nE:ae:1\nA:..:0\n'''.lstrip().rstrip()\n\n for mode, expect in zip(['pre-order', 'post-order'],\n [expect_preorder, expect_postorder]):\n out = []\n traverse_datasets(A, visitor, mode=mode, out=out)\n assert '\\n'.join(out) == expect\n\n fv = flatten_datasets(A)\n\n assert len(fv['A']) == 1\n assert len(fv['C']) == 2\n assert len(fv['E']) == 1\n assert set(fv.keys()) == set('ABCDE')\n\n\ndef test_simple_doc_nav():\n \"\"\"\n A -> B\n | |\n | v\n +--> C -> D\n |\n +--> E\n \"\"\"\n\n def node(name, **kwargs):\n return dict(id=name, lineage=dict(source_datasets=kwargs))\n\n A, _, C, _, _ = make_graph_abcde(node)\n rdr = SimpleDocNav(A)\n\n assert rdr.doc == A\n assert rdr.doc_without_lineage_sources == node('A')\n assert isinstance(rdr.sources['ae'], SimpleDocNav)\n assert rdr.sources['ab'].sources['bc'].doc == C\n assert rdr.doc_without_lineage_sources is rdr.doc_without_lineage_sources\n assert rdr.sources is rdr.sources\n assert isinstance(rdr.sources_path, tuple)\n\n def visitor(node, name=None, depth=0, out=None):\n s = '{}:{}:{:d}'.format(node.id, name if name else '..', depth)\n out.append(s)\n\n expect_preorder = '''\nA:..:0\nB:ab:1\nC:bc:2\nD:cd:3\nC:ac:1\nD:cd:2\nE:ae:1\n'''.lstrip().rstrip()\n\n expect_postorder = '''\nD:cd:3\nC:bc:2\nB:ab:1\nD:cd:2\nC:ac:1\nE:ae:1\nA:..:0\n'''.lstrip().rstrip()\n\n for mode, expect in zip(['pre-order', 'post-order'],\n [expect_preorder, expect_postorder]):\n out = []\n traverse_datasets(rdr, visitor, mode=mode, out=out)\n assert '\\n'.join(out) == expect\n\n fv = flatten_datasets(rdr)\n\n assert len(fv['A']) == 1\n assert len(fv['C']) == 2\n assert len(fv['E']) == 1\n assert set(fv.keys()) == set('ABCDE')\n\n fv, dg = flatten_datasets(rdr, with_depth_grouping=True)\n\n assert len(fv['A']) == 1\n assert len(fv['C']) == 2\n assert len(fv['E']) == 1\n assert set(fv.keys()) == set('ABCDE')\n assert isinstance(dg, list)\n assert len(dg) == 4\n assert [len(l) for l in dg] == [1, 3, 2, 1]\n\n def to_set(xx):\n return set(x.id for x in xx)\n\n assert [set(s) for s in ('A',\n 'BCE',\n 'CD',\n 'D')] == [to_set(xx) for xx in dg]\n\n with pytest.raises(ValueError):\n SimpleDocNav([])\n\n\ndef test_dedup():\n ds0 = SimpleDocNav(gen_dataset_test_dag(1, force_tree=True))\n\n # make sure ds0 has duplicate C nodes with equivalent data\n assert ds0.sources['ab'].sources['bc'].doc is not ds0.sources['ac'].doc\n assert ds0.sources['ab'].sources['bc'].doc == ds0.sources['ac'].doc\n\n ds = SimpleDocNav(dedup_lineage(ds0))\n assert ds.sources['ab'].sources['bc'].doc is ds.sources['ac'].doc\n assert ds.sources['ab'].sources['bc'].sources['cd'].doc is ds.sources['ac'].sources['cd'].doc\n\n # again but with raw doc\n ds = SimpleDocNav(dedup_lineage(ds0.doc))\n assert ds.sources['ab'].sources['bc'].doc is ds.sources['ac'].doc\n assert ds.sources['ab'].sources['bc'].sources['cd'].doc is ds.sources['ac'].sources['cd'].doc\n\n # Test that we detect inconsistent metadata for duplicate entries (test 1)\n # test: different values in the same spot\n ds0 = 
SimpleDocNav(gen_dataset_test_dag(3, force_tree=True))\n ds0.sources['ac'].doc['label'] = 'Modified'\n ds0 = SimpleDocNav(ds0.doc)\n assert ds0.sources['ab'].sources['bc'].doc != ds0.sources['ac'].doc\n\n with pytest.raises(InvalidDocException, match=r'Inconsistent metadata .*'):\n dedup_lineage(ds0)\n\n # Test that we detect inconsistent metadata for duplicate entries (test 2)\n # test: different sources structure\n ds0 = SimpleDocNav(gen_dataset_test_dag(3, force_tree=True))\n ds0.sources['ac'].doc['lineage']['source_datasets']['extra'] = ds0.sources['ae'].doc.copy()\n assert ds0.sources['ab'].sources['bc'].doc != ds0.sources['ac'].doc\n\n ds0 = SimpleDocNav(ds0.doc)\n\n with pytest.raises(InvalidDocException, match=r'Inconsistent lineage .*'):\n dedup_lineage(ds0)\n\n # Test that we detect inconsistent lineage subtrees for duplicate entries\n\n # Subtest 1: different set of keys\n ds0 = SimpleDocNav(gen_dataset_test_dag(7, force_tree=True))\n srcs = toolz.get_in(ds0.sources_path, ds0.sources['ac'].doc)\n\n assert 'cd' in srcs\n srcs['cd'] = {}\n ds0 = SimpleDocNav(ds0.doc)\n\n with pytest.raises(InvalidDocException, match=r'Inconsistent lineage .*'):\n dedup_lineage(ds0)\n\n # Subtest 2: different values for \"child\" nodes\n ds0 = SimpleDocNav(gen_dataset_test_dag(7, force_tree=True))\n srcs = toolz.get_in(ds0.sources_path, ds0.sources['ac'].doc)\n\n assert 'cd' in srcs\n srcs['cd']['id'] = '7fe57724-ed44-4beb-a3ab-c275339049be'\n ds0 = SimpleDocNav(ds0.doc)\n\n with pytest.raises(InvalidDocException, match=r'Inconsistent lineage .*'):\n dedup_lineage(ds0)\n\n # Subtest 3: different name for child\n ds0 = SimpleDocNav(gen_dataset_test_dag(7, force_tree=True))\n srcs = toolz.get_in(ds0.sources_path, ds0.sources['ac'].doc)\n\n assert 'cd' in srcs\n srcs['CD'] = srcs['cd']\n del srcs['cd']\n ds0 = SimpleDocNav(ds0.doc)\n\n with pytest.raises(InvalidDocException, match=r'Inconsistent lineage .*'):\n dedup_lineage(ds0)\n\n\ndef test_remap_lineage_doc():\n def mk_node(ds, sources):\n return dict(id=ds.id, **sources)\n\n ds = SimpleDocNav(gen_dataset_test_dag(3, force_tree=True))\n xx = remap_lineage_doc(ds, mk_node)\n assert xx['id'] == ds.id\n assert xx['ac']['id'] == ds.sources['ac'].id\n\n xx = remap_lineage_doc(ds.doc, mk_node)\n assert xx['id'] == ds.id\n assert xx['ac']['id'] == ds.sources['ac'].id\n\n\ndef test_merge():\n from datacube.model.utils import merge\n assert merge(dict(a=1), dict(b=2)) == dict(a=1, b=2)\n assert merge(dict(a=1, b=2), dict(b=2)) == dict(a=1, b=2)\n\n with pytest.raises(Exception):\n merge(dict(a=1, b=2), dict(b=3))\n\n\n@pytest.mark.xfail(True, reason=\"Merging dictionaries with content of NaN doesn't work currently\")\ndef test_merge_with_nan():\n from datacube.model.utils import merge\n\n _nan = float(\"nan\")\n assert _nan != _nan\n xx = merge(dict(a=_nan), dict(a=_nan)) # <- fails here because of simple equality check\n assert xx['a'] != xx['a']\n\n\n@pytest.fixture\ndef sample_document_files(data_folder):\n files = [('multi_doc.yml', 3),\n ('multi_doc.yml.gz', 3),\n ('multi_doc.nc', 3),\n ('single_doc.yaml', 1),\n ('sample.json', 1)]\n\n files = [(str(os.path.join(data_folder, f)), num_docs)\n for f, num_docs in files]\n\n return files\n\n\ndef test_jsonify():\n from datetime import datetime\n from uuid import UUID\n from decimal import Decimal\n\n assert sorted(jsonify_document({'a': (1.0, 2.0, 3.0),\n 'b': float(\"inf\"),\n 'c': datetime(2016, 3, 11),\n 'd': np.dtype('int16'),\n }).items()) == [\n ('a', (1.0, 2.0, 3.0)),\n ('b', 'Infinity'),\n ('c', 
'2016-03-11T00:00:00'),\n ('d', 'int16'),\n ]\n\n # Converts keys to strings:\n assert sorted(jsonify_document({1: 'a', '2': Decimal('2')}).items()) == [\n ('1', 'a'), ('2', '2')]\n\n assert jsonify_document({'k': UUID(\"1f231570-e777-11e6-820f-185e0f80a5c0\")}) == {\n 'k': '1f231570-e777-11e6-820f-185e0f80a5c0'}\n\n\ndef test_netcdf_strings():\n assert netcdf_extract_string(np.asarray([b'a', b'b'])) == \"ab\"\n txt = \"some string\"\n assert netcdf_extract_string(txt) is txt\n\n\ndef test_doc_reader():\n d = DocReader({'lat': ['extent', 'lat']}, {}, doc={'extent': {'lat': 4}})\n assert hasattr(d, 'lat')\n assert d.lat == 4\n assert d._doc == {'extent': {'lat': 4}}\n\n d.lat = 5\n assert d.lat == 5\n assert d._doc == {'extent': {'lat': 5}}\n\n assert d.search_fields == {}\n\n assert not hasattr(d, 'no_such')\n with pytest.raises(AttributeError):\n d.no_such\n\n with pytest.raises(AttributeError):\n d.no_such = 0\n\n assert dir(d) == ['lat']\n\n d = DocReader({'platform': ['platform', 'code']}, {}, doc={})\n assert d.platform is None\n\n\ndef test_is_supported_doc_type():\n assert is_supported_document_type(Path('/tmp/something.yaml'))\n assert is_supported_document_type(Path('/tmp/something.YML'))\n assert is_supported_document_type(Path('/tmp/something.yaml.gz'))\n assert not is_supported_document_type(Path('/tmp/something.tif'))\n assert not is_supported_document_type(Path('/tmp/something.tif.gz'))\n\n\ndef test_doc_offset():\n assert get_doc_offset(['a'], {'a': 4}) == 4\n assert get_doc_offset(['a', 'b'], {'a': {'b': 4}}) == 4\n with pytest.raises(KeyError):\n get_doc_offset(['a'], {})\n\n assert get_doc_offset_safe(['a'], {'a': 4}) == 4\n assert get_doc_offset_safe(['a', 'b'], {'a': {'b': 4}}) == 4\n assert get_doc_offset_safe(['a'], {}) is None\n assert get_doc_offset_safe(['a', 'b', 'c'], {'a': {'b': {}}}, 10) == 10\n assert get_doc_offset_safe(['a', 'b', 'c'], {'a': {'b': []}}, 11) == 11\n\n doc = {'a': 4}\n _set_doc_offset(['a'], doc, 5)\n assert doc == {'a': 5}\n doc = {'a': {'b': 4}}\n _set_doc_offset(['a', 'b'], doc, 'c')\n assert doc == {'a': {'b': 'c'}}\n\n\ndef test_transform_object_tree():\n def add_one(a):\n return a + 1\n assert transform_object_tree(add_one, [1, 2, 3]) == [2, 3, 4]\n assert transform_object_tree(add_one, {'a': 1, 'b': 2, 'c': 3}) == {'a': 2, 'b': 3, 'c': 4}\n assert transform_object_tree(add_one, {'a': 1, 'b': (2, 3), 'c': [4, 5]}) == {'a': 2, 'b': (3, 4), 'c': [5, 6]}\n assert transform_object_tree(add_one, {1: 1, '2': 2, 3.0: 3}, key_transform=float) == {1.0: 2, 2.0: 3, 3.0: 4}\n # Order must be maintained\n assert transform_object_tree(add_one, OrderedDict([('z', 1), ('w', 2), ('y', 3), ('s', 7)])) \\\n == OrderedDict([('z', 2), ('w', 3), ('y', 4), ('s', 8)])\n",
"\"\"\"\nThis app creates time series movies\n\n\n\"\"\"\n\n\nimport click\n\nimport fiona\nimport xarray as xr\nimport numpy as np\nimport rasterio\nimport subprocess\nfrom glob import glob\nfrom dateutil.parser import parse\nfrom datetime import datetime, timedelta, time, date\n\nfrom datacube.utils.masking import make_mask\nfrom datacube.ui import click as ui\nfrom datacube import Datacube\nfrom datacube.utils.dates import date_sequence\nfrom datacube.helpers import ga_pq_fuser\n\nDEFAULT_MEASUREMENTS = ['red', 'green', 'blue']\n\nDEFAULT_PRODUCTS = ['ls5_nbar_albers', 'ls7_nbar_albers', 'ls8_nbar_albers']\nDEFAULT_CRS = 'EPSG:3577'\nFFMPEG_PATH = 'ffmpeg'\nVALID_BIT = 8 # GA Landsat PQ Contiguity Bit\n\nSUBTITLE_FORMAT = '%d %B %Y'\nSRT_TIMEFMT = '%H:%M:%S,%f'\nSRT_FORMAT = \"\"\"\n{i}\n{start} --> {end}\n{txt}\"\"\"\n\n\ndef to_datetime(ctx, param, value):\n if value:\n return parse(value)\n else:\n return None\n\n\n# pylint: disable=too-many-arguments, too-many-locals\n@click.command(name='moviemaker')\n@click.option('--load-bounds-from', type=click.Path(exists=True, readable=True, dir_okay=False),\n help='Shapefile to calculate boundary coordinates from.')\n@click.option('--start-date', callback=to_datetime, help='YYYY-MM-DD')\n@click.option('--end-date', callback=to_datetime, help='YYYY-MM-DD')\n@click.option('--stats-duration', default='1y', help='eg. 1y, 3m')\n@click.option('--step-size', default='1y', help='eg. 1y, 3m')\n@click.option('--bounds', nargs=4, help='LEFT, BOTTOM, RIGHT, TOP')\n@click.option('--base-output-name', default='output', help=\"Base name to use for images and video. Eg. \"\n \"--base-output-name stromlo will produce \"\n \"stromlo_001_*.png and stromlo.mp4\")\n@click.option('--time-incr', default=2, help='Time to display each image, in seconds')\n@click.option('--product', multiple=True, default=DEFAULT_PRODUCTS)\n@click.option('--measurement', '-m', multiple=True, default=DEFAULT_MEASUREMENTS)\n@click.option('--ffmpeg-path', default=FFMPEG_PATH, help='Path to ffmpeg executable')\n@click.option('--crs', default=DEFAULT_CRS, help='Used if specifying --bounds. eg. EPSG:3577. 
')\n@ui.global_cli_options\n@ui.executor_cli_options\ndef main(bounds, base_output_name, load_bounds_from, start_date, end_date, product, measurement, executor,\n step_size, stats_duration, time_incr, ffmpeg_path, crs):\n \"\"\"\n Create an mp4 movie file based on datacube data\n\n Use only clear pixels, and mosaic over time to produce full frames.\n\n Can combine products, specify multiple --product\n\n \"\"\"\n if load_bounds_from:\n crs, (left, bottom, right, top) = bounds_from_file(load_bounds_from)\n elif bounds:\n left, bottom, right, top = bounds\n else:\n raise click.UsageError('Must specify one of --load-bounds-from or --bounds')\n\n tasks = []\n for filenum, date_range in enumerate(date_sequence(start_date, end_date, stats_duration, step_size), start=1):\n filename = \"{}_{:03d}_{:%Y-%m-%d}.png\".format(base_output_name, filenum, start_date)\n task = dict(filename=filename, products=product, time=date_range, x=(left, right),\n y=(top, bottom), crs=crs, measurements=measurement)\n tasks.append(task)\n\n results = []\n for task in tasks:\n result_future = executor.submit(write_mosaic_to_file, **task)\n results.append(result_future)\n\n filenames = []\n for result in executor.as_completed(results):\n filenames.append(executor.result(result))\n\n # Write subtitle file\n subtitle_filename = \"{}.srt\".format(base_output_name)\n write_subtitle_file(tasks, subtitle_filename=subtitle_filename, display_format=SUBTITLE_FORMAT,\n time_incr=time_incr)\n\n # Write video file\n filenames_pattern = '%s*.png' % base_output_name\n video_filename = \"{}.mp4\".format(base_output_name)\n write_video_file(filenames_pattern, video_filename, subtitle_filename, time_incr=time_incr, ffmpeg_path=ffmpeg_path)\n\n click.echo(\"Finished!\")\n\n\ndef bounds_from_file(filename):\n with fiona.open(filename) as c:\n return c.crs_wkt, c.bounds\n\n\ndef write_mosaic_to_file(filename, **expression):\n image_data = compute_mosaic(**expression)\n write_xarray_to_image(filename, image_data)\n click.echo('Wrote {}.'.format(filename))\n return filename\n\n\ndef compute_mosaic(products, measurements, **parsed_expressions):\n with Datacube() as dc:\n acq_range = parsed_expressions['time']\n click.echo(\"Processing time range {}\".format(acq_range))\n datasets = []\n\n for prodname in products:\n dataset = dc.load(product=prodname,\n measurements=measurements,\n group_by='solar_day',\n **parsed_expressions)\n if len(dataset) == 0:\n continue\n else:\n click.echo(\"Found {} time slices of {} during {}.\".format(len(dataset['time']), prodname, acq_range))\n\n pq = dc.load(product=prodname.replace('nbar', 'pq'),\n group_by='solar_day',\n fuse_func=ga_pq_fuser,\n **parsed_expressions)\n\n if len(pq) == 0:\n click.echo('No PQ found, skipping')\n continue\n\n crs = dataset.attrs['crs']\n dataset = dataset.where(dataset != -999)\n dataset.attrs['product'] = prodname\n dataset.attrs['crs'] = crs\n\n cloud_free = make_mask(pq.pixelquality, ga_good_pixel=True)\n dataset = dataset.where(cloud_free)\n\n if len(dataset) == 0:\n click.echo(\"Nothing left after PQ masking\")\n continue\n\n datasets.append(dataset)\n\n dataset = xr.concat(datasets, dim='time')\n\n return dataset.median(dim='time')\n\n\ndef write_xarray_to_image(filename, dataset, dtype='uint16'):\n img = np.stack([dataset[colour].data for colour in DEFAULT_MEASUREMENTS])\n\n maxvalue = 3000\n nmask = np.isnan(img).any(axis=0)\n\n mask = (img > maxvalue).any(axis=0)\n img /= maxvalue\n img[..., mask] = 1.0\n img[..., nmask] = 1.0\n\n img *= 2 ** 16\n\n profile = {\n 
'driver': 'PNG',\n 'height': len(dataset['y']),\n 'width': len(dataset['x']),\n 'count': 3,\n 'dtype': dtype\n }\n click.echo(\"Writing file: {}\".format(filename))\n with rasterio.open(filename, 'w', **profile) as dst:\n dst.write(img.astype(dtype))\n\n\ndef write_subtitle_file(tasks, subtitle_filename, display_format, time_incr):\n if time_incr < 1.0:\n incr = timedelta(microseconds=time_incr * 1000000)\n else:\n incr = timedelta(seconds=time_incr)\n\n with open(subtitle_filename, mode='w') as output:\n start_time_vid = time(0, 0, 0, 0)\n for i, task in enumerate(tasks):\n end_time_vid = (datetime.combine(date.today(), start_time_vid) + incr).time()\n\n start_time_actual, _ = task['time']\n\n txt = start_time_actual.strftime(display_format)\n\n output.write(\n SRT_FORMAT.format(i=i, txt=txt,\n start=start_time_vid.strftime(SRT_TIMEFMT)[:-3],\n end=end_time_vid.strftime(SRT_TIMEFMT)[:-3]))\n start_time_vid = end_time_vid\n\n\ndef write_video_file(filename_pattern, video_filename, subtitle_filename, time_incr, ffmpeg_path):\n resize_images(filename_pattern)\n\n # Run ffmpeg\n movie_cmd = [ffmpeg_path, '-framerate', '1/%s' % time_incr, '-pattern_type', 'glob',\n '-i', filename_pattern, '-c:v', 'libx264', '-pix_fmt', 'yuv420p', '-r', '30',\n '-vf', \"subtitles='%s':force_style='FontName=DejaVu Sans'\" % subtitle_filename, video_filename]\n\n subprocess.check_call(movie_cmd)\n\n\ndef resize_images(filename_pattern):\n \"\"\"\n Resize images files in place to a safe size for movie generation\n\n - Maximum height of 1080\n - Ensure dimensions are divisible by 2.\n\n Uses the ImageMagick mogrify command.\n \"\"\"\n sample_file = glob(filename_pattern)[0]\n width, height = subprocess.check_output(['identify', sample_file]).decode('ascii').split()[2].split('x')\n x, y = int(width), int(height)\n if y > 1080:\n scale = y / 1080\n y = 1080\n x = int(x / scale)\n x = x + 1 if x % 2 == 1 else x\n y = y + 1 if y % 2 == 1 else y\n newdims = '%sx%s!' % (x, y)\n resize_cmd = ['mogrify', '-geometry', newdims, filename_pattern]\n subprocess.check_call(resize_cmd)\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"numpy.asarray",
"numpy.dtype"
],
[
"numpy.isnan",
"numpy.stack"
]
] |
emilyso-99/TCRP_data
|
[
"bd1a82a7affb83d35d7e800e19728f0faa73a4d2"
] |
[
"code/utils /data_loading.py"
] |
[
"import numpy as np\nimport random\nimport copy\nimport torch\nimport torch.utils.data as du\n\n'''\ndef get_unseen_data_loader( feature, label, cat_label, K ):\n\n\tnum_sample = feature.shape[0]\n\tindex_list = np.random.permutation( num_sample )\n\t\n\ttrain_index_list = index_list[0:K]\n\ttest_index_list = index_list[K:]\n\n\ttrain_feature = torch.FloatTensor( feature[train_index_list,:] )\n\ttrain_label = torch.FloatTensor( label[train_index_list,:] )\n\n\ttest_feature = torch.FloatTensor( feature[test_index_list,:] )\n\ttest_label = torch.FloatTensor( label[test_index_list,:] )\n\tcat_test_label = torch.FloatTensor( cat_label[test_index_list,:] )\n\n\ttrain_dataset = du.TensorDataset( train_feature.cuda(), train_label.cuda() )\n\ttrain_loader = du.DataLoader( train_dataset, batch_size=train_feature.size(0))\n\n\ttest_dataset = du.TensorDataset( test_feature.cuda(), test_label.cuda(), cat_test_label.cuda() )\n\ttest_loader = du.DataLoader( test_dataset, batch_size=test_feature.size(0))\n\n\treturn train_loader, test_loader\n'''\ndef get_observed_data_loader(feature, label, tissue_index_list, K, batch_size, tissue_num):\n\n\tindex_list = copy.deepcopy(tissue_index_list)\n\ttrain_sampled_index_list, test_sampled_index_list = [], []\n\n\t#print 'tissue index list..', len(tissue_index_list)\n\n\trandom_tissue_index = np.random.permutation( len(tissue_index_list) ) \n\t\n\ttrain_tissue_index_list = random_tissue_index[0:tissue_num]\n\t#train_tissue_index_list = random_tissue_index[0:-1]\n\n\ttest_tissue_index_list = random_tissue_index[tissue_num:tissue_num*2]\n\t#test_tissue_index_list = [random_tissue_index[-1]]\n\n\t#print train_tissue_index_list\n\t#print test_tissue_index_list\n\t\n\tfor tissue_index in train_tissue_index_list:\n\n\t\tsub_list = index_list[tissue_index]\n\t\trandom.shuffle(sub_list)\n\n\t\ttrain_sampled_index_list +=\tsub_list[0:K]\n\n\tfor tissue_index in test_tissue_index_list:\n\n\t\tsub_list = index_list[tissue_index]\n\t\trandom.shuffle(sub_list)\n\n\t\ttest_sampled_index_list += sub_list[0:K]\n\t\t\n\trandom.shuffle( train_sampled_index_list )\n\trandom.shuffle( test_sampled_index_list )\n\n\t#print '===', train_sampled_index_list\n\ttrain_feature = torch.FloatTensor( feature[train_sampled_index_list,:] )\n\ttrain_label = torch.FloatTensor( label[train_sampled_index_list,:] )\n\n\tdataset = du.TensorDataset(train_feature, train_label)\n\tloader = du.DataLoader( dataset, batch_size=batch_size, pin_memory=True )\n\n\ttrain_data_list = []\n\tfor batch_feature, batch_label in loader:\n\t\tif batch_feature.size()[0] == 1:\n\t\t\tcontinue\n\t\ttrain_data_list.append( (batch_feature.cuda(), batch_label.cuda()) )\n\n\t#print '===', test_sampled_index_list,feature.shape\n\ttest_feature = torch.FloatTensor( feature[test_sampled_index_list,:] )\n\ttest_label = torch.FloatTensor( label[test_sampled_index_list,:] )\n\n\tdataset = du.TensorDataset( test_feature, test_label )\n\tloader = du.DataLoader( dataset, batch_size=batch_size, pin_memory=True )\n\n\ttest_data_list = []\n\tfor batch_feature, batch_label in loader:\n\t\tif batch_feature.size()[0] == 1:\n\t\t\tcontinue\n\t\ttest_data_list.append( (batch_feature.cuda(), batch_label.cuda()) )\n\n\treturn train_data_list, test_data_list\n\ndef get_observed_data_loader2(feature, label, tissue_index_list, K, batch_size):\n\t\n\tindex_list = copy.deepcopy(tissue_index_list)\n\ttrain_sampled_index_list, test_sampled_index_list = [], []\n\n\tfor index, sub_list in enumerate(index_list):\n\n\t\trandom.shuffle(sub_list)\n\n\t\tif 2*K 
< len(sub_list):\n\t\t\ttrain_sampled_index_list += sub_list[0:K]\n\t\t\ttest_sampled_index_list += sub_list[K:2*K]\n\n\t\telif K < len(sub_list):\n\t\t\ttrain_sampled_index_list += sub_list[0:K]\n\t\t\trandom.shuffle(sub_list)\n\t\t\ttest_sampled_index_list += sub_list[0:K]\n\n\t\telse:\n\t\t\ttrain_sampled_index_list += sub_list\n\t\t\ttest_sampled_index_list += sub_list\n\n\trandom.shuffle( train_sampled_index_list )\n\trandom.shuffle( test_sampled_index_list )\n\t\n\ttrain_feature = torch.FloatTensor( feature[train_sampled_index_list,:] )\n\ttrain_label = torch.FloatTensor( label[train_sampled_index_list,:] )\n\t\n\tdataset = du.TensorDataset(train_feature, train_label)\t\n\tloader = du.DataLoader( dataset, batch_size=batch_size, pin_memory=True )\n\t\n\ttrain_data_list = []\n\tfor batch_feature, batch_label in loader:\n\t\ttrain_data_list.append( (batch_feature.cuda(), batch_label.cuda()) )\n\n\ttest_feature = torch.FloatTensor( feature[test_sampled_index_list,:] )\n\ttest_label = torch.FloatTensor( label[test_sampled_index_list,:] )\n\t\n\tdataset = du.TensorDataset( test_feature, test_label )\n\tloader = du.DataLoader( dataset, batch_size=batch_size, pin_memory=True )\n\n\ttest_data_list = []\n\tfor batch_feature, batch_label in loader:\n\t\ttest_data_list.append( (batch_feature.cuda(), batch_label.cuda()) )\n\n\treturn train_data_list, test_data_list\n\ndef load_unseen_data_loader(train_index_file, test_index_file, feature, label, K, trial, batch_size=1):\n\n\ttrain_index_list = np.load( train_index_file )\n\ttest_index_list = np.load( test_index_file )\n\n\ttrain_feature = torch.FloatTensor( feature[train_index_list,:] )\n\ttrain_label = torch.FloatTensor( label[train_index_list,] )\n\n\ttest_feature = torch.FloatTensor( feature[test_index_list,:] )\n\ttest_label = torch.FloatTensor( label[test_index_list,] )\n\n\ttrain_dataset = du.TensorDataset( train_feature, train_label )\n\ttest_dataset = du.TensorDataset( test_feature, test_label )\n\n\ttrain_loader = du.DataLoader(train_dataset, batch_size=1)\n\ttrain_data_list = []\n\tfor batch_feature, batch_label in train_loader:\n\t\ttrain_data_list.append((batch_feature.cuda(), batch_label.cuda()))\n\n\ttest_loader = du.DataLoader(test_dataset, batch_size=batch_size)\n\ttest_data_list = []\n\tfor batch_feature, batch_label in test_loader:\n\t\ttest_data_list.append((batch_feature.cuda(), batch_label.cuda()))\n\n\treturn train_data_list, test_data_list\n\ndef get_unseen_data_loader(feature, label, K, batch_size=1):\n\n\tindex_list = np.random.permutation(feature.shape[0])\n\n\ttrain_index_list = index_list[0:K]\n\ttest_index_list = index_list[K:]\n\n\ttrain_feature = torch.FloatTensor( feature[train_index_list,:] )\n\ttrain_label = torch.FloatTensor( label[train_index_list,] )\n\n\ttest_feature = torch.FloatTensor( feature[test_index_list,:] )\n\ttest_label = torch.FloatTensor( label[test_index_list,] )\n\n\ttrain_dataset = du.TensorDataset( train_feature, train_label )\n\ttest_dataset = du.TensorDataset( test_feature, test_label )\n\n\ttrain_loader = du.DataLoader(train_dataset, batch_size=batch_size)\n\ttrain_data_list = []\n\tfor batch_feature, batch_label in train_loader:\n\t\ttrain_data_list.append((batch_feature.cuda(), batch_label.cuda()))\n\t\n\ttest_loader = du.DataLoader(test_dataset, batch_size=batch_size)\n\ttest_data_list = []\n\tfor batch_feature, batch_label in test_loader:\n\t\ttest_data_list.append((batch_feature.cuda(), batch_label.cuda()))\n\n\treturn train_data_list, test_data_list\n"
] |
[
[
"torch.utils.data.TensorDataset",
"torch.utils.data.DataLoader",
"numpy.random.permutation",
"torch.FloatTensor",
"numpy.load"
]
] |
ricagj/train_your_own_game_AI
|
[
"37cff4733536e882c8b9f35ccf4a1eb5f550132c"
] |
[
"game_player/grab_screen.py"
] |
[
"import cv2\nimport numpy as np\nimport win32gui, win32ui, win32con, win32api\n\n# ---*---\n\ndef grab_screen(x, x_w, y, y_h):\n\n # ่ทๅๆก้ข\n hwin = win32gui.GetDesktopWindow()\n\n w = x_w - x\n h = y_h - y\n\n # ่ฟๅๅฅๆ็ชๅฃ็่ฎพๅค็ฏๅขใ่ฆ็ๆดไธช็ชๅฃ๏ผๅ
ๆฌ้ๅฎขๆทๅบ๏ผๆ ้ขๆ ๏ผ่ๅ๏ผ่พนๆก\n hwindc = win32gui.GetWindowDC(hwin)\n\n # ๅๅปบ่ฎพๅคๆ่ฟฐ่กจ\n srcdc = win32ui.CreateDCFromHandle(hwindc)\n\n # ๅๅปบไธไธชๅ
ๅญ่ฎพๅคๆ่ฟฐ่กจ\n memdc = srcdc.CreateCompatibleDC()\n\n # ๅๅปบไฝๅพๅฏน่ฑก\n bmp = win32ui.CreateBitmap()\n bmp.CreateCompatibleBitmap(srcdc, w, h)\n memdc.SelectObject(bmp)\n \n # ๆชๅพ่ณๅ
ๅญ่ฎพๅคๆ่ฟฐ่กจ\n memdc.BitBlt((0, 0), (w, h), srcdc, (x, y), win32con.SRCCOPY)\n\n signedIntsArray = bmp.GetBitmapBits(True)\n img = np.fromstring(signedIntsArray, dtype='uint8')\n img.shape = (h, w, 4)\n\n # ๅ
ๅญ้ๆพ\n srcdc.DeleteDC()\n memdc.DeleteDC()\n win32gui.ReleaseDC(hwin, hwindc)\n win32gui.DeleteObject(bmp.GetHandle())\n\n return cv2.cvtColor(img, cv2.COLOR_BGRA2BGR)\n\n# ---------- ๆณจๆ๏ผไปฅไธ้่ฆ่ฎพ็ฝฎ ----------\n\nGAME_WIDTH = 0 # ๆธธๆ็ชๅฃๅฎฝๅบฆ\nGAME_HEIGHT = 0 # ๆธธๆ็ชๅฃ้ซๅบฆ\nwhite_border = 0 # ๆธธๆ่พนๆก\n\n# ---------- ๆณจๆ๏ผไปฅไธ้่ฆ่ฎพ็ฝฎ ----------\n\ndef get_game_screen():\n return grab_screen(\n x = 0,\n x_w = GAME_WIDTH,\n y = white_border,\n y_h = white_border+GAME_HEIGHT)\n\n# ๅ
จๅฑ\nFULL_WIDTH = 1920\nFULL_HEIGHT = 1080\n\ndef get_full_screen():\n return grab_screen(\n x = 0,\n x_w = FULL_WIDTH,\n y = 0,\n y_h = FULL_HEIGHT)"
] |
[
[
"numpy.fromstring"
]
] |
pcorless/inference-sdk
|
[
"4a69d91e9f544bcf8d6e6cccffd33918bdbab11f"
] |
[
"inference-test-tool/test_inference_mask.py"
] |
[
"\"\"\"\nThis script lets you test if the inference outputs will be processed correctly by the Arterys server.\n\"\"\"\n\nimport os\nimport argparse\nimport random\nfrom io import BytesIO\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pydicom\nfrom utils import load_image_data, sort_images, create_folder, get_pixels\nimport cv2\n\ncolors = [[1, 0, 0],\n [0, 1, 0],\n [0, 0, 1],\n [1, 1, 0],\n [1, 0, 1],\n [0, 1, 1]]\n\ndef get_colors(index, max_value):\n if index < len(colors):\n arr = np.array(colors[index]) * max_value\n else:\n rng = np.random.RandomState(index)\n arr = rng.randint(0, max_value, 3)\n \n return np.append(arr, [max_value])\n\ndef apply_lut(mask, colormap):\n # colormap is an array of 1024 uint8\n if colormap is None:\n cmap = cv2.applyColorMap(mask, cv2.COLORMAP_JET)\n cmap = np.reshape(cmap, (-1, 3))\n return np.hstack((cmap, np.reshape(np.full(cmap.shape[0], 255, dtype=np.uint8), (-1, 1))))\n\n lut = np.reshape(colormap, [-1, 4])\n return np.dstack([cv2.LUT(mask, lut[:, i]) for i in range(4)])\n\ndef create_lut_from_anchorpoints(anchorpoints):\n assert anchorpoints[0]['threshold'] == 0.0, \"The first anchorpoint must have a threshold of 0.0\"\n assert anchorpoints[-1]['threshold'] == 1.0, \"The last anchorpoint must have a threshold of 1.0\"\n\n lut = []\n for c in range(4):\n channel_arr = []\n for p in range(len(anchorpoints) - 1): \n thr = (anchorpoints[p+1]['threshold'] - anchorpoints[p]['threshold']) * 256\n \n # Make sure we get 256 values and there is no rounding issue\n if p == len(anchorpoints) - 2: \n thr = 256 - len(channel_arr)\n\n values = np.linspace(anchorpoints[p]['color'][c], \n anchorpoints[p+1]['color'][c], int(thr), dtype=np.uint8)\n channel_arr.extend(values)\n lut.append(channel_arr)\n lut = np.transpose(np.array(lut)) \n return lut\n\ndef _get_images_and_masks(dicom_images, inference_results):\n if isinstance(dicom_images, str):\n images = load_image_data(dicom_images)\n images = sort_images(images)\n else:\n images = dicom_images\n\n if isinstance(inference_results, str):\n masks = [np.fromfile(inference_results, dtype=np.uint8)]\n else:\n masks = inference_results\n return (images, masks)\n\ndef generate_images_with_masks(dicom_images, inference_results, response_json, output_folder):\n \"\"\" This function will save images to disk to preview how a mask looks on the input images.\n It saves one image for each input DICOM file. All masks in `inference_results` will be applied to the \n whole 3D volume of DICOM images. 
Each mask will show in a different color.\n \n - dicom_images: Array of DCM_Image or path to a folder with images\n - inference_results: Array with mask buffers (one for each image), or path to folder with a numpy file containing one mask.\n - response_json: The JSON response from the inference server\n - output_folder: Where the output images will be saved \n \"\"\" \n images, masks = _get_images_and_masks(dicom_images, inference_results)\n create_folder(output_folder)\n\n # Filter out secondary capture outputs\n all_mask_parts = [p for p in response_json[\"parts\"] if p['binary_type'] != 'dicom_secondary_capture']\n secondary_capture_indexes = [i for (i,p) in enumerate(response_json[\"parts\"]) if p['binary_type'] == 'dicom_secondary_capture']\n masks = np.array(masks)\n secondary_capture_indexes_bool = np.in1d(range(masks.shape[0]), secondary_capture_indexes)\n secondary_captures = masks[secondary_capture_indexes_bool]\n non_sc_masks = masks[~secondary_capture_indexes_bool]\n\n # Create DICOM files for secondary capture outputs\n for index, sc in enumerate(secondary_captures):\n dcm = pydicom.read_file(BytesIO(sc.tobytes()))\n file_path = os.path.join(output_folder, 'sc_' + str(index) + '.dcm')\n pydicom.dcmwrite(file_path, dcm)\n\n offset = 0\n for index, image in enumerate(images):\n dcm = pydicom.dcmread(image.path)\n pixels = get_pixels(dcm)\n\n # Reshape and add alpha\n pixels = np.reshape(pixels, (-1, 3))\n pixels = np.hstack((pixels, np.reshape(np.full(pixels.shape[0], 255, dtype=np.uint8), (-1, 1))))\n\n for mask_index, (mask, json_part) in enumerate(zip(non_sc_masks, all_mask_parts)):\n # If the input holds multiple timepoints but the result only includes 1 timepoint\n if image.timepoint is not None and image.timepoint > 0 and json_part['binary_data_shape']['timepoints'] == 1:\n continue\n # get mask for this image\n image_mask = mask[offset : offset + dcm.Rows * dcm.Columns] \n pixels = _draw_mask_on_image(pixels, image_mask, json_part, response_json, mask_index, mask_index) \n\n offset += dcm.Rows * dcm.Columns\n\n # write image to output folder\n output_filename = os.path.join(output_folder, str(index) + '_' + os.path.basename(os.path.normpath(image.path)))\n output_filename += '.png'\n\n if pixels.shape[1] != 4:\n pixels = np.hstack((pixels, np.reshape(np.full(pixels.shape[0], 255, dtype=np.uint8), (-1, 1))))\n pixels = np.reshape(pixels, (dcm.Rows, dcm.Columns, 4))\n plt.imsave(output_filename, pixels)\n\n for mask_index, mask in enumerate(non_sc_masks):\n assert mask.shape[0] <= offset, \"Mask {} does not have the same size ({}) as the volume ({})\".format(mask_index, mask.shape[0], offset)\n\ndef generate_images_for_single_image_masks(dicom_images, inference_results, response_json, output_folder):\n \"\"\" This function will save images to disk to preview how a mask looks on the input images.\n It saves one image for each input DICOM file with the corresponding `inference_results` mask\n applied as overlay.\n \n - dicom_images: Array of DCM_Image or path to a folder with images\n - inference_results: Array with mask buffers (one for each image)\n - response_json: The JSON response from the inference server\n - output_folder: Where the output images will be saved \n\n The difference with `generate_images_with_masks` is that `generate_images_with_masks` applies each mask to the whole\n volume while this functions applies each mask to one image.\n \"\"\"\n images, masks = _get_images_and_masks(dicom_images, inference_results)\n create_folder(output_folder)\n \n for index, 
(image, mask, json_part) in enumerate(zip(images, masks, response_json[\"parts\"])):\n dcm = pydicom.dcmread(image.path)\n pixels = get_pixels(dcm)\n\n # Reshape and add alpha\n pixels = np.reshape(pixels, (-1, 3))\n pixels = np.hstack((pixels, np.reshape(np.full(pixels.shape[0], 255, dtype=np.uint8), (-1, 1))))\n \n # get mask for this image \n pixels = _draw_mask_on_image(pixels, mask, json_part, response_json, index, 0)\n\n # write image to output folder\n output_filename = os.path.join(output_folder, str(index) + '_' + os.path.basename(os.path.normpath(image.path)))\n output_filename += '.png'\n \n pixels = np.reshape(pixels, (dcm.Rows, dcm.Columns, 4))\n plt.imsave(output_filename, pixels)\n\ndef _draw_mask_on_image(pixels, image_mask, json_part, response_json, mask_index, label):\n mask_alpha = 0.5\n max_value = np.iinfo(pixels.dtype).max\n\n assert image_mask.shape[0] == pixels.shape[0], \\\n \"The size of mask {} ({}) does not match the size of the image ({})\".format(mask_index, image_mask.shape[0], pixels.shape[0])\n\n # apply mask\n if json_part['binary_type'] == 'probability_mask': \n threshold = (json_part['probability_threshold'] * 255) if 'probability_threshold' in json_part else 128\n pixels[image_mask > threshold] = pixels[image_mask > threshold] * (1 - mask_alpha) + \\\n (mask_alpha * np.array(get_colors(label, max_value)).astype(np.float)).astype(np.uint8)\n elif json_part['binary_type'] == 'numeric_label_mask':\n for n in range(1, max_value):\n pixels[image_mask == n] = pixels[image_mask == n] * (1 - mask_alpha) + \\\n (mask_alpha * np.array(get_colors(n, max_value)).astype(np.float)).astype(np.uint8)\n elif json_part['binary_type'] == 'heatmap':\n if 'palette' in json_part and json_part['palette'] in response_json['palettes']:\n palette = response_json['palettes'][json_part['palette']]\n if palette['type'] == 'anchorpoints': \n lut = create_lut_from_anchorpoints(palette['data'])\n heatmap = apply_lut(image_mask, lut)\n else:\n heatmap = apply_lut(image_mask, palette['data'])\n else:\n heatmap = apply_lut(image_mask, None)\n heatmap = np.reshape(heatmap, [-1, 4])\n \n pixels = (pixels * (1 - mask_alpha) + np.reshape(mask_alpha * (heatmap[:, 3] / 255.0), (-1, 1)) * heatmap).astype(np.uint8)\n pixels[:, 3] = 255\n else:\n # Ignoring others like 'dicom_secondary_capture'\n pass\n \n return pixels\n "
] |
[
[
"matplotlib.pyplot.imsave",
"numpy.fromfile",
"numpy.reshape",
"numpy.full",
"numpy.append",
"numpy.iinfo",
"numpy.array",
"numpy.random.RandomState"
]
] |
abrammer/windspharm
|
[
"a4a297216ca6faa63234fcd5deae5a3c28b1132b"
] |
[
"windspharm/tests/test_error_handling.py"
] |
[
"\"\"\"Tests for error handling in `windspharm`.\"\"\"\n# Copyright (c) 2012-2016 Andrew Dawson\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\nfrom __future__ import absolute_import\n\nimport pytest\nimport numpy as np\nimport numpy.ma as ma\n\nfrom windspharm.tests import VectorWindTest, solvers\nfrom .reference import reference_solutions\n\n\nclass ErrorHandlersTest(VectorWindTest):\n \"\"\"Base class for all error handler tests.\"\"\"\n interface = None\n gridtype = None\n\n @classmethod\n def setup_class(cls):\n msg = 'missing dependencies required to test the {!s} interface'\n if cls.interface not in solvers:\n pytest.skip(msg.format(cls.interface))\n\n\n# ----------------------------------------------------------------------------\n# Tests for the standard interface\n\n\nclass TestStandardErrorHandlers(ErrorHandlersTest):\n \"\"\"Standard interface error handler tests.\"\"\"\n interface = 'standard'\n gridtype = 'regular'\n\n def test_masked_values(self):\n # masked values in inputs should raise an error\n solution = reference_solutions(self.interface, self.gridtype)\n mask = np.empty(solution['uwnd'].shape, dtype=np.bool)\n mask[:] = False\n mask[1, 1] = True\n u = ma.array(solution['uwnd'], mask=mask, fill_value=1.e20)\n v = ma.array(solution['vwnd'], mask=mask, fill_value=1.e20)\n with pytest.raises(ValueError):\n solvers[self.interface](u, v, gridtype=self.gridtype)\n\n def test_nan_values(self):\n # NaN values in inputs should raise an error\n solution = reference_solutions(self.interface, self.gridtype)\n solution['vwnd'][1, 1] = np.nan\n with pytest.raises(ValueError):\n solvers[self.interface](solution['uwnd'],\n solution['vwnd'],\n gridtype=self.gridtype)\n\n def test_invalid_shape_components(self):\n # invalid shape inputs should raise an error\n solution = reference_solutions(self.interface, self.gridtype)\n with pytest.raises(ValueError):\n solvers[self.interface](\n solution['uwnd'][np.newaxis].repeat(2, axis=0),\n solution['vwnd'][np.newaxis].repeat(2, axis=0),\n gridtype=self.gridtype)\n\n def test_different_shape_components(self):\n # different shape inputs should raise an error\n solution = reference_solutions(self.interface, self.gridtype)\n with pytest.raises(ValueError):\n solvers[self.interface](solution['uwnd'],\n solution['vwnd'][:-1],\n gridtype=self.gridtype)\n\n def test_invalid_rank_components(self):\n # invalid rank inputs should raise an error\n solution = reference_solutions(self.interface, self.gridtype)\n with pytest.raises(ValueError):\n 
solvers[self.interface](\n solution['uwnd'][..., np.newaxis, np.newaxis],\n solution['vwnd'][..., np.newaxis, np.newaxis],\n gridtype=self.gridtype)\n\n def test_different_rank_components(self):\n # different rank inputs should raise an error\n solution = reference_solutions(self.interface, self.gridtype)\n with pytest.raises(ValueError):\n solvers[self.interface](solution['uwnd'][..., np.newaxis],\n solution['vwnd'],\n gridtype=self.gridtype)\n\n def test_invalid_gridtype(self):\n # invalid grid type specification should raise an error\n solution = reference_solutions(self.interface, self.gridtype)\n with pytest.raises(ValueError):\n solvers[self.interface](solution['uwnd'], solution['vwnd'],\n gridtype='curvilinear')\n\n def test_gradient_masked_values(self):\n # masked values in gradient input should raise an error\n solution = reference_solutions(self.interface, self.gridtype)\n vw = solvers[self.interface](solution['uwnd'], solution['vwnd'],\n gridtype=self.gridtype)\n mask = np.empty(solution['uwnd'].shape, dtype=np.bool)\n mask[:] = False\n mask[1, 1] = True\n chi = ma.array(solution['chi'], mask=mask, fill_value=1.e20)\n with pytest.raises(ValueError):\n vw.gradient(chi)\n\n def test_gradient_nan_values(self):\n # NaN values in gradient input should raise an error\n solution = reference_solutions(self.interface, self.gridtype)\n vw = solvers[self.interface](solution['uwnd'], solution['vwnd'],\n gridtype=self.gridtype)\n solution['chi'][1, 1] = np.nan\n with pytest.raises(ValueError):\n vw.gradient(solution['chi'])\n\n def test_gradient_invalid_shape(self):\n # input to gradient of different shape should raise an error\n solution = reference_solutions(self.interface, self.gridtype)\n vw = solvers[self.interface](solution['uwnd'], solution['vwnd'],\n gridtype=self.gridtype)\n with pytest.raises(ValueError):\n vw.gradient(solution['chi'][:-1])\n\n\n# ----------------------------------------------------------------------------\n# Tests for the cdms interface\n\n\nclass TestCDMSErrorHandlers(ErrorHandlersTest):\n \"\"\"cdms interface error handler tests.\"\"\"\n interface = 'cdms'\n gridtype = 'regular'\n\n def test_non_variable_input(self):\n # input not a cdms2 variable should raise an error\n solution = reference_solutions('standard', self.gridtype)\n with pytest.raises(TypeError):\n solvers[self.interface](solution['uwnd'], solution['vwnd'])\n\n def test_different_shape_components(self):\n # inputs not the same shape should raise an error\n solution = reference_solutions(self.interface, self.gridtype)\n with pytest.raises(ValueError):\n solvers[self.interface](solution['uwnd'],\n solution['vwnd'].reorder('xy'))\n\n def test_unknown_grid(self):\n # inputs where a lat-lon grid cannot be identified should raise an\n # error\n solution = reference_solutions(self.interface, self.gridtype)\n lat = solution['vwnd'].getLatitude()\n delattr(lat, 'axis')\n lat.id = 'unknown'\n with pytest.raises(ValueError):\n solvers[self.interface](solution['uwnd'], solution['vwnd'])\n\n def test_non_variable_gradient_input(self):\n # input to gradient not a cdms2 variable should raise an error\n solution = reference_solutions(self.interface, self.gridtype)\n vw = solvers[self.interface](solution['uwnd'], solution['vwnd'])\n dummy_solution = reference_solutions('standard', self.gridtype)\n with pytest.raises(TypeError):\n vw.gradient(dummy_solution['chi'])\n\n def test_gradient_non_variable_input(self):\n # input to gradient not a cdms2 variable should raise an error\n solution = 
reference_solutions(self.interface, self.gridtype)\n vw = solvers[self.interface](solution['uwnd'], solution['vwnd'])\n dummy_solution = reference_solutions('standard', self.gridtype)\n with pytest.raises(TypeError):\n vw.gradient(dummy_solution['chi'])\n\n def test_gradient_different_shape(self):\n # input to gradient of different shape should raise an error\n solution = reference_solutions(self.interface, self.gridtype)\n vw = solvers[self.interface](solution['uwnd'], solution['vwnd'])\n with pytest.raises(ValueError):\n vw.gradient(solution['chi'][:-1])\n\n def test_gradient_unknown_grid(self):\n # input to gradient with no identifiable grid should raise an error\n solution = reference_solutions(self.interface, self.gridtype)\n vw = solvers[self.interface](solution['uwnd'], solution['vwnd'])\n lat = solution['chi'].getLatitude()\n delattr(lat, 'axis')\n lat.id = 'unknown'\n with pytest.raises(ValueError):\n vw.gradient(solution['chi'])\n\n def test_truncate_non_variable_input(self):\n # input to truncate not a cdms2 variable should raise an error\n solution = reference_solutions(self.interface, self.gridtype)\n vw = solvers[self.interface](solution['uwnd'], solution['vwnd'])\n dummy_solution = reference_solutions('standard', self.gridtype)\n with pytest.raises(TypeError):\n vw.truncate(dummy_solution['chi'])\n\n def test_truncate_different_shape(self):\n # input to truncate of different shape should raise an error\n solution = reference_solutions(self.interface, self.gridtype)\n vw = solvers[self.interface](solution['uwnd'], solution['vwnd'])\n with pytest.raises(ValueError):\n vw.truncate(solution['chi'][:-1])\n\n def test_truncate_unknown_grid(self):\n # input to truncate with no identifiable grid should raise an error\n solution = reference_solutions(self.interface, self.gridtype)\n vw = solvers[self.interface](solution['uwnd'], solution['vwnd'])\n lat = solution['chi'].getLatitude()\n delattr(lat, 'axis')\n lat.id = 'unknown'\n with pytest.raises(ValueError):\n vw.truncate(solution['chi'])\n\n\n# ----------------------------------------------------------------------------\n# Tests for the iris interface\n\n\nclass TestIrisErrorHandlers(ErrorHandlersTest):\n \"\"\"Iris interface error handler tests.\"\"\"\n interface = 'iris'\n gridtype = 'regular'\n\n def test_non_cube_input(self):\n # input not an iris cube should raise an error\n solution = reference_solutions('standard', self.gridtype)\n with pytest.raises(TypeError):\n solvers[self.interface](solution['uwnd'], solution['vwnd'])\n\n def test_different_shape_components(self):\n # inputs not the same shape should raise an error\n solution = reference_solutions(self.interface, self.gridtype)\n solution['vwnd'].transpose([1, 0])\n with pytest.raises(ValueError):\n solvers[self.interface](solution['uwnd'], solution['vwnd'])\n\n def test_unknown_grid(self):\n # inputs where a lat-lon grid cannot be identified should raise an\n # error\n solution = reference_solutions(self.interface, self.gridtype)\n solution['vwnd'].coord('latitude').rename('unknown')\n with pytest.raises(ValueError):\n solvers[self.interface](solution['uwnd'], solution['vwnd'])\n\n def test_gradient_non_cube_input(self):\n # input to gradient not an iris cube should raise an error\n solution = reference_solutions(self.interface, self.gridtype)\n vw = solvers[self.interface](solution['uwnd'], solution['vwnd'])\n dummy_solution = reference_solutions('standard', self.gridtype)\n with pytest.raises(TypeError):\n vw.gradient(dummy_solution['chi'])\n\n def 
test_gradient_different_shape(self):\n # input to gradient of different shape should raise an error\n solution = reference_solutions(self.interface, self.gridtype)\n vw = solvers[self.interface](solution['uwnd'], solution['vwnd'])\n with pytest.raises(ValueError):\n vw.gradient(solution['chi'][:-1])\n\n def test_gradient_unknown_grid(self):\n # input to gradient with no identifiable grid should raise an error\n solution = reference_solutions(self.interface, self.gridtype)\n vw = solvers[self.interface](solution['uwnd'], solution['vwnd'])\n solution['chi'].coord('latitude').rename('unknown')\n with pytest.raises(ValueError):\n vw.gradient(solution['chi'])\n\n def test_truncate_non_cube_input(self):\n # input to truncate not an iris cube should raise an error\n solution = reference_solutions(self.interface, self.gridtype)\n vw = solvers[self.interface](solution['uwnd'], solution['vwnd'])\n dummy_solution = reference_solutions('standard', self.gridtype)\n with pytest.raises(TypeError):\n vw.truncate(dummy_solution['chi'])\n\n def test_truncate_different_shape(self):\n # input to truncate of different shape should raise an error\n solution = reference_solutions(self.interface, self.gridtype)\n vw = solvers[self.interface](solution['uwnd'], solution['vwnd'])\n with pytest.raises(ValueError):\n vw.truncate(solution['chi'][:-1])\n\n def test_truncate_unknown_grid(self):\n # input to truncate with no identifiable grid should raise an error\n solution = reference_solutions(self.interface, self.gridtype)\n vw = solvers[self.interface](solution['uwnd'], solution['vwnd'])\n solution['chi'].coord('latitude').rename('unknown')\n with pytest.raises(ValueError):\n vw.truncate(solution['chi'])\n\n\n# ----------------------------------------------------------------------------\n# Tests for the xarray interface\n\n\nclass TestXarrayErrorHandlers(ErrorHandlersTest):\n \"\"\"xarray interface error handler tests.\"\"\"\n interface = 'xarray'\n gridtype = 'regular'\n\n def test_non_dataarray_input(self):\n # input not an xarray.DataArray should raise an error\n solution = reference_solutions('standard', self.gridtype)\n with pytest.raises(TypeError):\n solvers[self.interface](solution['uwnd'], solution['vwnd'])\n\n def test_different_shape_components(self):\n # inputs not the same shape should raise an error\n solution = reference_solutions(self.interface, self.gridtype)\n solution['vwnd'] = solution['vwnd'].transpose()\n with pytest.raises(ValueError):\n solvers[self.interface](solution['uwnd'], solution['vwnd'])\n\n def test_unknown_grid(self):\n # inputs where a lat-lon grid cannot be identified should raise an\n # error\n solution = reference_solutions(self.interface, self.gridtype)\n solution['vwnd'].coords.update(\n {'unknown': ('latitude',\n solution['vwnd'].coords['latitude'].values)})\n solution['vwnd'] = solution['vwnd'].swap_dims({'latitude': 'unknown'})\n del solution['vwnd'].coords['latitude']\n with pytest.raises(ValueError):\n solvers[self.interface](solution['uwnd'], solution['vwnd'])\n\n def test_gradient_non_dataarray_input(self):\n # input to gradient not an xarray.DataArray should raise an error\n solution = reference_solutions(self.interface, self.gridtype)\n vw = solvers[self.interface](solution['uwnd'], solution['vwnd'])\n dummy_solution = reference_solutions('standard', self.gridtype)\n with pytest.raises(TypeError):\n vw.gradient(dummy_solution['chi'])\n\n def test_gradient_different_shape(self):\n # input to gradient of different shape should raise an error\n solution = 
reference_solutions(self.interface, self.gridtype)\n vw = solvers[self.interface](solution['uwnd'], solution['vwnd'])\n with pytest.raises(ValueError):\n vw.gradient(solution['chi'][:-1])\n\n def test_gradient_unknown_grid(self):\n # input to gradient with no identifiable grid should raise an error\n solution = reference_solutions(self.interface, self.gridtype)\n vw = solvers[self.interface](solution['uwnd'], solution['vwnd'])\n solution['chi'].coords.update(\n {'unknown': ('latitude',\n solution['chi'].coords['latitude'].values)})\n solution['chi'] = solution['chi'].swap_dims({'latitude': 'unknown'})\n del solution['chi'].coords['latitude']\n with pytest.raises(ValueError):\n vw.gradient(solution['chi'])\n\n def test_truncate_non_dataarray_input(self):\n # input to truncate not an xarray.DataArray should raise an error\n solution = reference_solutions(self.interface, self.gridtype)\n vw = solvers[self.interface](solution['uwnd'], solution['vwnd'])\n dummy_solution = reference_solutions('standard', self.gridtype)\n with pytest.raises(TypeError):\n vw.truncate(dummy_solution['chi'])\n\n def test_truncate_different_shape(self):\n # input to truncate of different shape should raise an error\n solution = reference_solutions(self.interface, self.gridtype)\n vw = solvers[self.interface](solution['uwnd'], solution['vwnd'])\n with pytest.raises(ValueError):\n vw.truncate(solution['chi'][:-1])\n\n def test_truncate_unknown_grid(self):\n # input to truncate with no identifiable grid should raise an error\n solution = reference_solutions(self.interface, self.gridtype)\n vw = solvers[self.interface](solution['uwnd'], solution['vwnd'])\n solution['chi'].coords.update(\n {'unknown': ('latitude',\n solution['chi'].coords['latitude'].values)})\n solution['chi'] = solution['chi'].swap_dims({'latitude': 'unknown'})\n del solution['chi'].coords['latitude']\n with pytest.raises(ValueError):\n vw.truncate(solution['chi'])\n"
] |
[
[
"numpy.ma.array",
"numpy.empty"
]
] |
ChadPro/DataToTFRecord
|
[
"54ea5ef5a512c8ce3d43cecb6da85ed9090e7747",
"54ea5ef5a512c8ce3d43cecb6da85ed9090e7747"
] |
[
"cifar10/cifar10_224.py",
"VOC/pascalvoc_common.py"
] |
[
"import numpy as np\nimport tensorflow as tf\nimport time\nfrom scipy.misc import imread,imresize\nfrom os import walk\nfrom os.path import join\nimport sys\nimport cv2\nfrom PIL import Image\nimport matplotlib.pyplot as plt\n\n# Image \nIMG_SIZE = 224\nIMG_CHANNELS = 3\n\ndef read_and_decode(filename_queue):\n reader = tf.TFRecordReader()\n _,serialized_example = reader.read(filename_queue)\n\n features = tf.parse_single_example(serialized_example,features={\n 'label':tf.FixedLenFeature([],tf.int64),\n 'image_raw':tf.FixedLenFeature([],tf.string)\n })\n image = tf.decode_raw(features['image_raw'],tf.uint8)\n label = tf.cast(features['label'],tf.int32)\n # image.set_shape([IMG_WIDTH*IMG_HEIGHT*IMG_CHANNELS])\n image = tf.reshape(image,[IMG_SIZE,IMG_SIZE,IMG_CHANNELS])\n return image, label\n\ndef inputs(train_path, val_path, data_set,batch_size,num_epochs):\n if not num_epochs:\n num_epochs = None\n if data_set == 'train':\n read_file = train_path\n else:\n read_file = val_path\n with tf.name_scope('tfrecord_input') as scope:\n filename_queue = tf.train.string_input_producer([read_file], num_epochs=num_epochs)\n image,label = read_and_decode(filename_queue)\n images,labels = tf.train.shuffle_batch([image,label], batch_size=batch_size, num_threads=64, capacity=5000, min_after_dequeue=3000)\n\n ll = tf.expand_dims(labels, 1)\n indices = tf.expand_dims(tf.range(0, batch_size, 1), 1)\n concated = tf.concat([indices, ll], 1)\n onehot_labels = tf.sparse_to_dense(concated, tf.stack([batch_size, 10]), 1.0, 0.0)\n\n return images, onehot_labels, labels",
"# Copyright 2015 Paul Balanca. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Provides data for the Pascal VOC Dataset (images + annotations).\n\"\"\"\nimport os\n\nimport tensorflow as tf\nimport dataset_utils\n\nslim = tf.contrib.slim\n\nVOC_LABELS = {\n 'none': (0, 'Background'),\n 'aeroplane': (1, 'Vehicle'),\n 'bicycle': (2, 'Vehicle'),\n 'bird': (3, 'Animal'),\n 'boat': (4, 'Vehicle'),\n 'bottle': (5, 'Indoor'),\n 'bus': (6, 'Vehicle'),\n 'car': (7, 'Vehicle'),\n 'cat': (8, 'Animal'),\n 'chair': (9, 'Indoor'),\n 'cow': (10, 'Animal'),\n 'diningtable': (11, 'Indoor'),\n 'dog': (12, 'Animal'),\n 'horse': (13, 'Animal'),\n 'motorbike': (14, 'Vehicle'),\n 'person': (15, 'Person'),\n 'pottedplant': (16, 'Indoor'),\n 'sheep': (17, 'Animal'),\n 'sofa': (18, 'Indoor'),\n 'train': (19, 'Vehicle'),\n 'tvmonitor': (20, 'Indoor'),\n}\n\nVOC_HUMAN_LIGHT_LABELS = {\n 'none' : (0, 'Background'),\n 'human_red' : (1, 'TrafficLight'),\n 'human_geeen' : (2, 'TrafficLight')\n}\n\ndef label_dict(dataname='pascalvoc'):\n if dataname == 'humanlight':\n return VOC_HUMAN_LIGHT_LABELS\n \n return VOC_LABELS\n\ndef get_split(split_name, dataset_dir, file_pattern, reader,\n split_to_sizes, items_to_descriptions, num_classes):\n \"\"\"Gets a dataset tuple with instructions for reading Pascal VOC dataset.\n\n Args:\n split_name: A train/test split name.\n dataset_dir: The base directory of the dataset sources.\n file_pattern: The file pattern to use when matching the dataset sources.\n It is assumed that the pattern contains a '%s' string so that the split\n name can be inserted.\n reader: The TensorFlow reader type.\n\n Returns:\n A `Dataset` namedtuple.\n\n Raises:\n ValueError: if `split_name` is not a valid train/test split.\n \"\"\"\n if split_name not in split_to_sizes:\n raise ValueError('split name %s was not recognized.' 
% split_name)\n file_pattern = os.path.join(dataset_dir, file_pattern % split_name)\n\n # Allowing None in the signature so that dataset_factory can use the default.\n if reader is None:\n reader = tf.TFRecordReader\n # Features in Pascal VOC TFRecords.\n keys_to_features = {\n 'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),\n 'image/format': tf.FixedLenFeature((), tf.string, default_value='jpeg'),\n 'image/height': tf.FixedLenFeature([1], tf.int64),\n 'image/width': tf.FixedLenFeature([1], tf.int64),\n 'image/channels': tf.FixedLenFeature([1], tf.int64),\n 'image/shape': tf.FixedLenFeature([3], tf.int64),\n 'image/object/bbox/xmin': tf.VarLenFeature(dtype=tf.float32),\n 'image/object/bbox/ymin': tf.VarLenFeature(dtype=tf.float32),\n 'image/object/bbox/xmax': tf.VarLenFeature(dtype=tf.float32),\n 'image/object/bbox/ymax': tf.VarLenFeature(dtype=tf.float32),\n 'image/object/bbox/label': tf.VarLenFeature(dtype=tf.int64),\n 'image/object/bbox/difficult': tf.VarLenFeature(dtype=tf.int64),\n 'image/object/bbox/truncated': tf.VarLenFeature(dtype=tf.int64),\n }\n items_to_handlers = {\n 'image': slim.tfexample_decoder.Image('image/encoded', 'image/format'),\n 'shape': slim.tfexample_decoder.Tensor('image/shape'),\n 'object/bbox': slim.tfexample_decoder.BoundingBox(\n ['ymin', 'xmin', 'ymax', 'xmax'], 'image/object/bbox/'),\n 'object/label': slim.tfexample_decoder.Tensor('image/object/bbox/label'),\n 'object/difficult': slim.tfexample_decoder.Tensor('image/object/bbox/difficult'),\n 'object/truncated': slim.tfexample_decoder.Tensor('image/object/bbox/truncated'),\n }\n decoder = slim.tfexample_decoder.TFExampleDecoder(\n keys_to_features, items_to_handlers)\n\n labels_to_names = None\n if dataset_utils.has_labels(dataset_dir):\n labels_to_names = dataset_utils.read_label_file(dataset_dir)\n # else:\n # labels_to_names = create_readable_names_for_imagenet_labels()\n # dataset_utils.write_label_file(labels_to_names, dataset_dir)\n\n return slim.dataset.Dataset(\n data_sources=file_pattern,\n reader=reader,\n decoder=decoder,\n num_samples=split_to_sizes[split_name],\n items_to_descriptions=items_to_descriptions,\n num_classes=num_classes,\n labels_to_names=labels_to_names)\n"
] |
[
[
"tensorflow.concat",
"tensorflow.FixedLenFeature",
"tensorflow.range",
"tensorflow.stack",
"tensorflow.decode_raw",
"tensorflow.reshape",
"tensorflow.cast",
"tensorflow.expand_dims",
"tensorflow.train.string_input_producer",
"tensorflow.name_scope",
"tensorflow.TFRecordReader",
"tensorflow.train.shuffle_batch"
],
[
"tensorflow.FixedLenFeature",
"tensorflow.VarLenFeature"
]
] |
cc-ai/pytorch_GAN_zoo
|
[
"e1281b176f0f4b356dbd2c31b0cf867678b6409d"
] |
[
"visualization/np_visualizer.py"
] |
[
"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nimport numpy as np\nimport scipy\nimport scipy.misc\nimport imageio\nimport torch\nfrom PIL import Image\n\ndef make_numpy_grid(arrays_list, gridMaxWidth=2048,\n imgMinSize=128,\n interpolation='nearest'):\n\n # NCWH format\n N, C, W, H = arrays_list.shape\n\n arrays_list = (arrays_list + 1.0) * 255.0 / 2.0\n\n if C == 1:\n arrays_list = np.reshape(arrays_list, (N, W, H))\n\n gridMaxWidth = max(gridMaxWidth, W)\n\n imgSize = max(W, imgMinSize)\n imgHeight = int((float(imgSize) / W) * H)\n nImgsPerRows = min(N, int(gridMaxWidth // imgSize))\n\n gridWidth = nImgsPerRows * imgSize\n\n nRows = N // nImgsPerRows\n if N % nImgsPerRows > 0:\n nRows += 1\n\n gridHeight = nRows * imgHeight\n if C == 1:\n outGrid = np.zeros((gridHeight, gridWidth), dtype='uint8')\n else:\n outGrid = np.zeros((gridHeight, gridWidth, C), dtype='uint8')\n outGrid += 255\n\n indexImage = 0\n for r in range(nRows):\n for c in range(nImgsPerRows):\n\n if indexImage == N:\n break\n\n xStart = c * imgSize\n yStart = r * imgHeight\n\n # tmpImage = scipy.misc.imresize(\n # arrays_list[indexImage], (imgSize, imgHeight), interp=interpolation)\n \n tmpImage = np.array(Image.fromarray(arrays_list[indexImage]).resize((imgSize, imgHeight)))\n \n if C == 1:\n outGrid[yStart:(yStart + imgHeight),\n xStart:(xStart + imgSize)] = tmpImage\n else:\n outGrid[yStart:(yStart + imgHeight),\n xStart:(xStart + imgSize), :] = tmpImage\n\n indexImage += 1\n\n return outGrid\n\n\ndef publishTensors(data, out_size_image, caption=\"\", window_token=None, env=\"main\"):\n return None\n\n\ndef publishLoss(*args, **kwargs):\n return None\n\n\ndef publishLinePlot(data, xData, name=\"\", window_token=None, env=\"main\"):\n return None\n\n\ndef publishScatterPlot(data, name=\"\", window_token=None):\n return None\n\n\ndef saveTensor(data, out_size_image, path):\n\n interpolation = 'nearest'\n if isinstance(out_size_image, tuple):\n out_size_image = out_size_image[0]\n data = torch.clamp(data, min=-1, max=1)\n outdata = make_numpy_grid(\n data.numpy(), imgMinSize=out_size_image, interpolation=interpolation)\n imageio.imwrite(path, outdata)\n\n\ndef delete_env(env_name):\n return None\n"
] |
[
[
"numpy.reshape",
"torch.clamp",
"numpy.zeros"
]
] |
PKUfudawei/cmssw
|
[
"8fbb5ce74398269c8a32956d7c7943766770c093"
] |
[
"RecoEgamma/ElectronIdentification/python/FWLite.py"
] |
[
"import ROOT\nimport ctypes\nimport pprint\nfrom numpy import exp\n\n# Python wrappers around the Electron MVAs.\n# Usage example in RecoEgamma/ElectronIdentification/test\n\nclass ElectronMVAID:\n \"\"\" Electron MVA wrapper class.\n \"\"\"\n\n def __init__(self, name, tag, categoryCuts, xmls, variablesFile, debug=False):\n self.name = name\n self.tag = tag\n self.categoryCuts = categoryCuts\n self.variablesFile = variablesFile\n self.xmls = ROOT.vector(ROOT.string)()\n for x in xmls: self.xmls.push_back(x)\n self._init = False\n self._debug = debug\n\n def __call__(self, ele, rho, debug=False):\n '''returns a tuple mva_value, category \n ele: a reco::GsfElectron\n convs: conversions\n beam_spot: beam spot\n rho: energy density in the event\n debug: enable debugging mode. \n\n example: \n \n event.getByLabel(('slimmedElectrons'), ele_handle)\n event.getByLabel(('fixedGridRhoFastjetAll'), rho_handle)\n \n electrons = ele_handle.product()\n rho = rho_handle.product()\n\n mva, category = electron_mva_id(electron[0], rho)\n '''\n if not self._init:\n print('Initializing ' + self.name + self.tag)\n ROOT.gInterpreter.Declare('#include \"RecoEgamma/ElectronIdentification/interface/ElectronMVAEstimatorRun2.h\"')\n ROOT.gSystem.Load(\"libRecoEgammaElectronIdentification\")\n categoryCutStrings = ROOT.vector(ROOT.string)()\n for x in self.categoryCuts : \n categoryCutStrings.push_back(x)\n self.estimator = ROOT.ElectronMVAEstimatorRun2(\n self.tag, self.name, len(self.xmls), \n self.variablesFile, categoryCutStrings, self.xmls, self._debug)\n self._init = True\n category = ctypes.c_int(0)\n mva = self.estimator.mvaValue(ele, rho[0], category)\n return mva, category.value\n\n\nclass WorkingPoints(object):\n '''Working Points. Keeps track of the cuts associated to a given flavour of the MVA ID \n for each working point and allows to test the working points'''\n\n def __init__(self, name, tag, working_points, logistic_transform=False):\n self.name = name \n self.tag = tag\n self.working_points = self._reformat_cut_definitions(working_points)\n self.logistic_transform = logistic_transform\n\n def _reformat_cut_definitions(self, working_points):\n new_definitions = dict()\n for wpname, definitions in working_points.items():\n new_definitions[wpname] = dict()\n for name, cut in definitions.cuts.items():\n categ_id = int(name.lstrip('cutCategory'))\n cut = cut.replace('pt','x')\n formula = ROOT.TFormula('_'.join([self.name, wpname, name]), cut)\n new_definitions[wpname][categ_id] = formula\n return new_definitions\n\n def passed(self, ele, mva, category, wp):\n '''return true if ele passes wp'''\n threshold = self.working_points[wp][category].Eval(ele.pt())\n if self.logistic_transform:\n mva = 2.0/(1.0+exp(-2.0*mva))-1\n return mva > threshold\n\n\n# Import information needed to construct the e/gamma MVAs\n\nfrom RecoEgamma.ElectronIdentification.Identification.mvaElectronID_tools \\\n import EleMVA_6CategoriesCuts, mvaVariablesFile, EleMVA_3CategoriesCuts\n\nfrom RecoEgamma.ElectronIdentification.Identification.mvaElectronID_Fall17_iso_V2_cff \\\n import mvaWeightFiles as Fall17_iso_V2_weightFiles\nfrom RecoEgamma.ElectronIdentification.Identification.mvaElectronID_Fall17_noIso_V2_cff \\\n import mvaWeightFiles as Fall17_noIso_V2_weightFiles\nfrom RecoEgamma.ElectronIdentification.Identification.mvaElectronID_Spring16_GeneralPurpose_V1_cff \\\n import mvaSpring16WeightFiles_V1 as mvaSpring16GPWeightFiles_V1\nfrom RecoEgamma.ElectronIdentification.Identification.mvaElectronID_Spring16_HZZ_V1_cff \\\n 
import mvaSpring16WeightFiles_V1 as mvaSpring16HZZWeightFiles_V1\n\nfrom RecoEgamma.ElectronIdentification.Identification.mvaElectronID_Spring16_GeneralPurpose_V1_cff \\\n import workingPoints as mvaSpring16GP_V1_workingPoints\nfrom RecoEgamma.ElectronIdentification.Identification.mvaElectronID_Spring16_HZZ_V1_cff \\\n import workingPoints as mvaSpring16HZZ_V1_workingPoints\nfrom RecoEgamma.ElectronIdentification.Identification.mvaElectronID_Fall17_iso_V2_cff \\\n import workingPoints as Fall17_iso_V2_workingPoints\nfrom RecoEgamma.ElectronIdentification.Identification.mvaElectronID_Fall17_noIso_V2_cff \\\n import workingPoints as Fall17_noIso_V2_workingPoints\n\n# Dictionary with the relecant e/gmma MVAs\n\nelectron_mvas = {\n \"Fall17IsoV2\" : ElectronMVAID(\"ElectronMVAEstimatorRun2\",\"Fall17IsoV2\",\n EleMVA_6CategoriesCuts, Fall17_iso_V2_weightFiles, mvaVariablesFile),\n \"Fall17NoIsoV2\" : ElectronMVAID(\"ElectronMVAEstimatorRun2\",\"Fall17NoIsoV2\",\n EleMVA_6CategoriesCuts, Fall17_noIso_V2_weightFiles, mvaVariablesFile),\n \"Spring16HZZV1\" : ElectronMVAID(\"ElectronMVAEstimatorRun2\",\"Spring16HZZV1\",\n EleMVA_6CategoriesCuts, mvaSpring16HZZWeightFiles_V1, mvaVariablesFile),\n \"Spring16GPV1\" : ElectronMVAID(\"ElectronMVAEstimatorRun2\",\"Spring16GeneralPurposeV1\",\n EleMVA_3CategoriesCuts, mvaSpring16GPWeightFiles_V1, mvaVariablesFile),\n }\n\nworking_points = {\n \"Fall17IsoV2\" : WorkingPoints(\"ElectronMVAEstimatorRun2\",\"Fall17IsoV2\",\n Fall17_iso_V2_workingPoints),\n \"Fall17NoIsoV2\" : WorkingPoints(\"ElectronMVAEstimatorRun2\",\"Fall17NoIsoV2\",\n Fall17_noIso_V2_workingPoints),\n \"Spring16HZZV1\" : WorkingPoints(\"ElectronMVAEstimatorRun2\",\"Spring16HZZV1\",\n mvaSpring16HZZ_V1_workingPoints, logistic_transform=True),\n \"Spring16GPV1\" : WorkingPoints(\"ElectronMVAEstimatorRun2\",\"Spring16GeneralPurposeV1\",\n mvaSpring16GP_V1_workingPoints, logistic_transform=True),\n\n }\n"
] |
[
[
"numpy.exp"
]
] |
Tandon-A/ufldl-python-solutions
|
[
"f58be0f49c4e2266c21316915853404a1a0df55f"
] |
[
"Logitsic_Regression.py"
] |
[
"import numpy as np \nimport scipy.optimize\nimport time \nimport matplotlib.pyplot as plt \nimport pandas as pd \nimport math\n\n\ndef log_reg(theta,x,y):\n \"\"\"\n Arguments:\n theta - A vector containing the parameter values to optimize.\n X - The examples stored in a matrix.\n X(i,j) is the i'th coordinate of the j'th example.\n y - The target value for each example. y(j) is the target for example j.\n \n Basic function to compute cost and gradient for given arguments. \n \"\"\"\n\n no_of_ex = x.shape[1]\n cost = 0\n grad = np.zeros(theta.shape)\n for i in range(no_of_ex):\n val = np.sum(theta[:]*x[:,i])\n val = 1/ (1 + math.exp(-val)) \n cost = cost + (y[i]*math.log(val)) + (1-y[i])*math.log(1-val)\n grad = grad + x[:,i]*(val-y[i])\n cost = -cost\n return cost,grad\n \n\n \n \ndef log_rec_vec(theta,x,y):\n \"\"\"\n An optimized function to compute cost and gradient for given arguments\n \"\"\"\n val = np.dot(theta,x) \n val = 1/(1+np.exp(-val))\n grad = np.transpose(np.dot(x,np.transpose(val - y)))\n cost = -np.sum(y*np.log(val) + (1-y)*np.log(1-val)) \n return grad,cost \n \ndef cost_fun(theta,x,y):\n \"\"\"\n Function to calculate cost\n \"\"\"\n val = np.dot(theta,x) \n val = 1/(1+np.exp(-val))\n cost = -np.sum(y*np.log(val) + (1-y)*np.log(1-val)) \n return cost\n \ndef grad_fun(theta,x,y):\n \"\"\"\n Function to calculate gradient\n \"\"\"\n val = np.dot(theta,x) \n val = 1/(1+np.exp(-val))\n grad = np.transpose(np.dot(x,np.transpose(val - y)))\n return grad\n\ndef safe_log(x):\n \"\"\"\n Function to calculate safe_log i.e. replace nan/inf with -1e+4\n \"\"\"\n l = np.log(x)\n l[np.logical_or(np.isnan(l),np.isinf(l)) ] = -1e+4\n return l \n \ndef safe_cost_fun(theta,x,y):\n \"\"\"\n Function to calculate cost using safe_log\n \"\"\"\n val = np.dot(theta,x) \n val = 1/(1+np.exp(-val))\n cost = -np.sum(y*safe_log(val) + (1-y)*safe_log(1-val)) \n return cost\n\n\ndef accuracy(theta,x,y):\n \"\"\"\n Function to calculate accuracy of the logistic regression model\n \"\"\"\n val = np.dot(theta,x)\n val = 1/(1+np.exp(-val))\n correct = np.sum(np.equal(y, val>0.5))\n return correct/y.size\n\ndata = pd.read_csv(\"mnist.csv\") #specify the path to csv file of MNIST database\ndata = np.array(data)\ndata = np.insert(data,1,1,axis=1)\nnp.random.shuffle(data)\ntrain = data[0:30000]\ntest = data[30000:]\n\n#taking data rows with label digit = 0 or label digit = 1\ntrain_data = train[np.logical_or(train[:,0] == 0, train[:,0] == 1), 1:] \ntrain_label = train[np.logical_or(train[:,0] == 0, train[:,0] == 1), 0]\n\ntest_data = test[np.logical_or(test[:,0] == 0, test[:,0] == 1), 1:]\ntest_label = test[np.logical_or(test[:,0] == 0, test[:,0] == 1), 0]\n\n#normalizing database\ntrain_data[train_data>0] = 1\ntest_data[test_data>0] = 1\ntrain_data = np.transpose(train_data)\ntest_data = np.transpose(test_data)\n\nj_hist = []\n\nt0 = time.time()\nres = scipy.optimize.minimize(\n fun=cost_fun,\n x0=np.random.rand(train_data.shape[0])*0.001,\n args=(train_data, train_label),\n method='L-BFGS-B',\n jac=grad_fun,\n options={'maxiter': 100, 'disp': True},\n callback=lambda x: j_hist.append(cost_fun(x, train_data, train_label)),\n)\nt1 = time.time()\noptimal_theta = res.x\nprint (\"Optimization using lbfgs took %r seconds\" %(t1-t0))\n\nplt.plot(j_hist, marker='o')\nplt.xlabel('Iterations')\nplt.ylabel('J(theta)')\n\n\n\nj_hist = []\n\nt0 = time.time()\nres = scipy.optimize.minimize(\n fun=safe_cost_fun,\n x0=np.random.rand(train_data.shape[0])*0.001,\n args=(train_data, train_label),\n method='bfgs',\n 
jac=grad_fun,\n options={'maxiter': 100, 'disp': True},\n callback=lambda x: j_hist.append(safe_cost_fun(x, train_data, train_label)),\n)\nt1 = time.time()\noptimal_theta = res.x\nprint (\"Optimization using bfgs and safe log took %r seconds\" %(t1-t0))\n\nplt.plot(j_hist, marker='o')\nplt.xlabel('Iterations')\nplt.ylabel('J(theta)')\n\n\nprint (\"training accuracy = %r\" %(accuracy(optimal_theta,train_data,train_label)))\nprint (\"testing accuracy = %r\" %(accuracy(optimal_theta,test_data,test_label)))\n\n"
] |
[
[
"numpy.dot",
"numpy.log",
"pandas.read_csv",
"numpy.isinf",
"numpy.isnan",
"numpy.random.shuffle",
"matplotlib.pyplot.plot",
"numpy.logical_or",
"numpy.insert",
"numpy.equal",
"numpy.transpose",
"numpy.exp",
"matplotlib.pyplot.xlabel",
"numpy.random.rand",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"matplotlib.pyplot.ylabel"
]
] |
aecgabriel/uuv_test
|
[
"d0729639808a0616881723116aebd62d299aabb9"
] |
[
"uuv_control/uuv_trajectory_control/src/uuv_trajectory_generator/path_generator/cs_interpolator.py"
] |
[
"# Copyright (c) 2016 The UUV Simulator Authors.\n# All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom scipy.interpolate import splrep, splev\nimport numpy as np\nfrom copy import deepcopy\nfrom uuv_waypoints import Waypoint, WaypointSet\nfrom ..trajectory_point import TrajectoryPoint\nfrom tf.transformations import quaternion_multiply, quaternion_about_axis, quaternion_conjugate, quaternion_from_matrix, euler_from_matrix\nfrom line_segment import LineSegment\nfrom bezier_curve import BezierCurve\nfrom path_generator import PathGenerator\nfrom visualization_msgs.msg import MarkerArray\n\n\nclass CSInterpolator(PathGenerator):\n \"\"\"\n Interpolator that will generate cubic Bezier curve segments for a set of waypoints.\n \"\"\"\n LABEL = 'cubic'\n\n def __init__(self):\n super(CSInterpolator, self).__init__(self)\n\n # Set of interpolation functions for each degree of freedom\n # The heading function interpolates the given heading offset and its\n # value is added to the heading computed from the trajectory\n self._interp_fcns = dict(pos=None,\n heading=None)\n self._heading_spline = None\n\n def init_interpolator(self):\n if self._waypoints is None:\n return False\n\n self._markers_msg = MarkerArray()\n self._marker_id = 0\n\n self._interp_fcns['pos'] = list()\n self._segment_to_wp_map = [0]\n if self._waypoints.num_waypoints == 2:\n self._interp_fcns['pos'].append(\n LineSegment(self._waypoints.get_waypoint(0).pos,\n self._waypoints.get_waypoint(1).pos))\n self._segment_to_wp_map.append(1)\n elif self._waypoints.num_waypoints > 2:\n self._interp_fcns['pos'] = BezierCurve.generate_cubic_curve(\n [self._waypoints.get_waypoint(i).pos for i in range(self._waypoints.num_waypoints)])\n else:\n return False\n\n # Reparametrizing the curves\n lengths = [seg.get_length() for seg in self._interp_fcns['pos']]\n lengths = [0] + lengths\n self._s = np.cumsum(lengths) / np.sum(lengths)\n mean_vel = np.mean(\n [self._waypoints.get_waypoint(k).max_forward_speed for k in range(self._waypoints.num_waypoints)])\n if self._duration is None:\n self._duration = np.sum(lengths) / mean_vel\n if self._start_time is None:\n self._start_time = 0.0\n\n if self._waypoints.num_waypoints == 2:\n head_offset_line = deepcopy(self._waypoints.get_waypoint(1).heading_offset)\n self._interp_fcns['heading'] = lambda x: head_offset_line\n else:\n # Set a simple spline to interpolate heading offset, if existent\n heading = [self._waypoints.get_waypoint(i).heading_offset for i in range(self._waypoints.num_waypoints)]\n self._heading_spline = splrep(self._s, heading, k=3, per=False)\n self._interp_fcns['heading'] = lambda x: splev(x, self._heading_spline)\n return True\n\n return True\n\n def set_parameters(self, params):\n \"\"\"Not implemented for this interpolator.\"\"\"\n return True\n\n def get_samples(self, max_time, step=0.001):\n if self._waypoints is None:\n return None\n if self._interp_fcns['pos'] is None:\n return None\n s = np.arange(0, 1 + step, step)\n\n pnts = list()\n for i in s:\n pnt 
= TrajectoryPoint()\n pnt.pos = self.generate_pos(i).tolist()\n pnt.t = 0.0\n pnts.append(pnt)\n return pnts\n\n def generate_pos(self, s):\n if self._interp_fcns['pos'] is None:\n return None\n idx = self.get_segment_idx(s)\n if idx == 0:\n u_k = 0\n pos = self._interp_fcns['pos'][idx].interpolate(u_k)\n else:\n u_k = (s - self._s[idx - 1]) / (self._s[idx] - self._s[idx - 1])\n pos = self._interp_fcns['pos'][idx - 1].interpolate(u_k)\n return pos\n\n def generate_pnt(self, s, t, *args):\n pnt = TrajectoryPoint()\n # Trajectory time stamp\n pnt.t = t\n # Set position vector\n pnt.pos = self.generate_pos(s).tolist()\n # Set rotation quaternion\n pnt.rotq = self.generate_quat(s)\n return pnt\n\n def generate_quat(self, s):\n s = max(0, s)\n s = min(s, 1)\n\n if s == 0:\n self._last_rot = deepcopy(self._init_rot)\n return self._init_rot\n\n last_s = max(0, s - self._s_step)\n\n this_pos = self.generate_pos(s)\n last_pos = self.generate_pos(last_s)\n\n dx = this_pos[0] - last_pos[0]\n dy = this_pos[1] - last_pos[1]\n dz = this_pos[2] - last_pos[2]\n\n rotq = self._compute_rot_quat(dx, dy, dz)\n self._last_rot = rotq\n # Calculating the step for the heading offset\n q_step = quaternion_about_axis(\n self._interp_fcns['heading'](s),\n np.array([0, 0, 1]))\n # Adding the heading offset to the rotation quaternion\n rotq = quaternion_multiply(rotq, q_step)\n return rotq\n"
] |
[
[
"scipy.interpolate.splrep",
"numpy.arange",
"numpy.cumsum",
"scipy.interpolate.splev",
"numpy.array",
"numpy.sum"
]
] |
jaysonfig/EXOSIMS
|
[
"d81436c2b17cdb779cad519b1932d3e3ad49b55e",
"d81436c2b17cdb779cad519b1932d3e3ad49b55e"
] |
[
"EXOSIMS/PlanetPopulation/JupiterTwin.py",
"EXOSIMS/SurveySimulation/tieredScheduler.py"
] |
[
"from EXOSIMS.Prototypes.PlanetPopulation import PlanetPopulation\nimport numpy as np\nimport astropy.units as u\n\nclass JupiterTwin(PlanetPopulation):\n \"\"\"\n Population of Jupiter twins (11.209 R_Earth, 317.83 M_Eearth, 1 p_Earth)\n On eccentric orbits (0.7 to 1.5 AU)*5.204.\n Numbers pulled from nssdc.gsfc.nasa.gov/planetary/factsheet/jupiterfact.html\n \n This implementation is intended to enforce this population regardless\n of JSON inputs. The only inputs that will not be disregarded are erange\n and constrainOrbits.\n \"\"\"\n\n def __init__(self, eta=1, erange=[0.,0.048], constrainOrbits=True, **specs):\n #eta is probability of planet occurance in a system. I set this to 1\n specs['erange'] = erange\n specs['constrainOrbits'] = constrainOrbits\n aEtoJ = 5.204\n RpEtoJ = 11.209\n MpEtoJ = 317.83\n pJ = 0.538# 0.538 from nssdc.gsfc.nasa.gov\n # specs being modified in JupiterTwin\n specs['eta'] = eta\n specs['arange'] = [1*aEtoJ,1*aEtoJ]#0.7*aEtoJ, 1.5*aEtoJ]\n specs['Rprange'] = [1*RpEtoJ,1*RpEtoJ]\n specs['Mprange'] = [1*MpEtoJ,1*MpEtoJ]\n specs['prange'] = [pJ,pJ]\n specs['scaleOrbits'] = True\n\n self.RpEtoJ = RpEtoJ\n self.pJ = pJ\n\n PlanetPopulation.__init__(self, **specs)\n \n def gen_plan_params(self, n):\n \"\"\"Generate semi-major axis (AU), eccentricity, geometric albedo, and\n planetary radius (earthRad)\n \n Semi-major axis and eccentricity are uniformly distributed with all\n other parameters constant.\n \n Args:\n n (integer):\n Number of samples to generate\n \n Returns:\n tuple:\n a (astropy Quantity array):\n Semi-major axis in units of AU\n e (float ndarray):\n Eccentricity\n p (float ndarray):\n Geometric albedo\n Rp (astropy Quantity array):\n Planetary radius in units of earthRad\n \n \"\"\"\n n = self.gen_input_check(n)\n # generate samples of semi-major axis\n ar = self.arange.to('AU').value\n # check if constrainOrbits == True for eccentricity\n if self.constrainOrbits:\n # restrict semi-major axis limits\n arcon = np.array([ar[0]/(1.-self.erange[0]), ar[1]/(1.+self.erange[0])])\n a = np.random.uniform(low=arcon[0], high=arcon[1], size=n)*u.AU\n tmpa = a.to('AU').value\n\n # upper limit for eccentricity given sma\n elim = np.zeros(len(a))\n amean = np.mean(ar)\n elim[tmpa <= amean] = 1. - ar[0]/tmpa[tmpa <= amean]\n elim[tmpa > amean] = ar[1]/tmpa[tmpa>amean] - 1.\n elim[elim > self.erange[1]] = self.erange[1]\n elim[elim < self.erange[0]] = self.erange[0]\n \n # uniform distribution\n e = np.random.uniform(low=self.erange[0], high=elim, size=n)\n else:\n a = np.random.uniform(low=ar[0], high=ar[1], size=n)*u.AU\n e = np.random.uniform(low=self.erange[0], high=self.erange[1], size=n)\n\n # generate geometric albedo\n p = self.pJ*np.ones((n,))\n # generate planetary radius\n Rp = np.ones((n,))*u.earthRad*self.RpEtoJ\n \n return a, e, p, Rp\n",
"from EXOSIMS.Prototypes.SurveySimulation import SurveySimulation\nimport EXOSIMS, os\nimport astropy.units as u\nimport astropy.constants as const\nimport numpy as np\nimport itertools\nfrom scipy import interpolate\ntry:\n import cPickle as pickle\nexcept:\n import pickle\nimport time\nfrom EXOSIMS.util.deltaMag import deltaMag\n\n\nclass tieredScheduler(SurveySimulation):\n \"\"\"tieredScheduler \n \n This class implements a tiered scheduler that independantly schedules the observatory\n while the starshade slews to its next target.\n \n Args:\n coeffs (iterable 7x1):\n Cost function coefficients: slew distance, completeness, intTime,\n deep-dive least visited ramp, deep-dive unvisited ramp, unvisited ramp, \n and least-visited ramp\n occHIPs (iterable nx1):\n List of star HIP numbers to initialize occulter target list.\n topstars (integer):\n Number of HIP numbers to recieve preferential treatment.\n revisit_wait (float):\n Wait time threshold for star revisits. The value given is the fraction of a \n characterized planet's period that must be waited before scheduling a revisit.\n revisit_weight (float):\n Weight used to increase preference for coronograph revisits.\n GAPortion (float):\n Portion of mission time used for general astrophysics.\n int_inflection (boolean):\n Calculate integration time using the pre-calculated integration time curves.\n Default is False.\n GA_simult_det_fraction (float):\n Fraction of detection time to be considered as GA time.\n promote_hz_stars (boolean):\n Flag that allows promotion of targets with planets in the habitable zone \n to the occulter target list.\n phase1_end (int):\n Number of days to wait before the end of phase 1, when phase 1 ends,\n target promotion begins.\n n_det_remove (int):\n Minimum number of visits with no detections required to filter off star\n n_det_min (int):\n Minimum number of detections required for promotion\n occ_max_visits (int):\n Number of maximum visits to a star allowed by the occulter.\n max_successful_chars (int):\n Maximum number of successful characterizations on a given star before \n it is removed from the target list.\n max_successful_dets (int):\n Maximum number of successful detections on a given star before \n it is removed from the target list.\n nmax_promo_det (int):\n Number of detection on a star required to be promoted regardless of\n detection occurance times.\n lum_exp (int):\n Exponent used in the luminosity weighting function.\n tot_det_int_cutoff (float):\n Number of total days the scheduler is allowed to spend on detections.\n \\*\\*specs:\n user specified values\n \"\"\"\n\n def __init__(self, coeffs=[2,1,1,8,4,1,1], occHIPs=[], topstars=0, revisit_wait=0.5, \n revisit_weight=1.0, GAPortion=.25, int_inflection=False,\n GA_simult_det_fraction=.07, promote_hz_stars=False, phase1_end=365, \n n_det_remove=3, n_det_min=3, occ_max_visits=3, max_successful_chars=1,\n max_successful_dets=4, nmax_promo_det=4, lum_exp=1, tot_det_int_cutoff=None,\n **specs):\n \n SurveySimulation.__init__(self, **specs)\n \n #verify that coefficients input is iterable 4x1\n if not(isinstance(coeffs,(list,tuple,np.ndarray))) or (len(coeffs) != 7):\n raise TypeError(\"coeffs must be a 7 element iterable\")\n\n TK = self.TimeKeeping\n TL = self.TargetList\n OS = self.OpticalSystem\n SU = self.SimulatedUniverse\n\n #Add to outspec\n self._outspec['coeffs'] = coeffs\n self._outspec['occHIPs'] = occHIPs\n self._outspec['topstars'] = topstars\n self._outspec['revisit_wait'] = revisit_wait\n self._outspec['revisit_weight'] = 
revisit_weight\n self._outspec['GAPortion'] = GAPortion\n self._outspec['int_inflection'] = int_inflection\n self._outspec['GA_simult_det_fraction'] = GA_simult_det_fraction\n self._outspec['promote_hz_stars'] = promote_hz_stars\n self._outspec['phase1_end'] = phase1_end\n self._outspec['n_det_remove'] = n_det_remove\n self._outspec['n_det_min'] = n_det_min\n self._outspec['occ_max_visits'] = occ_max_visits\n self._outspec['max_successful_chars'] = max_successful_chars\n self._outspec['lum_exp'] = lum_exp\n\n #normalize coefficients\n coeffs = np.array(coeffs)\n coeffs = coeffs/np.linalg.norm(coeffs, ord=1)\n \n self.coeffs = coeffs\n if occHIPs != []:\n occHIPs_path = os.path.join(EXOSIMS.__path__[0],'Scripts',occHIPs)\n assert os.path.isfile(occHIPs_path), \"%s is not a file.\"%occHIPs_path\n with open(occHIPs_path, 'r') as ofile:\n HIPsfile = ofile.read()\n self.occHIPs = HIPsfile.split(',')\n if len(self.occHIPs) <= 1:\n self.occHIPs = HIPsfile.split('\\n')\n else:\n # assert occHIPs != [], \"occHIPs target list is empty, occHIPs file must be specified in script file\"\n self.occHIPs = occHIPs\n\n self.occHIPs = [hip.strip() for hip in self.occHIPs]\n\n self.occ_arrives = TK.currentTimeAbs.copy() # The timestamp at which the occulter finishes slewing\n self.occ_starRevisit = np.array([]) # Array of star revisit times\n self.occ_starVisits = np.zeros(TL.nStars, dtype=int) # The number of times each star was visited by the occulter\n self.is_phase1 = True # Flag that determines whether or not we are in phase 1\n self.phase1_end = TK.missionStart.copy() + phase1_end*u.d # The designated end time for the first observing phase\n self.FA_status = np.zeros(TL.nStars, dtype=bool) # False Alarm status array \n self.GA_percentage = GAPortion # Percentage of mission devoted to general astrophysics\n self.GAtime = 0.*u.d # Current amount of time devoted to GA\n self.GA_simult_det_fraction = GA_simult_det_fraction # Fraction of detection time allocated to GA\n self.goal_GAtime = None # The desired amount of GA time based off of mission time\n self.curves = None\n self.ao = None\n self.int_inflection = int_inflection # Use int_inflection to calculate int times\n self.promote_hz_stars = promote_hz_stars # Flag to promote hz stars\n self.last_chard = None # Keeps track of last characterized star to avoid repeats\n self.lum_exp = lum_exp # The exponent to use for luminosity weighting on coronograph targets \n\n self.ready_to_update = False\n self.occ_slewTime = 0.*u.d\n self.occ_sd = 0.*u.rad\n\n self.sInd_charcounts = {} # Number of characterizations by star index\n self.sInd_detcounts = np.zeros(TL.nStars, dtype=int) # Number of detections by star index\n self.sInd_dettimes = {}\n self.n_det_remove = n_det_remove # Minimum number of visits with no detections required to filter off star\n self.n_det_min = n_det_min # Minimum number of detections required for promotion\n self.occ_max_visits = occ_max_visits # Maximum number of allowed occulter visits\n self.max_successful_chars = max_successful_chars # Maximum allowed number of successful chars of deep dive targets before removal from target list\n self.max_successful_dets = max_successful_dets\n self.nmax_promo_det = nmax_promo_det\n if tot_det_int_cutoff is None:\n self.tot_det_int_cutoff = np.inf\n else:\n self.tot_det_int_cutoff = tot_det_int_cutoff*u.d\n self.tot_dettime = 0.0*u.d\n\n self.topstars = topstars # Allow preferential treatment of top n stars in occ_sInds target list\n self.coeff_data_a3 = []\n self.coeff_data_a4 = []\n self.coeff_time 
= []\n\n # self.revisit_wait = revisit_wait * u.d\n EEID = 1*u.AU*np.sqrt(TL.L)\n mu = const.G*(TL.MsTrue)\n T = (2.*np.pi*np.sqrt(EEID**3/mu)).to('d')\n self.revisit_wait = revisit_wait * T\n\n self.revisit_weight = revisit_weight\n self.no_dets = np.ones(self.TargetList.nStars, dtype=bool)\n\n self.promoted_stars = [] # list of stars promoted from the coronograph list to the starshade list\n self.ignore_stars = [] # list of stars that have been removed from the occ_sInd list\n self.t_char_earths = np.array([]) # corresponding integration times for earths\n\n # Precalculating intTimeFilter\n allModes = OS.observingModes\n char_mode = list(filter(lambda mode: 'spec' in mode['inst']['name'], allModes))[0]\n sInds = np.arange(TL.nStars) #Initialize some sInds array\n #ORIGINALself.occ_valfZmin, self.occ_absTimefZmin = self.ZodiacalLight.calcfZmin(sInds, self.Observatory, TL, self.TimeKeeping, char_mode, self.cachefname) # find fZmin to use in intTimeFilter\n koMap = self.koMaps[char_mode['syst']['name']]\n self.fZQuads = self.ZodiacalLight.calcfZmin(sInds, self.Observatory, TL, self.TimeKeeping, char_mode, self.cachefname, koMap, self.koTimes) # find fZmin to use in intTimeFilter\n self.occ_valfZmin, self.occ_absTimefZmin = self.ZodiacalLight.extractfZmin_fZQuads(self.fZQuads)\n fEZ = self.ZodiacalLight.fEZ0 # grabbing fEZ0\n dMag = self.dMagint[sInds] # grabbing dMag\n WA = self.WAint[sInds] # grabbing WA\n self.occ_intTimesIntTimeFilter = self.OpticalSystem.calc_intTime(TL, sInds, self.occ_valfZmin, fEZ, dMag, WA, self.mode)*char_mode['timeMultiplier'] # intTimes to filter by\n self.occ_intTimeFilterInds = np.where((self.occ_intTimesIntTimeFilter > 0)*(self.occ_intTimesIntTimeFilter <= self.OpticalSystem.intCutoff) > 0)[0] # These indices are acceptable for use simulating\n\n # Promote all stars assuming they have known earths\n occ_sInds_with_earths = []\n if TL.earths_only:\n\n Obs = self.Observatory\n ZL = self.ZodiacalLight\n char_mode = list(filter(lambda mode: 'spec' in mode['inst']['name'], OS.observingModes))[0]\n\n # check for earths around the available stars\n for sInd in np.arange(TL.nStars):\n pInds = np.where(SU.plan2star == sInd)[0]\n pinds_earthlike = self.is_earthlike(pInds, sInd)\n if np.any(pinds_earthlike):\n self.known_earths = np.union1d(self.known_earths, pInds[pinds_earthlike]).astype(int)\n occ_sInds_with_earths.append(sInd)\n self.promoted_stars = np.union1d(self.promoted_stars, occ_sInds_with_earths).astype(int)\n\n # calculate example integration times\n sInds = SU.plan2star[self.known_earths]\n fZ = ZL.fZ(Obs, TL, sInds, TK.currentTimeAbs.copy(), char_mode)\n fEZ = SU.fEZ[self.known_earths].to('1/arcsec2')\n WAp = SU.WA[self.known_earths]\n dMag = SU.dMag[self.known_earths]\n self.t_char_earths = OS.calc_intTime(TL, sInds, fZ, fEZ, dMag, WAp, char_mode)\n\n\n def run_sim(self):\n \"\"\"Performs the survey simulation \n \n Returns:\n mission_end (string):\n Message printed at the end of a survey simulation.\n \n \"\"\"\n \n OS = self.OpticalSystem\n TL = self.TargetList\n SU = self.SimulatedUniverse\n Obs = self.Observatory\n TK = self.TimeKeeping\n Comp = self.Completeness\n \n # TODO: start using this self.currentSep\n # set occulter separation if haveOcculter\n self.currentSep = Obs.occulterSep\n \n # Choose observing modes selected for detection (default marked with a flag),\n det_mode = list(filter(lambda mode: mode['detectionMode'] == True, OS.observingModes))[0]\n # and for characterization (default is first spectro/IFS mode)\n spectroModes = 
list(filter(lambda mode: 'spec' in mode['inst']['name'], OS.observingModes))\n if np.any(spectroModes):\n char_mode = spectroModes[0]\n # if no spectro mode, default char mode is first observing mode\n else:\n char_mode = OS.observingModes[0]\n \n # Begin Survey, and loop until mission is finished\n self.logger.info('OB{}: survey beginning.'.format(TK.OBnumber+1))\n self.vprint('OB{}: survey beginning.'.format(TK.OBnumber+1))\n t0 = time.time()\n sInd = None\n occ_sInd = None\n cnt = 0\n\n while not TK.mission_is_over(OS, Obs, det_mode):\n \n # Acquire the NEXT TARGET star index and create DRM\n prev_occ_sInd = occ_sInd\n old_sInd = sInd #used to save sInd if returned sInd is None\n waitTime = None\n DRM, sInd, occ_sInd, t_det, sd, occ_sInds = self.next_target(sInd, occ_sInd, det_mode, char_mode)\n\n true_t_det = t_det*det_mode['timeMultiplier'] + Obs.settlingTime + det_mode['syst']['ohTime']\n if sInd != occ_sInd and sInd is not None:\n assert t_det != 0, \"Integration time can't be 0.\"\n\n if sInd is not None and (TK.currentTimeAbs.copy() + true_t_det) >= self.occ_arrives and occ_sInd != self.last_chard:\n sInd = occ_sInd\n if sInd == occ_sInd:\n self.ready_to_update = True\n\n time2arrive = self.occ_arrives - TK.currentTimeAbs.copy()\n\n if sInd is not None:\n cnt += 1\n\n # clean up revisit list when one occurs to prevent repeats\n if np.any(self.starRevisit) and np.any(np.where(self.starRevisit[:,0] == float(sInd))):\n s_revs = np.where(self.starRevisit[:,0] == float(sInd))[0]\n t_revs = np.where(self.starRevisit[:,1]*u.day - TK.currentTimeNorm.copy() < 0*u.d)[0]\n self.starRevisit = np.delete(self.starRevisit, np.intersect1d(s_revs,t_revs),0)\n\n # get the index of the selected target for the extended list\n if TK.currentTimeNorm.copy() > TK.missionLife and self.starExtended.shape[0] == 0:\n for i in range(len(self.DRM)):\n if np.any([x == 1 for x in self.DRM[i]['plan_detected']]):\n self.starExtended = np.hstack((self.starExtended, self.DRM[i]['star_ind']))\n self.starExtended = np.unique(self.starExtended)\n \n # Beginning of observation, start to populate DRM\n DRM['OB_nb'] = TK.OBnumber+1\n DRM['ObsNum'] = cnt\n DRM['star_ind'] = sInd\n pInds = np.where(SU.plan2star == sInd)[0]\n DRM['plan_inds'] = pInds.astype(int).tolist()\n\n if sInd == occ_sInd:\n # wait until expected arrival time is observed\n if time2arrive > 0*u.d:\n TK.advanceToAbsTime(self.occ_arrives)\n if time2arrive > 1*u.d:\n self.GAtime = self.GAtime + time2arrive.to('day')\n\n TK.obsStart = TK.currentTimeNorm.copy().to('day')\n\n self.logger.info(' Observation #%s, target #%s/%s with %s planet(s), mission time: %s'\\\n %(cnt, sInd+1, TL.nStars, len(pInds), TK.obsStart.round(2)))\n self.vprint(' Observation #%s, target #%s/%s with %s planet(s), mission time: %s'\\\n %(cnt, sInd+1, TL.nStars, len(pInds), TK.obsStart.round(2)))\n\n DRM['arrival_time'] = TK.currentTimeNorm.copy().to('day')\n\n if sInd != occ_sInd:\n self.starVisits[sInd] += 1\n # PERFORM DETECTION and populate revisit list attribute.\n # First store fEZ, dMag, WA\n if np.any(pInds):\n DRM['det_fEZ'] = SU.fEZ[pInds].to('1/arcsec2').value.tolist()\n DRM['det_dMag'] = SU.dMag[pInds].tolist()\n DRM['det_WA'] = SU.WA[pInds].to('mas').value.tolist()\n detected, det_fZ, det_systemParams, det_SNR, FA = self.observation_detection(sInd, t_det, det_mode)\n\n if np.any(detected):\n self.sInd_detcounts[sInd] += 1\n self.sInd_dettimes[sInd] = (self.sInd_dettimes.get(sInd) or []) + [TK.currentTimeNorm.copy().to('day')]\n self.vprint(' Det. 
results are: %s'%(detected))\n\n # update GAtime\n self.GAtime = self.GAtime + t_det.to('day')*self.GA_simult_det_fraction\n self.tot_dettime += t_det.to('day')\n\n # populate the DRM with detection results\n DRM['det_time'] = t_det.to('day')\n DRM['det_status'] = detected\n DRM['det_SNR'] = det_SNR\n DRM['det_fZ'] = det_fZ.to('1/arcsec2')\n DRM['det_params'] = det_systemParams\n DRM['FA_det_status'] = int(FA)\n\n det_comp = Comp.comp_per_intTime(t_det, TL, sInd, det_fZ,\n self.ZodiacalLight.fEZ0, self.WAint[sInd], det_mode)[0]\n DRM['det_comp'] = det_comp\n DRM['det_mode'] = dict(det_mode)\n del DRM['det_mode']['inst'], DRM['det_mode']['syst']\n \n elif sInd == occ_sInd:\n self.occ_starVisits[occ_sInd] += 1\n self.last_chard = occ_sInd\n # PERFORM CHARACTERIZATION and populate spectra list attribute.\n occ_pInds = np.where(SU.plan2star == occ_sInd)[0]\n sInd = occ_sInd\n\n DRM['slew_time'] = self.occ_slewTime.to('day').value\n DRM['slew_angle'] = self.occ_sd.to('deg').value\n slew_mass_used = self.occ_slewTime*Obs.defburnPortion*Obs.flowRate\n DRM['slew_dV'] = (self.occ_slewTime*self.ao*Obs.defburnPortion).to('m/s').value\n DRM['slew_mass_used'] = slew_mass_used.to('kg')\n Obs.scMass = Obs.scMass - slew_mass_used\n DRM['scMass'] = Obs.scMass.to('kg')\n if Obs.twotanks:\n Obs.slewMass = Obs.slewMass - slew_mass_used\n DRM['slewMass'] = Obs.slewMass.to('kg')\n\n self.logger.info(' Starshade and telescope aligned at target star')\n self.vprint(' Starshade and telescope aligned at target star')\n\n # PERFORM CHARACTERIZATION and populate spectra list attribute\n characterized, char_fZ, char_systemParams, char_SNR, char_intTime = \\\n self.observation_characterization(sInd, char_mode)\n if np.any(characterized):\n self.vprint(' Char. results are: %s'%(characterized.T))\n else:\n # make sure we don't accidentally double characterize\n TK.advanceToAbsTime(TK.currentTimeAbs.copy() + .01*u.d)\n assert char_intTime != 0, \"Integration time can't be 0.\"\n if np.any(occ_pInds):\n DRM['char_fEZ'] = SU.fEZ[occ_pInds].to('1/arcsec2').value.tolist()\n DRM['char_dMag'] = SU.dMag[occ_pInds].tolist()\n DRM['char_WA'] = SU.WA[occ_pInds].to('mas').value.tolist()\n DRM['char_mode'] = dict(char_mode)\n del DRM['char_mode']['inst'], DRM['char_mode']['syst']\n\n # update the occulter wet mass\n if OS.haveOcculter and char_intTime is not None:\n DRM = self.update_occulter_mass(DRM, sInd, char_intTime, 'char')\n char_comp = Comp.comp_per_intTime(char_intTime, TL, occ_sInd, char_fZ,\n self.ZodiacalLight.fEZ0, self.WAint[occ_sInd], char_mode)[0]\n DRM['char_comp'] = char_comp\n FA = False\n # populate the DRM with characterization results\n DRM['char_time'] = char_intTime.to('day') if char_intTime else 0.*u.day\n #DRM['char_counts'] = self.sInd_charcounts[sInd]\n DRM['char_status'] = characterized[:-1] if FA else characterized\n DRM['char_SNR'] = char_SNR[:-1] if FA else char_SNR\n DRM['char_fZ'] = char_fZ.to('1/arcsec2')\n DRM['char_params'] = char_systemParams\n # populate the DRM with FA results\n DRM['FA_det_status'] = int(FA)\n DRM['FA_char_status'] = characterized[-1] if FA else 0\n DRM['FA_char_SNR'] = char_SNR[-1] if FA else 0.\n DRM['FA_char_fEZ'] = self.lastDetected[sInd,1][-1]/u.arcsec**2 if FA else 0./u.arcsec**2\n DRM['FA_char_dMag'] = self.lastDetected[sInd,2][-1] if FA else 0.\n DRM['FA_char_WA'] = self.lastDetected[sInd,3][-1]*u.arcsec if FA else 0.*u.arcsec\n\n # add star back into the revisit list\n if np.any(characterized):\n char = np.where(characterized)[0]\n pInds = np.where(SU.plan2star == 
sInd)[0]\n smin = np.min(SU.s[pInds[char]])\n pInd_smin = pInds[np.argmin(SU.s[pInds[char]])]\n\n Ms = TL.MsTrue[sInd]\n sp = smin\n Mp = SU.Mp[pInd_smin]\n mu = const.G*(Mp + Ms)\n T = 2.*np.pi*np.sqrt(sp**3/mu)\n t_rev = TK.currentTimeNorm.copy() + T/2.\n\n self.goal_GAtime = self.GA_percentage * TK.currentTimeNorm.copy().to('day')\n goal_GAdiff = self.goal_GAtime - self.GAtime\n\n # allocate extra time to GA if we are falling behind\n if goal_GAdiff > 1*u.d and TK.currentTimeAbs.copy() < self.occ_arrives:\n GA_diff = min(self.occ_arrives - TK.currentTimeAbs.copy(), goal_GAdiff)\n self.vprint('Allocating time %s to general astrophysics'%(GA_diff))\n self.GAtime = self.GAtime + GA_diff\n TK.advanceToAbsTime(TK.currentTimeAbs.copy() + GA_diff)\n # allocate time if there is no target for the starshade\n elif goal_GAdiff > 1*u.d and (self.occ_arrives - TK.currentTimeAbs.copy()) < -5*u.d and not np.any(occ_sInds):\n self.vprint('No Available Occulter Targets: Allocating time %s to general astrophysics'%(goal_GAdiff))\n self.GAtime = self.GAtime + goal_GAdiff\n TK.advanceToAbsTime(TK.currentTimeAbs.copy() + goal_GAdiff)\n\n DRM['exoplanetObsTime'] = TK.exoplanetObsTime.copy()\n # Append result values to self.DRM\n self.DRM.append(DRM)\n\n # Calculate observation end time\n TK.obsEnd = TK.currentTimeNorm.copy().to('day')\n\n # With prototype TimeKeeping, if no OB duration was specified, advance\n # to the next OB with timestep equivalent to time spent on one target\n if np.isinf(TK.OBduration) and (TK.missionPortion < 1):\n self.arbitrary_time_advancement(TK.currentTimeNorm.to('day').copy() - DRM['arrival_time'])\n\n else:#sInd == None\n sInd = old_sInd#Retain the last observed star\n if(TK.currentTimeNorm.copy() >= TK.OBendTimes[TK.OBnumber]): # currentTime is at end of OB\n #Conditional Advance To Start of Next OB\n if not TK.mission_is_over(OS, Obs,det_mode):#as long as the mission is not over\n TK.advancetToStartOfNextOB()#Advance To Start of Next OB\n elif(waitTime is not None):\n #CASE 1: Advance specific wait time\n success = TK.advanceToAbsTime(TK.currentTimeAbs.copy() + waitTime)\n self.vprint('waitTime is not None')\n else:\n startTimes = TK.currentTimeAbs.copy() + np.zeros(TL.nStars)*u.d # Start Times of Observations\n observableTimes = Obs.calculate_observableTimes(TL,np.arange(TL.nStars),startTimes,self.koMaps,self.koTimes,self.mode)[0]\n #CASE 2 If There are no observable targets for the rest of the mission\n if((observableTimes[(TK.missionFinishAbs.copy().value*u.d > observableTimes.value*u.d)*(observableTimes.value*u.d >= TK.currentTimeAbs.copy().value*u.d)].shape[0]) == 0):#Are there any stars coming out of keepout before end of mission\n self.vprint('No Observable Targets for Remainder of mission at currentTimeNorm= ' + str(TK.currentTimeNorm.copy()))\n #Manually advancing time to mission end\n TK.currentTimeNorm = TK.missionLife.copy()\n TK.currentTimeAbs = TK.missionFinishAbs.copy()\n else:#CASE 3 nominal wait time if at least 1 target is still in list and observable\n #TODO: ADD ADVANCE TO WHEN FZMIN OCURS\n inds1 = np.arange(TL.nStars)[observableTimes.value*u.d > TK.currentTimeAbs.copy().value*u.d]\n inds2 = np.intersect1d(self.intTimeFilterInds, inds1) #apply intTime filter\n inds3 = self.revisitFilter(inds2, TK.currentTimeNorm.copy() + self.dt_max.to(u.d)) #apply revisit Filter #NOTE this means stars you added to the revisit list \n self.vprint(\"Filtering %d stars from advanceToAbsTime\"%(TL.nStars - len(inds3)))\n oTnowToEnd = observableTimes[inds3]\n if not 
oTnowToEnd.value.shape[0] == 0: #there is at least one observableTime between now and the end of the mission\n tAbs = np.min(oTnowToEnd)#advance to that observable time\n else:\n tAbs = TK.missionStart + TK.missionLife#advance to end of mission\n tmpcurrentTimeNorm = TK.currentTimeNorm.copy()\n success = TK.advanceToAbsTime(tAbs)#Advance Time to this time OR start of next OB following this time\n self.vprint('No Observable Targets a currentTimeNorm= %.2f Advanced To currentTimeNorm= %.2f'%(tmpcurrentTimeNorm.to('day').value, TK.currentTimeNorm.to('day').value))\n \n else:\n dtsim = (time.time()-t0)*u.s\n mission_end = \"Mission complete: no more time available.\\n\"\\\n + \"Simulation duration: %s.\\n\" %dtsim.astype('int')\\\n + \"Results stored in SurveySimulation.DRM (Design Reference Mission).\"\n\n self.logger.info(mission_end)\n self.vprint(mission_end)\n\n return mission_end\n\n\n def promote_coro_targets(self, occ_sInds, sInds):\n \"\"\"\n Determines which coronograph targets to promote to occulter targets\n\n Args:\n occ_sInds (numpy array):\n occulter targets\n sInds (numpy array):\n coronograph targets\n\n Returns:\n occ_sInds (numpy array):\n updated occulter targets\n \"\"\"\n\n TK = self.TimeKeeping\n SU = self.SimulatedUniverse\n TL = self.TargetList\n promoted_occ_sInds = np.array([], dtype=int)\n\n # if phase 1 has ended\n if TK.currentTimeAbs > self.phase1_end:\n if self.is_phase1 is True:\n self.vprint('Entering detection phase 2: target list for occulter expanded')\n self.is_phase1 = False\n # If we only want to promote stars that have planets in the habitable zone\n if self.promote_hz_stars:\n # stars must have had >= n_det_min detections\n promote_stars = sInds[np.where(self.sInd_detcounts[sInds] >= self.n_det_min)[0]]\n if np.any(promote_stars):\n for sInd in promote_stars:\n pInds = np.where(SU.plan2star == sInd)[0]\n sp = SU.s[pInds]\n Ms = TL.MsTrue[sInd]\n Mp = SU.Mp[pInds]\n mu = const.G*(Mp + Ms)\n T = (2.*np.pi*np.sqrt(sp**3/mu)).to('d')\n # star must have detections that span longer than half a period and be in the habitable zone\n # and have a smaller radius that a sub-neptune\n pinds_earthlike = self.is_earthlike(pInds, sInd)\n if (np.any((T/2.0 < (self.sInd_dettimes[sInd][-1] - self.sInd_dettimes[sInd][0]))) and np.any(pinds_earthlike)) \\\n or ((self.sInd_detcounts[sInd] >= self.nmax_promo_det) and np.any(pinds_earthlike)):\n earthlikes = pInds[pinds_earthlike]\n self.known_earths = np.union1d(self.known_earths, pInds[pinds_earthlike]).astype(int)\n promoted_occ_sInds = np.append(promoted_occ_sInds, sInd)\n if sInd not in self.promoted_stars:\n self.promoted_stars.append(sInd)\n occ_sInds = np.union1d(occ_sInds, promoted_occ_sInds)\n else:\n occ_sInds = np.union1d(occ_sInds, sInds[np.where((self.starVisits[sInds] == self.nVisitsMax) & \n (self.occ_starVisits[sInds] == 0))[0]])\n\n occ_sInds = np.union1d(occ_sInds, np.intersect1d(sInds, self.known_rocky))\n self.promoted_stars = list(np.union1d(self.promoted_stars, np.intersect1d(sInds, self.known_rocky)).astype(int))\n return occ_sInds.astype(int)\n\n\n def next_target(self, old_sInd, old_occ_sInd, det_mode, char_mode):\n \"\"\"Finds index of next target star and calculates its integration time.\n \n This method chooses the next target star index based on which\n stars are available, their integration time, and maximum completeness.\n Returns None if no target could be found.\n \n Args:\n old_sInd (integer):\n Index of the previous target star for the telescope\n old_occ_sInd (integer):\n Index of the 
previous target star for the occulter\n det_mode (dict):\n Selected observing mode for detection\n char_mode (dict):\n Selected observing mode for characterization\n \n Returns:\n DRM (dicts):\n Contains the results of survey simulation\n sInd (integer):\n Index of next target star. Defaults to None.\n occ_sInd (integer):\n Index of next occulter target star. Defaults to None.\n t_det (astropy Quantity):\n Selected star integration time for detection in units of day. \n Defaults to None.\n \n \"\"\"\n\n OS = self.OpticalSystem\n ZL = self.ZodiacalLight\n TL = self.TargetList\n Obs = self.Observatory\n TK = self.TimeKeeping\n SU = self.SimulatedUniverse\n \n # selecting appropriate koMap\n occ_koMap = self.koMaps[char_mode['syst']['name']]\n koMap = self.koMaps[det_mode['syst']['name']]\n \n # Create DRM\n DRM = {}\n \n # In case of an occulter, initialize slew time factor\n # (add transit time and reduce starshade mass)\n assert OS.haveOcculter == True\n self.ao = Obs.thrust/Obs.scMass\n\n # Star indices that correspond with the given HIPs numbers for the occulter\n # XXX ToDo: print out HIPs that don't show up in TL\n HIP_sInds = np.where(np.in1d(TL.Name, self.occHIPs))[0]\n if TL.earths_only:\n HIP_sInds = np.union1d(HIP_sInds, self.promoted_stars).astype(int)\n sInd = None\n \n # Now, start to look for available targets\n while not TK.mission_is_over(OS, Obs, det_mode):\n # allocate settling time + overhead time\n tmpCurrentTimeAbs = TK.currentTimeAbs.copy()\n tmpCurrentTimeNorm = TK.currentTimeNorm.copy()\n occ_tmpCurrentTimeAbs = TK.currentTimeAbs.copy()\n occ_tmpCurrentTimeNorm = TK.currentTimeNorm.copy()\n\n # 0 initialize arrays\n slewTimes = np.zeros(TL.nStars)*u.d\n fZs = np.zeros(TL.nStars)/u.arcsec**2\n dV = np.zeros(TL.nStars)*u.m/u.s\n intTimes = np.zeros(TL.nStars)*u.d\n occ_intTimes = np.zeros(TL.nStars)*u.d\n occ_tovisit = np.zeros(TL.nStars, dtype=bool)\n sInds = np.arange(TL.nStars)\n\n # 1 Find spacecraft orbital START positions and filter out unavailable \n # targets. 
If occulter, each target has its own START position.\n sd = Obs.star_angularSep(TL, old_occ_sInd, sInds, tmpCurrentTimeAbs)\n obsTimes = Obs.calculate_observableTimes(TL, sInds, tmpCurrentTimeAbs, self.koMaps, self.koTimes, char_mode)\n slewTimes = Obs.calculate_slewTimes(TL, old_occ_sInd, sInds, sd, obsTimes, tmpCurrentTimeAbs)\n\n # 2.1 filter out totTimes > integration cutoff\n if len(sInds) > 0:\n occ_sInds = np.intersect1d(self.occ_intTimeFilterInds, sInds)\n if len(sInds) > 0:\n sInds = np.intersect1d(self.intTimeFilterInds, sInds)\n \n # Starttimes based off of slewtime\n occ_startTimes = occ_tmpCurrentTimeAbs.copy() + slewTimes\n occ_startTimesNorm = occ_tmpCurrentTimeNorm.copy() + slewTimes\n\n startTimes = tmpCurrentTimeAbs.copy() + np.zeros(TL.nStars)*u.d\n startTimesNorm = tmpCurrentTimeNorm.copy()\n\n # 2.5 Filter stars not observable at startTimes\n try:\n tmpIndsbool = list()\n for i in np.arange(len(occ_sInds)):\n koTimeInd = np.where(np.round(occ_startTimes[occ_sInds[i]].value) - self.koTimes.value==0)[0][0] # find indice where koTime is endTime[0]\n tmpIndsbool.append(occ_koMap[occ_sInds[i]][koTimeInd].astype(bool)) #Is star observable at time ind\n sInds_occ_ko = occ_sInds[tmpIndsbool]\n occ_sInds = sInds_occ_ko[np.where(np.in1d(sInds_occ_ko, HIP_sInds))[0]]\n del tmpIndsbool\n except:#If there are no target stars to observe \n sInds_occ_ko = np.asarray([],dtype=int)\n occ_sInds = np.asarray([],dtype=int)\n\n try:\n tmpIndsbool = list()\n for i in np.arange(len(sInds)):\n koTimeInd = np.where(np.round(startTimes[sInds[i]].value) - self.koTimes.value==0)[0][0] # find indice where koTime is endTime[0]\n tmpIndsbool.append(koMap[sInds[i]][koTimeInd].astype(bool)) #Is star observable at time ind\n sInds = sInds[tmpIndsbool]\n del tmpIndsbool\n except:#If there are no target stars to observe \n sInds = np.asarray([],dtype=int)\n\n # 2.9 Occulter target promotion step\n occ_sInds = self.promote_coro_targets(occ_sInds, sInds_occ_ko)\n\n # 3 Filter out all previously (more-)visited targets, unless in \n # revisit list\n if len(sInds.tolist()) > 0:\n sInds = self.revisitFilter(sInds, TK.currentTimeNorm.copy())\n\n # revisit list, with time after start\n if np.any(occ_sInds):\n occ_tovisit[occ_sInds] = (self.occ_starVisits[occ_sInds] == self.occ_starVisits[occ_sInds].min())\n if self.occ_starRevisit.size != 0:\n dt_rev = TK.currentTimeNorm.copy() - self.occ_starRevisit[:,1]*u.day\n ind_rev = [int(x) for x in self.occ_starRevisit[dt_rev > 0, 0] if x in occ_sInds]\n occ_tovisit[ind_rev] = True\n occ_sInds = np.where(occ_tovisit)[0]\n\n # 4 calculate integration times for ALL preselected targets, \n # and filter out totTimes > integration cutoff\n maxIntTimeOBendTime, maxIntTimeExoplanetObsTime, maxIntTimeMissionLife = TK.get_ObsDetectionMaxIntTime(Obs, det_mode)\n maxIntTime = min(maxIntTimeOBendTime, maxIntTimeExoplanetObsTime, maxIntTimeMissionLife, OS.intCutoff)#Maximum intTime allowed\n\n maxIntTimeOBendTime, maxIntTimeExoplanetObsTime, maxIntTimeMissionLife = TK.get_ObsDetectionMaxIntTime(Obs, char_mode)\n occ_maxIntTime = min(maxIntTimeOBendTime, maxIntTimeExoplanetObsTime, maxIntTimeMissionLife, OS.intCutoff)#Maximum intTime allowed\n\n if len(occ_sInds) > 0:\n if self.int_inflection:\n fEZ = ZL.fEZ0\n WA = self.WAint\n occ_intTimes[occ_sInds] = self.calc_int_inflection(occ_sInds, fEZ, occ_startTimes, WA[occ_sInds], char_mode, ischar=True)\n totTimes = occ_intTimes*char_mode['timeMultiplier']\n occ_endTimes = occ_startTimes + totTimes\n else:\n # characterization_start = 
occ_startTimes\n occ_intTimes[occ_sInds] = self.calc_targ_intTime(occ_sInds, occ_startTimes[occ_sInds], char_mode) * (1 + self.charMargin)\n\n # Adjust integration time for stars with known earths around them\n for occ_star in occ_sInds:\n if occ_star in self.promoted_stars:\n occ_earths = np.intersect1d(np.where(SU.plan2star == occ_star)[0], self.known_earths).astype(int)\n if np.any(occ_earths):\n fZ = ZL.fZ(Obs, TL, occ_star, occ_startTimes[occ_star], char_mode)\n fEZ = SU.fEZ[occ_earths].to('1/arcsec2').value/u.arcsec**2\n if SU.lucky_planets:\n phi = (1/np.pi)*np.ones(len(SU.d))\n dMag = deltaMag(SU.p, SU.Rp, SU.d, phi)[occ_earths] # delta magnitude\n WA = np.arctan(SU.a/TL.dist[SU.plan2star]).to('arcsec')[occ_earths] # working angle\n else:\n dMag = SU.dMag[occ_earths]\n WA = SU.WA[occ_earths]\n\n if np.all((WA < char_mode['IWA']) | (WA > char_mode['OWA'])):\n occ_intTimes[occ_star] = 0.*u.d\n else:\n earthlike_inttimes = OS.calc_intTime(TL, occ_star, fZ, fEZ, dMag, WA, char_mode) * (1 + self.charMargin)\n earthlike_inttime = earthlike_inttimes[(earthlike_inttimes < occ_maxIntTime)]\n if len(earthlike_inttime) > 0:\n occ_intTimes[occ_star] = np.max(earthlike_inttime)\n else:\n occ_intTimes[occ_star] = np.max(earthlike_inttimes)\n occ_endTimes = occ_startTimes + (occ_intTimes * char_mode['timeMultiplier']) + Obs.settlingTime + char_mode['syst']['ohTime']\n\n occ_sInds = occ_sInds[(occ_intTimes[occ_sInds] <= occ_maxIntTime)] # Filters targets exceeding maximum intTime\n occ_sInds = occ_sInds[(occ_intTimes[occ_sInds] > 0.0*u.d)] # Filters with an inttime of 0\n \n if occ_maxIntTime.value <= 0:\n occ_sInds = np.asarray([],dtype=int)\n\n if len(sInds.tolist()) > 0:\n intTimes[sInds] = self.calc_targ_intTime(sInds, startTimes[sInds], det_mode)\n sInds = sInds[(intTimes[sInds] <= maxIntTime)] # Filters targets exceeding end of OB\n endTimes = startTimes + intTimes\n \n if maxIntTime.value <= 0:\n sInds = np.asarray([],dtype=int)\n\n # 5.2 find spacecraft orbital END positions (for each candidate target), \n # and filter out unavailable targets\n if len(occ_sInds.tolist()) > 0 and Obs.checkKeepoutEnd:\n try: # endTimes may exist past koTimes so we have an exception to hand this case\n tmpIndsbool = list()\n for i in np.arange(len(occ_sInds)):\n koTimeInd = np.where(np.round(occ_endTimes[occ_sInds[i]].value)-self.koTimes.value==0)[0][0] # find indice where koTime is endTime[0]\n tmpIndsbool.append(occ_koMap[occ_sInds[i]][koTimeInd].astype(bool)) #Is star observable at time ind\n occ_sInds = occ_sInds[tmpIndsbool]\n del tmpIndsbool\n except:\n occ_sInds = np.asarray([],dtype=int)\n\n if len(sInds.tolist()) > 0 and Obs.checkKeepoutEnd:\n try: # endTimes may exist past koTimes so we have an exception to hand this case\n tmpIndsbool = list()\n for i in np.arange(len(sInds)):\n koTimeInd = np.where(np.round(endTimes[sInds[i]].value)-self.koTimes.value==0)[0][0] # find indice where koTime is endTime[0]\n tmpIndsbool.append(koMap[sInds[i]][koTimeInd].astype(bool)) #Is star observable at time ind\n sInds = sInds[tmpIndsbool]\n del tmpIndsbool\n except:\n sInds = np.asarray([],dtype=int)\n\n # 5.3 Filter off current occulter target star from detection list\n if old_occ_sInd is not None:\n sInds = sInds[np.where(sInds != old_occ_sInd)]\n occ_sInds = occ_sInds[(occ_sInds != old_occ_sInd)]\n\n # 6.1 Filter off any stars visited by the occulter more than the max number of times\n if np.any(occ_sInds):\n occ_sInds = occ_sInds[(self.occ_starVisits[occ_sInds] < self.occ_max_visits)]\n\n # 6.2 Filter off 
coronograph stars with too many visits and no detections\n no_dets = np.logical_and((self.starVisits[sInds] > self.n_det_remove), (self.sInd_detcounts[sInds] == 0))\n sInds = sInds[np.where(np.invert(no_dets))[0]]\n\n max_dets = np.where(self.sInd_detcounts[sInds] < self.max_successful_dets)[0]\n sInds = sInds[max_dets]\n\n # 7 Filter off cornograph stars with too-long inttimes\n if self.occ_arrives > TK.currentTimeAbs:\n available_time = self.occ_arrives - TK.currentTimeAbs.copy()\n if np.any(sInds[intTimes[sInds] < available_time]):\n sInds = sInds[intTimes[sInds] < available_time]\n\n # 8 remove occ targets on ignore_stars list\n occ_sInds = np.setdiff1d(occ_sInds, np.intersect1d(occ_sInds, self.ignore_stars))\n\n t_det = 0*u.d\n occ_sInd = old_occ_sInd\n if np.any(sInds):\n # choose sInd of next target\n sInd = self.choose_next_telescope_target(old_sInd, sInds, intTimes[sInds])\n # store relevant values\n t_det = intTimes[sInd]\n\n # 8 Choose best target from remaining\n # if the starshade has arrived at its destination, or it is the first observation\n if np.any(occ_sInds):\n if old_occ_sInd is None or ((TK.currentTimeAbs.copy() + t_det) >= self.occ_arrives and self.ready_to_update):\n occ_sInd = self.choose_next_occulter_target(old_occ_sInd, occ_sInds, occ_intTimes)\n if old_occ_sInd is None:\n self.occ_arrives = TK.currentTimeAbs.copy()\n else:\n self.occ_arrives = occ_startTimes[occ_sInd]\n self.occ_slewTime = slewTimes[occ_sInd]\n self.occ_sd = sd[occ_sInd]\n self.ready_to_update = False\n elif not np.any(sInds):\n TK.advanceToAbsTime(TK.currentTimeAbs.copy() + 1*u.d)\n continue\n\n if occ_sInd is not None:\n sInds = sInds[(sInds != occ_sInd)]\n\n if self.tot_det_int_cutoff < self.tot_dettime:\n sInds = np.array([])\n\n if np.any(sInds):\n # choose sInd of next target\n sInd = self.choose_next_telescope_target(old_sInd, sInds, intTimes[sInds])\n # store relevant values\n t_det = intTimes[sInd]\n else:\n sInd = None\n\n # if no observable target, call the TimeKeeping.wait() method\n if not np.any(sInds) and not np.any(occ_sInds):\n self.vprint('No Observable Targets at currentTimeNorm= ' + str(TK.currentTimeNorm.copy()))\n return DRM, None, None, None, None, None\n break\n\n else:\n self.logger.info('Mission complete: no more time available')\n self.vprint( 'Mission complete: no more time available')\n return DRM, None, None, None, None, None\n\n if TK.mission_is_over(OS, Obs, det_mode):\n self.logger.info('Mission complete: no more time available')\n self.vprint( 'Mission complete: no more time available')\n return DRM, None, None, None, None, None\n\n return DRM, sInd, occ_sInd, t_det, sd, occ_sInds\n\n def choose_next_occulter_target(self, old_occ_sInd, occ_sInds, intTimes):\n \"\"\"Choose next target for the occulter based on truncated \n depth first search of linear cost function.\n \n Args:\n old_occ_sInd (integer):\n Index of the previous target star\n occ_sInds (integer array):\n Indices of available targets\n intTimes (astropy Quantity array):\n Integration times for detection in units of day\n \n Returns:\n sInd (integer):\n Index of next target star\n \n \"\"\"\n\n # Choose next Occulter target\n\n Comp = self.Completeness\n TL = self.TargetList\n TK = self.TimeKeeping\n OS = self.OpticalSystem\n\n # reshape sInds, store available top9 sInds\n occ_sInds = np.array(occ_sInds, ndmin=1)\n top_HIPs = self.occHIPs[:self.topstars]\n top_sInds = np.intersect1d(np.where(np.in1d(TL.Name, top_HIPs))[0], occ_sInds)\n\n # current stars have to be in the adjmat\n if (old_occ_sInd is 
not None) and (old_occ_sInd not in occ_sInds):\n occ_sInds = np.append(occ_sInds, old_occ_sInd)\n\n # get completeness values\n comps = Comp.completeness_update(TL, occ_sInds, self.occ_starVisits[occ_sInds], TK.currentTimeNorm.copy())\n \n # if first target, or if only 1 available target, choose highest available completeness\n nStars = len(occ_sInds)\n if (old_occ_sInd is None) or (nStars == 1):\n occ_sInd = np.random.choice(occ_sInds[comps == max(comps)])\n return occ_sInd\n \n # define adjacency matrix\n A = np.zeros((nStars, nStars))\n\n # consider slew distance when there's an occulter\n r_ts = TL.starprop(occ_sInds, TK.currentTimeAbs.copy())\n u_ts = (r_ts.to('AU').value.T/np.linalg.norm(r_ts.to('AU').value, axis=1)).T\n angdists = np.arccos(np.clip(np.dot(u_ts, u_ts.T), -1, 1))\n A[np.ones((nStars),dtype=bool)] = angdists\n A = self.coeffs[0]*(A)/np.pi\n\n # add factor due to completeness\n A = A + self.coeffs[1]*(1 - comps)\n\n # add factor due to intTime\n intTimes[old_occ_sInd] = np.inf\n A = A + self.coeffs[2]*(intTimes[occ_sInds]/OS.intCutoff)\n\n # add factor for unvisited ramp for deep dive stars\n if np.any(top_sInds):\n # add factor for least visited deep dive stars\n f_uv = np.zeros(nStars)\n u1 = np.in1d(occ_sInds, top_sInds)\n u2 = self.occ_starVisits[occ_sInds]==min(self.occ_starVisits[top_sInds])\n unvisited = np.logical_and(u1, u2)\n f_uv[unvisited] = float(TK.currentTimeNorm.copy()/TK.missionLife.copy())**2\n A = A - self.coeffs[3]*f_uv\n\n self.coeff_data_a3.append([occ_sInds,f_uv])\n\n # add factor for unvisited deep dive stars\n no_visits = np.zeros(nStars)\n #no_visits[u1] = np.ones(len(top_sInds))\n u2 = self.occ_starVisits[occ_sInds]==0\n unvisited = np.logical_and(u1, u2)\n no_visits[unvisited] = 1.\n A = A - self.coeffs[4]*no_visits\n\n self.coeff_data_a4.append([occ_sInds, no_visits])\n self.coeff_time.append(TK.currentTimeNorm.copy().value)\n\n # add factor due to unvisited ramp\n f_uv = np.zeros(nStars)\n unvisited = self.occ_starVisits[occ_sInds]==0\n f_uv[unvisited] = float(TK.currentTimeNorm.copy()/TK.missionLife.copy())**2\n A = A - self.coeffs[5]*f_uv\n\n # add factor due to revisited ramp\n if self.occ_starRevisit.size != 0:\n f2_uv = 1 - (np.in1d(occ_sInds, self.occ_starRevisit[:,0]))\n A = A + self.coeffs[6]*f2_uv\n\n # kill diagonal\n A = A + np.diag(np.ones(nStars)*np.Inf)\n\n # take two traversal steps\n step1 = np.tile(A[occ_sInds==old_occ_sInd,:],(nStars,1)).flatten('F')\n step2 = A[np.array(np.ones((nStars,nStars)),dtype=bool)]\n tmp = np.nanargmin(step1+step2)\n occ_sInd = occ_sInds[int(np.floor(tmp/float(nStars)))]\n\n return occ_sInd\n\n def choose_next_telescope_target(self, old_sInd, sInds, t_dets):\n \"\"\"Choose next telescope target based on star completeness and integration time.\n \n Args:\n old_sInd (integer):\n Index of the previous target star\n sInds (integer array):\n Indices of available targets\n t_dets (astropy Quantity array):\n Integration times for detection in units of day\n \n Returns:\n sInd (integer):\n Index of next target star\n \n \"\"\"\n \n Comp = self.Completeness\n TL = self.TargetList\n TK = self.TimeKeeping\n OS = self.OpticalSystem\n Obs = self.Observatory\n allModes = OS.observingModes\n\n nStars = len(sInds)\n\n # reshape sInds\n sInds = np.array(sInds,ndmin=1)\n\n # 1/ Choose next telescope target\n comps = Comp.completeness_update(TL, sInds, self.starVisits[sInds], TK.currentTimeNorm.copy())\n\n # add weight for star revisits\n ind_rev = []\n if self.starRevisit.size != 0:\n dt_rev = 
self.starRevisit[:,1]*u.day - TK.currentTimeNorm.copy()\n ind_rev = [int(x) for x in self.starRevisit[dt_rev < 0*u.d, 0] if x in sInds]\n\n f2_uv = np.where((self.starVisits[sInds] > 0) & (self.starVisits[sInds] < self.nVisitsMax), \n self.starVisits[sInds], 0) * (1 - (np.in1d(sInds, ind_rev, invert=True)))\n\n L = TL.L[sInds]\n l_extreme = max([np.abs(np.log10(np.min(TL.L[sInds]))), np.abs(np.log10(np.max(TL.L[sInds])))])\n if l_extreme == 0.0:\n l_weight = 1\n else:\n l_weight = 1 - np.abs(np.log10(TL.L[sInds])/l_extreme)**self.lum_exp\n\n t_weight = t_dets/np.max(t_dets)\n weights = ((comps + self.revisit_weight*f2_uv/float(self.nVisitsMax))/t_weight)*l_weight\n # weights = (comps + self.revisit_weight*f2_uv/float(self.nVisitsMax))*l_weight\n\n sInd = np.random.choice(sInds[weights == max(weights)])\n\n #Check if exoplanetObsTime would be exceeded\n mode = list(filter(lambda mode: mode['detectionMode'] == True, allModes))[0]\n maxIntTimeOBendTime, maxIntTimeExoplanetObsTime, maxIntTimeMissionLife = TK.get_ObsDetectionMaxIntTime(Obs, mode)\n maxIntTime = min(maxIntTimeOBendTime, maxIntTimeExoplanetObsTime, maxIntTimeMissionLife)#Maximum intTime allowed\n intTimes2 = self.calc_targ_intTime(np.array([sInd]), TK.currentTimeAbs.copy(), mode)\n if intTimes2 > maxIntTime: # check if max allowed integration time would be exceeded\n self.vprint('max allowed integration time would be exceeded')\n sInd = None\n waitTime = 1.*u.d\n return sInd\n\n\n def calc_int_inflection(self, t_sInds, fEZ, startTime, WA, mode, ischar=False):\n \"\"\"Calculate integration time based on inflection point of Completeness as a function of int_time\n \n Args:\n t_sInds (integer array):\n Indices of the target stars\n fEZ (astropy Quantity array):\n Surface brightness of exo-zodiacal light in units of 1/arcsec2\n startTime (astropy Quantity array):\n Surface brightness of local zodiacal light in units of 1/arcsec2\n WA (astropy Quantity):\n Working angle of the planet of interest in units of arcsec\n mode (dict):\n Selected observing mode\n\n Returns:\n int_times (astropy quantity array):\n The suggested integration time\n \n \"\"\"\n\n Comp = self.Completeness\n TL = self.TargetList\n ZL = self.ZodiacalLight\n Obs = self.Observatory\n\n num_points = 500\n intTimes = np.logspace(-5, 2, num_points)*u.d\n sInds = np.arange(TL.nStars)\n WA = self.WAint # don't use WA input because we don't know planet positions before characterization\n curve = np.zeros([1, sInds.size, intTimes.size])\n\n Cpath = os.path.join(Comp.classpath, Comp.filename+'.fcomp')\n\n # if no preexisting curves exist, either load from file or calculate\n if self.curves is None:\n if os.path.exists(Cpath):\n self.vprint( 'Loading cached completeness file from \"{}\".'.format(Cpath))\n with open(Cpath, 'rb') as cfile:\n curves = pickle.load(cfile)\n self.vprint( 'Completeness curves loaded from cache.')\n else:\n # calculate completeness curves for all sInds\n self.vprint( 'Cached completeness file not found at \"{}\".'.format(Cpath))\n self.vprint( 'Beginning completeness curve calculations.')\n curves = {}\n for t_i, t in enumerate(intTimes):\n fZ = ZL.fZ(Obs, TL, sInds, startTime, mode)\n # curves[0,:,t_i] = OS.calc_dMag_per_intTime(t, TL, sInds, fZ, fEZ, WA, mode)\n curve[0,:,t_i] = Comp.comp_per_intTime(t, TL, sInds, fZ, fEZ, WA, mode)\n curves[mode['systName']] = curve\n with open(Cpath, 'wb') as cfile:\n pickle.dump(curves, cfile)\n self.vprint( 'completeness curves stored in {}'.format(Cpath))\n\n self.curves = curves\n\n # if no curves for current 
mode\n if mode['systName'] not in self.curves.keys() or TL.nStars != self.curves[mode['systName']].shape[1]:\n for t_i, t in enumerate(intTimes):\n fZ = ZL.fZ(Obs, TL, sInds, startTime, mode)\n curve[0,:,t_i] = Comp.comp_per_intTime(t, TL, sInds, fZ, fEZ, WA, mode)\n\n self.curves[mode['systName']] = curve\n with open(Cpath, 'wb') as cfile:\n pickle.dump(self.curves, cfile)\n self.vprint( 'recalculated completeness curves stored in {}'.format(Cpath))\n\n int_times = np.zeros(len(t_sInds))*u.d\n for i, sInd in enumerate(t_sInds):\n c_v_t = self.curves[mode['systName']][0,sInd,:]\n dcdt = np.diff(c_v_t)/np.diff(intTimes)\n\n # find the inflection point of the completeness graph\n if ischar is False:\n target_point = max(dcdt).value + 10*np.var(dcdt).value\n idc = np.abs(dcdt - target_point/(1*u.d)).argmin()\n int_time = intTimes[idc]\n int_time = int_time*self.starVisits[sInd]\n\n # update star completeness\n idx = (np.abs(intTimes - int_time)).argmin()\n comp = c_v_t[idx]\n TL.comp[sInd] = comp\n else:\n idt = np.abs(intTimes - max(intTimes)).argmin()\n idx = np.abs(c_v_t - c_v_t[idt]*.9).argmin()\n\n # idx = np.abs(comps - max(comps)*.9).argmin()\n int_time = intTimes[idx]\n comp = c_v_t[idx]\n\n int_times[i] = int_time\n\n int_times[int_times<2.000e-5*u.d] = 0.0 *u.d\n return int_times\n\n\n def observation_characterization(self, sInd, mode):\n \"\"\"Finds if characterizations are possible and relevant information\n \n Args:\n sInd (integer):\n Integer index of the star of interest\n mode (dict):\n Selected observing mode for characterization\n \n Returns:\n characterized (integer list):\n Characterization status for each planet orbiting the observed \n target star including False Alarm if any, where 1 is full spectrum, \n -1 partial spectrum, and 0 not characterized\n fZ (astropy Quantity):\n Surface brightness of local zodiacal light in units of 1/arcsec2\n systemParams (dict):\n Dictionary of time-dependant planet properties averaged over the \n duration of the integration\n SNR (float ndarray):\n Characterization signal-to-noise ratio of the observable planets. \n Defaults to None.\n intTime (astropy Quantity):\n Selected star characterization time in units of day. 
Defaults to None.\n \"\"\"\n\n OS = self.OpticalSystem\n ZL = self.ZodiacalLight\n TL = self.TargetList\n SU = self.SimulatedUniverse\n Obs = self.Observatory\n TK = self.TimeKeeping\n \n # selecting appropriate koMap\n koMap = self.koMaps[mode['syst']['name']]\n\n # find indices of planets around the target\n pInds = np.where(SU.plan2star == sInd)[0]\n pinds_earthlike = np.array([])\n det = np.ones(pInds.size, dtype=bool)\n fEZs = SU.fEZ[pInds].to('1/arcsec2').value\n dMags = SU.dMag[pInds]\n WAs = SU.WA[pInds].to('arcsec').value\n\n FA = (det.size == pInds.size + 1)\n if FA == True:\n pIndsDet = np.append(pInds, -1)[det]\n else:\n pIndsDet = pInds[det]\n\n # initialize outputs, and check if any planet to characterize\n characterized = np.zeros(det.size, dtype=int)\n fZ = 0./u.arcsec**2\n systemParams = SU.dump_system_params(sInd) # write current system params by default\n SNR = np.zeros(len(det))\n intTime = None\n if len(det) == 0: # nothing to characterize\n HIP_sInds = np.where(np.in1d(TL.Name, self.occHIPs))[0]\n if sInd in HIP_sInds:\n startTime = TK.currentTimeAbs.copy()\n startTimeNorm = TK.currentTimeNorm.copy()\n intTime = self.calc_targ_intTime(np.array([sInd]), startTime, mode)[0]\n extraTime = intTime*(mode['timeMultiplier'] - 1.)#calculates extraTime\n # add a predetermined margin to the integration times\n intTime = intTime*(1 + self.charMargin)\n # apply time multiplier\n totTime = intTime*(mode['timeMultiplier'])\n # end times\n endTimes = startTime + totTime\n endTimesNorm = startTimeNorm + totTime\n # planets to characterize\n tochar = ((totTime > 0) & (totTime <= OS.intCutoff) & \n (endTimesNorm <= TK.OBendTimes[TK.OBnumber]))\n success = TK.allocate_time(intTime + extraTime + mode['syst']['ohTime'] + Obs.settlingTime, True)#allocates time\n if success == False or not tochar:\n intTime = None\n if sInd not in self.sInd_charcounts.keys():\n self.sInd_charcounts[sInd] = characterized\n return characterized, fZ, systemParams, SNR, intTime\n\n # look for last detected planets that have not been fully characterized\n if (FA == False): # only true planets, no FA\n tochar = (self.fullSpectra[pIndsDet] != -2)\n else: # mix of planets and a FA\n truePlans = pIndsDet[:-1]\n tochar = np.append((self.fullSpectra[truePlans] == 0), True)\n\n # 1/ find spacecraft orbital START position and check keepout angle\n if np.any(tochar):\n # start times\n startTime = TK.currentTimeAbs.copy()\n startTimeNorm = TK.currentTimeNorm.copy()\n # planets to characterize\n koTimeInd = np.where(np.round(startTime.value)-self.koTimes.value==0)[0][0] # find indice where koTime is startTime[0]\n # wherever koMap is 1, the target is observable\n tochar[tochar] = koMap[sInd][koTimeInd]\n\n # 2/ if any planet to characterize, find the characterization times\n if np.any(tochar):\n # propagate the whole system to match up with current time\n # calculate characterization times at the detected fEZ, dMag, and WA\n pinds_earthlike = np.logical_and(np.array([(p in self.known_earths) for p in pIndsDet]), tochar)\n\n fZ = ZL.fZ(Obs, TL, sInd, startTime, mode)\n fEZ = fEZs[tochar]/u.arcsec**2\n WAp = self.WAint[sInd]*np.ones(len(tochar))\n dMag = self.dMagint[sInd]*np.ones(len(tochar))\n\n # if lucky_planets, use lucky planet params for dMag and WA\n if SU.lucky_planets:\n phi = (1/np.pi)*np.ones(len(SU.d))\n e_dMag = deltaMag(SU.p, SU.Rp, SU.d, phi) # delta magnitude\n e_WA = np.arctan(SU.a/TL.dist[SU.plan2star]).to('arcsec')# working angle\n else:\n e_dMag = SU.dMag\n e_WA = SU.WA\n WAp[pinds_earthlike[tochar]] = 
e_WA[pIndsDet[pinds_earthlike]]\n dMag[pinds_earthlike[tochar]] = e_dMag[pIndsDet[pinds_earthlike]]\n\n intTimes = np.zeros(len(tochar))*u.day\n if self.int_inflection:\n for i,j in enumerate(WAp):\n if tochar[i]:\n intTimes[i] = self.calc_int_inflection([sInd], fEZ[i], startTime, j, mode, ischar=True)[0]\n else:\n intTimes[tochar] = OS.calc_intTime(TL, sInd, fZ, fEZ, dMag, WAp, mode)\n\n # add a predetermined margin to the integration times\n intTimes = intTimes*(1 + self.charMargin)\n # apply time multiplier\n totTimes = intTimes*(mode['timeMultiplier'])\n # end times\n endTimes = startTime + totTimes\n endTimesNorm = startTimeNorm + totTimes\n # planets to characterize\n tochar = ((totTimes > 0) & (totTimes <= OS.intCutoff) & \n (endTimesNorm <= TK.OBendTimes[TK.OBnumber]))\n\n # 3/ is target still observable at the end of any char time?\n if np.any(tochar) and Obs.checkKeepoutEnd:\n koTimeInds = np.zeros(len(endTimes.value[tochar]),dtype=int)\n\n # find index in koMap where each endTime is closest to koTimes\n for t,endTime in enumerate(endTimes.value[tochar]):\n if endTime > self.koTimes.value[-1]:\n # case where endTime exceeds largest koTimes element\n endTimeInBounds = np.where(np.floor(endTime)-self.koTimes.value==0)[0]\n koTimeInds[t] = endTimeInBounds[0] if endTimeInBounds.size is not 0 else -1\n else:\n koTimeInds[t] = np.where(np.round(endTime)-self.koTimes.value==0)[0][0] # find indice where koTime is endTimes[0]\n tochar[tochar] = [koMap[sInd][koT] if koT >= 0 else 0 for koT in koTimeInds]\n\n # 4/ if yes, perform the characterization for the maximum char time\n if np.any(tochar):\n #Save Current Time before attempting time allocation\n currentTimeNorm = TK.currentTimeNorm.copy()\n currentTimeAbs = TK.currentTimeAbs.copy()\n\n if np.any(np.logical_and(pinds_earthlike, tochar)):\n intTime = np.max(intTimes[np.logical_and(pinds_earthlike, tochar)])\n else:\n intTime = np.max(intTimes[tochar])\n extraTime = intTime*(mode['timeMultiplier'] - 1.)#calculates extraTime\n success = TK.allocate_time(intTime + extraTime + mode['syst']['ohTime'] + Obs.settlingTime, True)#allocates time\n if success == False: #Time was not successfully allocated\n #Identical to when \"if char_mode['SNR'] not in [0, np.inf]:\" in run_sim()\n char_intTime = None\n lenChar = len(pInds) + 1 if FA else len(pInds)\n characterized = np.zeros(lenChar, dtype=float)\n char_SNR = np.zeros(lenChar, dtype=float)\n char_fZ = 0./u.arcsec**2\n char_systemParams = SU.dump_system_params(sInd)\n return characterized, char_fZ, char_systemParams, char_SNR, char_intTime\n\n pIndsChar = pIndsDet[tochar]\n log_char = ' - Charact. 
planet(s) %s (%s/%s detected)'%(pIndsChar, \n len(pIndsChar), len(pIndsDet))\n self.logger.info(log_char)\n self.vprint(log_char)\n\n # SNR CALCULATION:\n # first, calculate SNR for observable planets (without false alarm)\n planinds = pIndsChar[:-1] if pIndsChar[-1] == -1 else pIndsChar\n SNRplans = np.zeros(len(planinds))\n if len(planinds) > 0:\n # initialize arrays for SNR integration\n fZs = np.zeros(self.ntFlux)/u.arcsec**2\n systemParamss = np.empty(self.ntFlux, dtype='object')\n Ss = np.zeros((self.ntFlux, len(planinds)))\n Ns = np.zeros((self.ntFlux, len(planinds)))\n # integrate the signal (planet flux) and noise\n dt = intTime/float(self.ntFlux)\n timePlus = Obs.settlingTime.copy() + mode['syst']['ohTime'].copy()#accounts for the time since the current time\n for i in range(self.ntFlux):\n # calculate signal and noise (electron count rates)\n if SU.lucky_planets:\n fZs[i] = ZL.fZ(Obs, TL, sInd, currentTimeAbs, mode)[0]\n Ss[i,:], Ns[i,:] = self.calc_signal_noise(sInd, planinds, dt, mode, \n fZ=fZs[i])\n # allocate first half of dt\n timePlus += dt/2.\n # calculate current zodiacal light brightness\n fZs[i] = ZL.fZ(Obs, TL, sInd, currentTimeAbs + timePlus, mode)[0]\n # propagate the system to match up with current time\n SU.propag_system(sInd, currentTimeNorm + timePlus - self.propagTimes[sInd])\n self.propagTimes[sInd] = currentTimeNorm + timePlus\n # save planet parameters\n systemParamss[i] = SU.dump_system_params(sInd)\n # calculate signal and noise (electron count rates)\n if not SU.lucky_planets:\n Ss[i,:], Ns[i,:] = self.calc_signal_noise(sInd, planinds, dt, mode, \n fZ=fZs[i])\n # allocate second half of dt\n timePlus += dt/2.\n\n # average output parameters\n fZ = np.mean(fZs)\n systemParams = {key: sum([systemParamss[x][key]\n for x in range(self.ntFlux)])/float(self.ntFlux)\n for key in sorted(systemParamss[0])}\n # calculate planets SNR\n S = Ss.sum(0)\n N = Ns.sum(0)\n SNRplans[N > 0] = S[N > 0]/N[N > 0]\n # allocate extra time for timeMultiplier\n\n # if only a FA, just save zodiacal brightness in the middle of the integration\n else:\n totTime = intTime*(mode['timeMultiplier'])\n fZ = ZL.fZ(Obs, TL, sInd, TK.currentTimeAbs.copy() + totTime/2., mode)[0]\n\n # calculate the false alarm SNR (if any)\n SNRfa = []\n if pIndsChar[-1] == -1:\n fEZ = fEZs[-1]/u.arcsec**2\n dMag = dMags[-1]\n WA = WAs[-1]*u.arcsec\n C_p, C_b, C_sp = OS.Cp_Cb_Csp(TL, sInd, fZ, fEZ, dMag, WA, mode)\n S = (C_p*intTime).decompose().value\n N = np.sqrt((C_b*intTime + (C_sp*intTime)**2).decompose().value)\n SNRfa = S/N if N > 0 else 0.\n\n # save all SNRs (planets and FA) to one array\n SNRinds = np.where(det)[0][tochar]\n SNR[SNRinds] = np.append(SNRplans, SNRfa)\n\n # now, store characterization status: 1 for full spectrum, \n # -1 for partial spectrum, 0 for not characterized\n char = (SNR >= mode['SNR'])\n # initialize with full spectra\n characterized = char.astype(int)\n WAchar = WAs[char]*u.arcsec\n # find the current WAs of characterized planets\n WA = WAs*u.arcsec\n if FA:\n WAs = np.append(WAs, WAs[-1]*u.arcsec)\n\n all_full = np.copy(characterized)\n all_full[char] = 1\n if sInd not in self.sInd_charcounts.keys():\n self.sInd_charcounts[sInd] = all_full\n else:\n self.sInd_charcounts[sInd] = self.sInd_charcounts[sInd] + all_full\n # encode results in spectra lists (only for planets, not FA)\n charplans = characterized[:-1] if FA else characterized\n self.fullSpectra[pInds[charplans == 1]] += 1\n self.partialSpectra[pInds[charplans == -1]] += 1\n\n # in both cases (detection or false 
alarm), schedule a revisit \n smin = np.min(SU.s[pInds[det]])\n Ms = TL.MsTrue[sInd]\n\n # if target in promoted_stars list, schedule revisit based off of semi-major axis\n if sInd in self.promoted_stars:\n sp = np.min(SU.a[pInds[det]]).to('AU')\n if np.any(det):\n pInd_smin = pInds[det][np.argmin(SU.a[pInds[det]])]\n Mp = SU.Mp[pInd_smin]\n else:\n Mp = SU.Mp.mean()\n mu = const.G*(Mp + Ms)\n T = 2.*np.pi*np.sqrt(sp**3/mu)\n t_rev = TK.currentTimeNorm.copy() + T/3.\n # otherwise schedule revisit based off of seperation\n elif smin is not None:\n sp = smin\n if np.any(det):\n pInd_smin = pInds[det][np.argmin(SU.s[pInds[det]])]\n Mp = SU.Mp[pInd_smin]\n else:\n Mp = SU.Mp.mean()\n mu = const.G*(Mp + Ms)\n T = 2.*np.pi*np.sqrt(sp**3/mu)\n t_rev = TK.currentTimeNorm.copy() + T/2.\n # otherwise, revisit based on average of population semi-major axis and mass\n else:\n sp = SU.s.mean()\n Mp = SU.Mp.mean()\n mu = const.G*(Mp + Ms)\n T = 2.*np.pi*np.sqrt(sp**3/mu)\n t_rev = TK.currentTimeNorm.copy() + 0.75*T\n\n # finally, populate the revisit list (NOTE: sInd becomes a float)\n revisit = np.array([sInd, t_rev.to('day').value])\n if self.occ_starRevisit.size == 0:\n self.occ_starRevisit = np.array([revisit])\n else:\n revInd = np.where(self.occ_starRevisit[:,0] == sInd)[0]\n if revInd.size == 0:\n self.occ_starRevisit = np.vstack((self.occ_starRevisit, revisit))\n else:\n self.occ_starRevisit[revInd, 1] = revisit[1]\n\n # add stars to filter list\n if np.any(characterized.astype(int) == 1):\n top_HIPs = self.occHIPs[:self.topstars]\n\n # if a top star has had max_successful_chars remove from list\n if np.any(self.sInd_charcounts[sInd] >= self.max_successful_chars):\n self.ignore_stars.append(sInd)\n\n # if a promoted star has an earthlike char, then ignore\n # if sInd in self.promoted_stars:\n # c_plans = pInds[charplans == 1]\n # if np.any(np.logical_and((SU.a[c_plans] > .95*u.AU),(SU.a[c_plans] < 1.67*u.AU))):\n # if np.any((.8*(SU.a[c_plans]**-.5).value < SU.Rp[c_plans].value) & (SU.Rp[c_plans].value < 1.4)):\n # self.ignore_stars.append(sInd)\n\n return characterized.astype(int), fZ, systemParams, SNR, intTime\n\n\n def revisitFilter(self, sInds, tmpCurrentTimeNorm):\n \"\"\"Helper method for Overloading Revisit Filtering\n\n Args:\n sInds - indices of stars still in observation list\n tmpCurrentTimeNorm (MJD) - the simulation time after overhead was added in MJD form\n Returns:\n sInds - indices of stars still in observation list\n \"\"\"\n tovisit = np.zeros(self.TargetList.nStars, dtype=bool)#tovisit is a boolean array containing the \n if len(sInds) > 0:#so long as there is at least 1 star left in sInds\n tovisit[sInds] = ((self.starVisits[sInds] == min(self.starVisits[sInds])) \\\n & (self.starVisits[sInds] < self.nVisitsMax))# Checks that no star has exceeded the number of revisits\n if self.starRevisit.size != 0:#There is at least one revisit planned in starRevisit\n dt_rev = self.starRevisit[:,1]*u.day - tmpCurrentTimeNorm#absolute temporal spacing between revisit and now.\n\n #return indices of all revisits within a threshold dt_max of revisit day and indices of all revisits with no detections past the revisit time\n ind_rev2 = [int(x) for x in self.starRevisit[dt_rev < 0*u.d, 0] if (x in sInds)]\n tovisit[ind_rev2] = (self.starVisits[ind_rev2] < self.nVisitsMax)\n sInds = np.where(tovisit)[0]\n\n return sInds\n\n\n def scheduleRevisit(self, sInd, smin, det, pInds):\n \"\"\"A Helper Method for scheduling revisits after observation detection\n\n Args:\n sInd - sInd of the star just 
detected\n smin - minimum separation of the planet to star of planet just detected\n det - \n pInds - Indices of planets around target star\n Return:\n updates self.starRevisit attribute\n \"\"\"\n TK = self.TimeKeeping\n TL = self.TargetList\n SU = self.SimulatedUniverse\n # in both cases (detection or false alarm), schedule a revisit \n # based on minimum separation\n Ms = TL.MsTrue[sInd]\n if smin is not None and np.nan not in smin: #smin is None if no planet was detected\n sp = smin\n if np.any(det):\n pInd_smin = pInds[det][np.argmin(SU.s[pInds[det]])]\n Mp = SU.Mp[pInd_smin]\n else:\n Mp = SU.Mp.mean()\n mu = const.G*(Mp + Ms)\n T = 2.*np.pi*np.sqrt(sp**3/mu)\n t_rev = TK.currentTimeNorm.copy() + T/2.\n # otherwise, revisit based on average of population semi-major axis and mass\n else:\n sp = SU.s.mean()\n Mp = SU.Mp.mean()\n mu = const.G*(Mp + Ms)\n T = 2.*np.pi*np.sqrt(sp**3/mu)\n t_rev = TK.currentTimeNorm.copy() + 0.75*T\n # if no detections then schedule revisit based off of revisit_wait\n t_rev = TK.currentTimeNorm.copy() + self.revisit_wait[sInd]\n # finally, populate the revisit list (NOTE: sInd becomes a float)\n revisit = np.array([sInd, t_rev.to('day').value])\n if self.starRevisit.size == 0:#If starRevisit has nothing in it\n self.starRevisit = np.array([revisit])#initialize sterRevisit\n else:\n revInd = np.where(self.starRevisit[:,0] == sInd)[0]#indices of the first column of the starRevisit list containing sInd \n if revInd.size == 0:\n self.starRevisit = np.vstack((self.starRevisit, revisit))\n else:\n self.starRevisit[revInd,1] = revisit[1]#over\n\n"
] |
[
[
"numpy.random.uniform",
"numpy.array",
"numpy.mean",
"numpy.ones"
],
[
"numpy.dot",
"numpy.sqrt",
"numpy.arctan",
"numpy.asarray",
"numpy.in1d",
"numpy.vstack",
"numpy.all",
"numpy.max",
"numpy.round",
"numpy.nanargmin",
"numpy.mean",
"numpy.any",
"numpy.argmin",
"numpy.var",
"numpy.where",
"numpy.hstack",
"numpy.unique",
"numpy.arange",
"numpy.intersect1d",
"numpy.copy",
"numpy.diff",
"numpy.zeros",
"numpy.invert",
"numpy.min",
"numpy.logspace",
"numpy.union1d",
"numpy.append",
"numpy.log10",
"numpy.floor",
"numpy.logical_and",
"numpy.array",
"numpy.abs",
"numpy.linalg.norm",
"numpy.tile",
"numpy.ones",
"numpy.isinf",
"numpy.empty"
]
] |
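The row above embeds a starshade/coronagraph survey-scheduling module whose `choose_next_occulter_target` method ranks candidate stars with a linear cost function (slew angle, completeness, integration time) and a two-step traversal of the resulting cost matrix. The following is an illustrative sketch of that selection idea only, not the module's actual API: all inputs (unit pointing vectors, completeness values, integration times, coefficient weights) are made-up stand-ins, and only numpy is assumed.

```python
import numpy as np

def choose_next_target(old_ind, unit_vecs, comps, int_times, int_cutoff, coeffs):
    """Pick the target minimizing a weighted sum of slew angle, (1 - completeness),
    and normalized integration time, via a two-step traversal of the cost matrix."""
    n = len(comps)
    # pairwise angular distances between unit pointing vectors
    ang = np.arccos(np.clip(unit_vecs @ unit_vecs.T, -1.0, 1.0))
    A = coeffs[0] * ang / np.pi                   # slew cost
    A = A + coeffs[1] * (1.0 - comps)             # completeness cost (broadcast over columns)
    A = A + coeffs[2] * (int_times / int_cutoff)  # integration-time cost
    A = A + np.diag(np.full(n, np.inf))           # forbid staying on the same star
    # two traversal steps: old -> intermediate -> final; keep the intermediate star
    step1 = np.tile(A[old_ind, :], (n, 1)).flatten('F')
    step2 = A.flatten()
    best = np.nanargmin(step1 + step2)
    return int(best // n)

# toy usage with random stand-in values
rng = np.random.default_rng(0)
u = rng.normal(size=(5, 3))
u /= np.linalg.norm(u, axis=1, keepdims=True)
print(choose_next_target(0, u, rng.random(5), rng.random(5), 1.0, [1.0, 1.0, 1.0]))
```

The two flattened arrays pair every intermediate star j with every follow-on star m, so the minimum of `step1 + step2` is the cheapest two-observation lookahead; integer division by `n` recovers j, the star to observe next.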
lidongyv/3dndiv
|
[
"ccd7cf75963abf861a49443323585fc0ef08c229"
] |
[
"auxiliary/surface_loss.py"
] |
[
"# -*- coding: utf-8 -*- \r\n#@Author: Lidong Yu \r\n#@Date: 2019-11-18 20:53:24 \r\n#@Last Modified by: Lidong Yu \r\n#@Last Modified time: 2019-11-18 21:44:1\r\n\r\nimport numpy as np\r\nimport torch\r\nimport torch.nn.functional as F\r\nimport os\r\nimport time\r\n\r\ndef compute_pairwise_distance(x):\r\n\t''' computation of pairwise distance matrix\r\n\t---- Input\r\n\t- x: input tensor\t(sample_number,2)\r\n\t---- Return\r\n\t- matrix: output matrix\ttorch.Tensor [sample_number,sample_number]\r\n\t'''\r\n\ty=x\r\n\txx=torch.sum(torch.pow(x,2),dim=1)\r\n\tyy=torch.sum(torch.pow(y,2),dim=1)\r\n\txy=torch.matmul(x,y.transpose(1,0))\r\n\r\n\txx=xx.unsqueeze(0).expand_as(xy)\r\n\tyy=yy.unsqueeze(0).expand_as(xy)\r\n\tdist=xx.transpose(1,0)+yy-2*xy\r\n\treturn torch.clamp(dist,min=1e-6)\r\n\t# if len(x.shape) == 2:\r\n\t# \tmatrix = torch.norm(x[:,None,:] - x[None,:,:], p = 2, dim = 2)\r\n\t# elif len(x.shape) == 3:\r\n\t# \tmatrix = torch.norm(x[:,:,None,:] - x[:,None,:,:], p = 2, dim = 3)\r\n\t# #be attention i do not use the norm\r\n\t# return matrix\r\n\r\ndef middle_position(i,j,size):\r\n\t#current is just the simplest version\r\n\t#u can try to add more middle steps then\r\n\tpi=np.array([i//size,i%size])\r\n\tpj=np.array([j//size,j%size])\r\n\tif pi[1]>pj[1]:\r\n\t\tpj+=pi\r\n\t\tpi=pj-pi\r\n\t\tpj=pj-pi\r\n\tif pi[0]>pj[0]:\r\n\t\treturn pi[0]*size+pj[1]\r\n\telse:\r\n\t\treturn pj[0]*size+pi[1]\r\n\r\ndef init_middle(x,size):\r\n\tpos=[]\r\n\tif os.path.exists('./auxiliary/sample/size%s.npy'%(str(size))):\r\n\t\tpos=np.load('./auxiliary/sample/size%s.npy'%(str(size)))\r\n\telse:\r\n\t\tfor i in range(x.shape[0]):\r\n\t\t\tfor j in range(x.shape[0]):\r\n\t\t\t\tmiddle=middle_position(i,j,size)\r\n\t\t\t\tpos.append([i,middle,j])\r\n\t\tnp.save('./auxiliary/sample/size%s.npy'%(str(size)),pos)\r\n\treturn np.array(pos)\r\ndef compute_norm_pairwise_distance(x,mid_pos):\r\n\t''' computation of normalized pairwise distance matrix\r\n\t---- Input\r\n\t- x: input tensor\ttorch.Tensor (sample_number,2)\r\n\t---- Return\r\n\t- matrix: output matrix\ttorch.Tensor [sample_num, sample_num]\r\n\t'''\r\n\r\n\tx_pair_dist = compute_pairwise_distance(x).view(-1)\r\n\t# size=np.sqrt(x.shape[0])\r\n\t# connection=torch.zeros_like(x_pair_dist)\r\n\t# only compute the pair on the grid, usless\r\n\t# for i in range(x.shape[0]):\r\n\t# \tfor j in range(x.shape[0]):\r\n\t# \t\tif i//size==j//size or i%size==j%size:\r\n\t# \t\t\tconnection=1\r\n\t# dist_straight=x_pair_dist*connection\r\n\tsurface_dist=torch.zeros_like(x_pair_dist)\r\n\t#2s for this, too slow\r\n\t# for i in range(x.shape[0]):\r\n\t# \tfor j in range(x.shape[0]):\r\n\t# \t\tmiddle=torch.tensor(middle_position(i,j,size)).to(x.device).long()\r\n\t\t\t# surface_dist[i,j]=x_pair_dist[i,middle]+x_pair_dist[middle,j]\r\n\tsurface_dist=x_pair_dist[mid_pos[:,:2]]+x_pair_dist[mid_pos[:,1:]]\r\n\tnormalizer = torch.sum(surface_dist, dim = -1,keepdim=True)\r\n\tx_norm_pair_dist = surface_dist / (normalizer + 1e-12).detach()\r\n\r\n\r\n\t# x_pair_dist = compute_pairwise_distance(x)\r\n\t# normalizer = torch.sum(x_pair_dist, dim = -1)\r\n\t# x_norm_pair_dist = x_pair_dist / (normalizer[...,None] + 1e-12).detach()\r\n\r\n\treturn x_norm_pair_dist\r\n\r\ndef NDiv_loss_surface(x, y,mid_pos, alpha=1,mode=2):\r\n\t''' NDiv loss function.\r\n\t---- Input\r\n\t- x: (sample_number,2)\r\n\t#x is the 2d grid, the shortest path the min 2d\r\n\t- y: (sample_number,3)\r\n\t#y is the 3d points, the corresponidng to 2d is set by index\r\n\t- loss: normalized 
diversity loss.\r\n\t'''\r\n\t#original ndiv 0.00043s\r\n\t#costum ndiv 0.00063s\r\n\t#surface ndiv 2.3s\r\n\t#speed up 0.002s\r\n\ta=time.time()\r\n\tx=x.view(-1,2)\r\n\ty=y.view(-1,3)\r\n\tsize=2/np.sqrt(x.shape[0])\r\n\t# mid_pos=init_middle(x,size)\r\n\tS = x.shape[0]\r\n\tx_norm_pair_dist = compute_norm_pairwise_distance(x,mid_pos)\r\n\ty_norm_pair_dist = compute_norm_pairwise_distance(y,mid_pos)\r\n\t\r\n\tif mode==-1:\r\n\t\treturn 0*torch.mean(x)\r\n\tif mode==0:\r\n\t\tndiv_loss_matrix = torch.abs(x_norm_pair_dist - y_norm_pair_dist)\r\n\tif mode==1:\r\n\t\tndiv_loss_matrix = F.relu(y_norm_pair_dist-x_norm_pair_dist * alpha )\r\n\tif mode==2:\r\n\t\tndiv_loss_matrix = F.relu(x_norm_pair_dist * alpha - y_norm_pair_dist)\r\n\tif mode==3:\r\n\t\tndiv_loss_matrix =torch.clamp(torch.abs(x_norm_pair_dist - y_norm_pair_dist),min=0.1*size)\r\n\tif mode==4:\r\n\t\tndiv_loss_matrix = F.relu(x_norm_pair_dist * alpha - y_norm_pair_dist)\r\n\tndiv_loss = ndiv_loss_matrix.sum(-1).sum(-1) / (S * (S - 1))\r\n\tprint(time.time()-a)\r\n\treturn ndiv_loss\r\n\r\nif __name__ == '__main__':\r\n\tx=torch.rand(100,2)\r\n\ty=torch.rand(100,3)\r\n\tloss=NDiv_loss_surface(x,y)\r\n"
] |
[
[
"torch.abs",
"torch.mean",
"numpy.sqrt",
"torch.sum",
"torch.zeros_like",
"torch.nn.functional.relu",
"torch.rand",
"torch.clamp",
"numpy.array",
"torch.pow"
]
] |
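The `surface_loss.py` row above computes a normalized-pairwise-distance diversity (NDiv-style) loss between a 2D grid and its mapped 3D points, with a grid-midpoint "surface path" refinement. Below is a simplified sketch of the underlying loss without the midpoint refinement, to make the core computation explicit; the function names are stand-ins and only PyTorch is assumed.

```python
import torch
import torch.nn.functional as F

def pairwise_sq_dist(x):
    # ||xi||^2 + ||xj||^2 - 2 xi.xj, clamped for numerical safety
    sq = (x * x).sum(dim=1)
    d = sq.unsqueeze(0) + sq.unsqueeze(1) - 2.0 * x @ x.t()
    return torch.clamp(d, min=1e-6)

def norm_pairwise_dist(x):
    # normalize each row so distances are relative, and detach the normalizer
    d = pairwise_sq_dist(x)
    return d / (d.sum(dim=-1, keepdim=True) + 1e-12).detach()

def ndiv_loss(x2d, y3d, alpha=1.0):
    """Penalize 3D point pairs that are relatively closer than their 2D grid pairs."""
    dx = norm_pairwise_dist(x2d.view(-1, 2))
    dy = norm_pairwise_dist(y3d.view(-1, 3))
    s = dx.shape[0]
    return F.relu(alpha * dx - dy).sum() / (s * (s - 1))

# toy usage
x = torch.rand(100, 2)
y = torch.rand(100, 3)
print(ndiv_loss(x, y).item())
```

This corresponds to the row's `mode==2` branch; the original additionally routes each 2D pair through a grid midpoint (precomputed in `init_middle`) so the compared distance approximates a path along the surface rather than a straight chord.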
Wang518hongyu/PyGEM
|
[
"1c9fa133133b3d463b1383d4792c535fa61c5b8d"
] |
[
"run_preprocessing.py"
] |
[
"\"\"\"\npygemfxns_preprocessing.py is a list of the model functions that are used to preprocess the data into the proper format.\n\n\"\"\"\n\n# Built-in libraries\nimport os\nimport glob\nimport argparse\n# External libraries\nimport pandas as pd\nimport numpy as np\nimport xarray as xr\nimport netCDF4 as nc\nfrom time import strftime\nfrom datetime import datetime\nfrom scipy.spatial.distance import cdist\nfrom scipy.optimize import minimize\nimport matplotlib.pyplot as plt\n# Local libraries\nimport pygem.pygem_input as pygem_prms\nimport pygemfxns_modelsetup as modelsetup\nimport pygemfxns_massbalance as massbalance\nimport class_climate\nfrom analyze_mcmc import load_glacierdata_byglacno\n\n\n#%% TO-DO LIST:\n# - clean up create lapse rate input data (put it all in pygem_prms.py)\n\n#%%\ndef getparser():\n \"\"\"\n Use argparse to add arguments from the command line\n \n Parameters\n ----------\n option_createlapserates : int\n Switch for processing lapse rates (default = 0 (no))\n option_wgms : int\n Switch for processing wgms data (default = 0 (no))\n \n Returns\n -------\n Object containing arguments and their respective values.\n \"\"\"\n parser = argparse.ArgumentParser(description=\"select pre-processing options\")\n # add arguments\n parser.add_argument('-option_createlapserates', action='store', type=int, default=0,\n help='option to create lapse rates or not (1=yes, 0=no)')\n parser.add_argument('-option_createtempstd', action='store', type=int, default=0,\n help='option to create temperature std of daily data or not (1=yes, 0=no)')\n parser.add_argument('-option_wgms', action='store', type=int, default=0,\n help='option to pre-process wgms data (1=yes, 0=no)')\n parser.add_argument('-option_coawstmerge', action='store', type=int, default=0,\n help='option to merge COAWST climate data products (1=yes, 0=no)')\n parser.add_argument('-option_mbdata_fillwregional', action='store', type=int, default=0,\n help='option to fill in missing mass balance data with regional mean and std (1=yes, 0=no)')\n parser.add_argument('-option_frontalablation_cal', action='store', type=int, default=0,\n help='option to calibrate frontal ablation for a glacier')\n parser.add_argument('-option_farinotti2019_input', action='store', type=int, default=0,\n help='option to produce Farinotti 2019 input products (1=yes, 0=no)')\n parser.add_argument('-option_mbdata_regional', action='store', type=int, default=0,\n help='option to analzye mass balance data from various sources (1=yes, 0=no)')\n parser.add_argument('-option_unh_climatedata', action='store', type=int, default=0,\n help='option to pre-process UNH climate data into standard form (1=yes, 0=no)')\n parser.add_argument('-option_regional_meltfactors', action='store', type=int, default=0,\n help='option to produce regional meltfactors consistent with hypsometry data (1=yes, 0=no)')\n return parser\n\nparser = getparser()\nargs = parser.parse_args()\n\n#%%\n#rgi_regionsO1 = [13,14,15]\n#main_glac_rgi_all = pd.DataFrame()\n#for region in rgi_regionsO1:\n# main_glac_rgi_region = modelsetup.selectglaciersrgitable(rgi_regionsO1=[region], rgi_regionsO2='all', \n# rgi_glac_number='all')\n# main_glac_rgi_all = main_glac_rgi_all.append(main_glac_rgi_region)\n\n\n#%%\nif args.option_mbdata_regional == 1:\n option_alaska = 0\n option_iceland = 0\n option_svalbard = 0\n option_russianarctic = 0\n option_andes = 0\n \n option_wgms = 1\n \n if option_wgms == 1:\n print('HERE!')\n ds_wgms = pd.read_csv(pygem_prms.wgms_fp + pygem_prms.wgms_d_fn_preprocessed)\n 
ds_wgms = ds_wgms.sort_values('RGIId', ascending=True)\n ds_wgms.reset_index(drop=True, inplace=True)\n ds_wgms['RegO1'] = [int(x.split('-')[1].split('.')[0]) for x in ds_wgms.RGIId.values]\n ds_wgms['glacno'] = [x.split('-')[1] for x in ds_wgms.RGIId.values]\n region_list = sorted(list(ds_wgms.RegO1.unique()))\n \n print('here2')\n \n for region in region_list:\n print(region)\n ds_region = ds_wgms.loc[np.where(ds_wgms.RegO1 == region)[0]]\n glacno_list_wgms = sorted(list(ds_region.glacno.unique()))\n print('here 3')\n main_glac_rgi_wgms = load_glacierdata_byglacno(glacno_list_wgms, option_loadhyps_climate=0, \n option_loadcal_data=0)\n \n print('Region ' + str(region) + ':',\n '\\n Count:', str(ds_region.shape[0]),\n '\\n Glacier Area [km2]:', str(main_glac_rgi_wgms.Area.sum()))\n \n \n if option_alaska == 1:\n ds_fp = pygem_prms.main_directory + '/../DEMs/McNabb_data/wgms_dv/'\n ds_fn1 = 'Alaska_dV_17jun.csv'\n ds_fn2 = 'BrooksRange_dV_17jun.csv'\n \n ds1 = pd.read_csv(ds_fp + ds_fn1)\n ds2 = pd.read_csv(ds_fp + ds_fn2)\n ds = ds1.append(ds2)\n ds = ds.sort_values('RGIId', ascending=True)\n # remove nan values\n ds = (ds.drop(np.where(np.isnan(ds['smb'].values) == True)[0].tolist(), axis=0)) \n ds.reset_index(drop=True, inplace=True)\n ds['RegO1'] = [int(x.split('-')[1].split('.')[0]) for x in ds.RGIId.values]\n ds['glacno'] = [x.split('-')[1] for x in ds.RGIId.values]\n glacno_list1 = sorted(list(ds.glacno.unique()))\n \n # Add Larsen\n ds3 = pd.read_csv(pygem_prms.larsen_fp + pygem_prms.larsen_fn)\n ds3 = (ds3.drop(np.where(np.isnan(ds3['mb_mwea'].values) == True)[0].tolist(), axis=0)) \n ds3.reset_index(drop=True, inplace=True)\n ds3['RegO1'] = [int(x.split('-')[1].split('.')[0]) for x in ds3.RGIId.values]\n ds3['glacno'] = [x.split('-')[1] for x in ds3.RGIId.values]\n glacno_list2 = sorted(list(ds3.glacno.unique()))\n \n glacno_list = glacno_list1 + glacno_list2\n glacno_list = sorted(list(set(glacno_list)))\n \n main_glac_rgi = load_glacierdata_byglacno(glacno_list, option_loadhyps_climate=0, option_loadcal_data=0)\n \n print('\\nRegion 1:')\n print('Count:', main_glac_rgi.shape[0], '(' + str(np.round(main_glac_rgi.shape[0] / 27108*100, 1)) + '%)')\n print('Glacier Area [km2]:', np.round(main_glac_rgi.Area.sum(),1), '(' + \n str(np.round(main_glac_rgi.Area.sum() / 86725.053 * 100,1)) + '%)')\n \n if option_iceland == 1:\n ds_fp = pygem_prms.main_directory + '/../DEMs/McNabb_data/wgms_dv/'\n ds_fn = 'Iceland_dV_29jun.csv'\n \n ds = pd.read_csv(ds_fp + ds_fn)\n ds = ds.sort_values('RGIId', ascending=True)\n # remove nan values\n ds = (ds.drop(np.where(np.isnan(ds['smb'].values) == True)[0].tolist(), axis=0)) \n ds.reset_index(drop=True, inplace=True)\n ds['RegO1'] = [int(x.split('-')[1].split('.')[0]) for x in ds.RGIId.values]\n ds['glacno'] = [x.split('-')[1] for x in ds.RGIId.values]\n glacno_list = sorted(list(ds.glacno.unique()))\n \n main_glac_rgi = load_glacierdata_byglacno(glacno_list, option_loadhyps_climate=0, option_loadcal_data=0)\n \n print('\\nRegion 6:')\n print('Count:', main_glac_rgi.shape[0], '(' + str(np.round(main_glac_rgi.shape[0] / 568*100, 1)) + '%)')\n print('Glacier Area [km2]:', np.round(main_glac_rgi.Area.sum(),1), '(' + \n str(np.round(main_glac_rgi.Area.sum() / 11059.7 * 100,1)) + '%)')\n \n if option_svalbard == 1:\n ds_fp = pygem_prms.main_directory + '/../DEMs/McNabb_data/wgms_dv/'\n ds_fn1 = 'Svalbard_dV_29jun.csv'\n ds_fn2 = 'JanMayen_dV_29jun.csv'\n \n ds1 = pd.read_csv(ds_fp + ds_fn1)\n ds2 = pd.read_csv(ds_fp + ds_fn2)\n ds = ds1.append(ds2)\n ds = 
ds.sort_values('RGIId', ascending=True)\n # remove nan values\n ds = (ds.drop(np.where(np.isnan(ds['smb'].values) == True)[0].tolist(), axis=0)) \n ds.reset_index(drop=True, inplace=True)\n ds['RegO1'] = [int(x.split('-')[1].split('.')[0]) for x in ds.RGIId.values]\n ds['glacno'] = [x.split('-')[1] for x in ds.RGIId.values]\n glacno_list = sorted(list(ds.glacno.unique()))\n \n main_glac_rgi = load_glacierdata_byglacno(glacno_list, option_loadhyps_climate=0, option_loadcal_data=0)\n \n print('\\nRegion 7:')\n print('Count:', main_glac_rgi.shape[0], '(' + str(np.round(main_glac_rgi.shape[0] / 1615*100, 1)) + '%)')\n print('Glacier Area [km2]:', np.round(main_glac_rgi.Area.sum(),1), '(' + \n str(np.round(main_glac_rgi.Area.sum() / 33958.934 * 100,1)) + '%)')\n \n if option_russianarctic == 1:\n ds_fp = pygem_prms.main_directory + '/../DEMs/McNabb_data/wgms_dv/'\n ds_fn1 = 'FranzJosefLand_17jun.csv'\n ds_fn2 = 'NovayaZemlya_dV_17jun.csv'\n ds_fn3 = 'SevernayaZemlya_dV_17jun.csv'\n \n ds1 = pd.read_csv(ds_fp + ds_fn1)\n ds2 = pd.read_csv(ds_fp + ds_fn2)\n ds3 = pd.read_csv(ds_fp + ds_fn3)\n ds = ds1.append(ds2)\n ds = ds.append(ds3)\n ds = ds.sort_values('RGIId', ascending=True)\n # remove nan values\n ds = (ds.drop(np.where(np.isnan(ds['smb'].values) == True)[0].tolist(), axis=0)) \n ds.reset_index(drop=True, inplace=True)\n ds['RegO1'] = [int(x.split('-')[1].split('.')[0]) for x in ds.RGIId.values]\n ds['glacno'] = [x.split('-')[1] for x in ds.RGIId.values]\n glacno_list = sorted(list(ds.glacno.unique()))\n \n main_glac_rgi = load_glacierdata_byglacno(glacno_list, option_loadhyps_climate=0, option_loadcal_data=0)\n \n print('\\nRegion 9:')\n print('Count:', main_glac_rgi.shape[0], '(' + str(np.round(main_glac_rgi.shape[0] / 1069*100, 1)) + '%)')\n print('Glacier Area [km2]:', np.round(main_glac_rgi.Area.sum(),1), '(' + \n str(np.round(main_glac_rgi.Area.sum() / 51591.6 * 100,1)) + '%)')\n \n if option_andes == 1:\n ds_fp = pygem_prms.main_directory + '/../DEMs/Berthier/'\n ds_fn = 'MB_all_glaciers_Andes_rgi60_2000.0-2018.3.csv'\n \n ds = pd.read_csv(ds_fp + ds_fn)\n ds = ds.sort_values('RGIId', ascending=True)\n # remove nan values\n ds = (ds.drop(np.where(np.isnan(ds['MB [m w.e a-1]'].values) == True)[0].tolist(), axis=0)) \n ds.reset_index(drop=True, inplace=True)\n ds['RegO1'] = [int(x.split('-')[1].split('.')[0]) for x in ds.RGIId.values]\n ds['glacno'] = [x.split('-')[1] for x in ds.RGIId.values]\n \n main_glac_rgi = load_glacierdata_byglacno(ds.glacno.values, option_loadhyps_climate=0, option_loadcal_data=0)\n \n # Count how many in region\n ds_r16 = ds.loc[np.where(ds.RegO1 == 16)[0]]\n main_glac_rgi16 = main_glac_rgi.loc[np.where(main_glac_rgi.O1Region == 16)[0]]\n print('Region 16:')\n print('Count: 2891 glaciers in South America; others in Region 16 in Mexico, Africa, and Papau New Guinea')\n print('Count:', ds_r16.shape[0], '(' + str(np.round(ds_r16.shape[0] / 2891*100, 1)) + '%)')\n print('Glacier Area [km2]:', np.round(main_glac_rgi16.Area.sum(),1), '(' + \n str(np.round(main_glac_rgi16.Area.sum() / 2341 * 100,1)) + '%)')\n \n ds_r17 = ds.loc[np.where(ds.RegO1 == 17)[0]]\n main_glac_rgi17 = main_glac_rgi.loc[np.where(main_glac_rgi.O1Region == 17)[0]]\n print('\\nRegion 17:')\n print('Count:', ds_r17.shape[0], '(' + str(np.round(ds_r17.shape[0] / 15908*100, 1)) + '%)')\n print('Glacier Area [km2]:', np.round(main_glac_rgi17.Area.sum(),1), '(' + \n str(np.round(main_glac_rgi17.Area.sum() / 29429 * 100,1)) + '%)')\n\n \n \n#%% REMOVE POOR OBSERVATIONS AND FILL MISSING MB DATA WITH 
REGIONAL MEAN AND STD\nif args.option_mbdata_fillwregional == 1:\n print('Filling in missing data with regional estimates...')\n # Input data\n ds_fp = pygem_prms.shean_fp\n ds_fn = 'hma_mb_20190215_0815_std+mean.csv'\n \n# dict_fn = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/qgis_himat/rgi60_HMA_dict_kaab.csv'\n# dict_cn = 'kaab_name'\n# dict_csv = pd.read_csv(dict_fn)\n# rgi_dict = dict(zip(dict_csv.RGIId, dict_csv[dict_cn]))\n dict_fn = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/qgis_himat/rgi60_HMA_dict_bolch.csv'\n dict_cn = 'bolch_name'\n dict_csv = pd.read_csv(dict_fn)\n rgi_dict = dict(zip(dict_csv.RGIId, dict_csv[dict_cn]))\n \n # Load mass balance measurements and identify unique rgi regions \n ds = pd.read_csv(ds_fp + ds_fn)\n ds = ds.sort_values('RGIId', ascending=True)\n ds.reset_index(drop=True, inplace=True)\n ds['RGIId'] = round(ds['RGIId'], 5)\n ds['rgi_regO1'] = ds['RGIId'].astype(int)\n ds['rgi_str'] = ds['RGIId'].apply(lambda x: '%.5f' % x)\n rgi_regionsO1 = sorted(ds['rgi_regO1'].unique().tolist())\n\n main_glac_rgi = pd.DataFrame()\n for region in rgi_regionsO1:\n main_glac_rgi_region = modelsetup.selectglaciersrgitable(rgi_regionsO1=[region], rgi_regionsO2='all', \n rgi_glac_number='all')\n main_glac_rgi = main_glac_rgi.append(main_glac_rgi_region)\n main_glac_rgi.reset_index(drop=True, inplace=True)\n \n # Add mass balance and uncertainty to main_glac_rgi\n # Select glaciers with data such that main_glac_rgi and ds indices are aligned correctly\n main_glac_rgi_wdata = (\n main_glac_rgi.iloc[np.where(main_glac_rgi['RGIId_float'].isin(ds['RGIId']) == True)[0],:]).copy()\n main_glac_rgi_wdata.reset_index(drop=True, inplace=True)\n dict_rgi_mb = dict(zip(main_glac_rgi_wdata.RGIId, ds.mb_mwea))\n dict_rgi_mb_sigma = dict(zip(main_glac_rgi_wdata.RGIId, ds.mb_mwea_sigma))\n main_glac_rgi['mb_mwea'] = main_glac_rgi.RGIId.map(dict_rgi_mb)\n main_glac_rgi['mb_mwea_sigma'] = main_glac_rgi.RGIId.map(dict_rgi_mb_sigma)\n\n # Too high of sigma causes large issues for model\n # sigma theoretically should be independent of region\n all_sigma_mean = main_glac_rgi['mb_mwea_sigma'].mean()\n all_sigma_std = main_glac_rgi['mb_mwea_sigma'].std()\n# all_sigma_q1 = main_glac_rgi['mb_mwea_sigma'].quantile(0.25)\n# all_sigma_q3 = main_glac_rgi['mb_mwea_sigma'].quantile(0.75)\n# all_sigma_IQR = all_sigma_q3 - all_sigma_q1\n all_sigma_threshold = all_sigma_mean + 3 * all_sigma_std\n \n print('Sigma Threshold:\\n# glaciers removed:', \n main_glac_rgi.query('(mb_mwea_sigma > @all_sigma_threshold)').shape[0],\n '\\n% Area removed:', \n np.round(main_glac_rgi.query('(mb_mwea_sigma > @all_sigma_threshold)').Area.sum() / main_glac_rgi.Area.sum() \n * 100,1))\n \n main_glac_rgi.loc[main_glac_rgi.query('(mb_mwea_sigma > @all_sigma_threshold)').index.values, 'mb_mwea'] = np.nan\n (main_glac_rgi.loc[main_glac_rgi.query('(mb_mwea_sigma > @all_sigma_threshold)').index.values, \n 'mb_mwea_sigma']) = np.nan\n \n # Loop through groups\n main_glac_rgi['group'] = main_glac_rgi.RGIId.map(rgi_dict)\n # Regional mass balance mean and stdev\n groups = main_glac_rgi.group.unique().tolist()\n group_cn = 'group'\n groups = [x for x in groups if str(x) != 'nan']\n\n removal_glaciers = 0\n removal_area = 0\n total_area = 0\n for ngroup, group in enumerate(groups):\n # Select subset of data\n main_glac_rgi_group = main_glac_rgi.loc[main_glac_rgi[group_cn] == group].copy()\n group_stats = pd.Series()\n group_stats['mean'] = np.nanmean(main_glac_rgi_group['mb_mwea'])\n group_stats['std'] = 
main_glac_rgi_group['mb_mwea'].std()\n# group_stats['q1'] = main_glac_rgi_group['mb_mwea'].quantile(0.25)\n# group_stats['q3'] = main_glac_rgi_group['mb_mwea'].quantile(0.75)\n# group_stats['IQR'] = group_stats['q3'] - group_stats['q1']\n# group_stats['sigma_mean'] = main_glac_rgi_group['mb_mwea_sigma'].mean()\n# group_stats['sigma_std'] = main_glac_rgi_group['mb_mwea_sigma'].std()\n# group_stats['sigma_q1'] = main_glac_rgi_group['mb_mwea_sigma'].quantile(0.25)\n# group_stats['sigma_q3'] = main_glac_rgi_group['mb_mwea_sigma'].quantile(0.75)\n# group_stats['sigma_IQR'] = group_stats['sigma_q3'] - group_stats['sigma_q1']\n \n main_glac_rgi_group['zscore'] = (main_glac_rgi_group['mb_mwea'] - group_stats['mean']) / group_stats['std']\n main_glac_rgi.loc[main_glac_rgi.query('(group == @group)').index.values, 'zscore'] = main_glac_rgi_group.zscore\n \n group_stats['mean_weighted'] = (\n (main_glac_rgi_group.query('(-3 <= zscore <= 3)').mb_mwea * \n main_glac_rgi_group.query('(-3 <= zscore <= 3)').Area).sum() / \n main_glac_rgi_group.query('(-3 <= zscore <= 3)').Area.sum())\n\n group_stats['std_weighted'] = (\n ((main_glac_rgi_group.query('(-3 <= zscore <= 3)').mb_mwea_sigma**2 *\n main_glac_rgi_group.query('(-3 <= zscore <= 3)').Area).sum() / \n main_glac_rgi_group.query('(-3 <= zscore <= 3)').Area.sum())**0.5)\n \n print('\\n',group, 'mean:', np.round(group_stats.mean_weighted, 2), 'std:', np.round(group_stats.std_weighted,2), \n '\\n# glaciers removed:', main_glac_rgi.query('(group == @group) & (abs(zscore) > 3)').shape[0],\n '\\n% area removed:', np.round(main_glac_rgi.query('(group == @group) & (abs(zscore) > 3)').Area.sum() / \n main_glac_rgi.query('(group == @group)').Area.sum() * 100,2))\n \n removal_glaciers += main_glac_rgi.query('(group == @group) & (abs(zscore) > 3)').shape[0]\n removal_area += main_glac_rgi.query('(group == @group) & (abs(zscore) > 3)').Area.sum()\n total_area += main_glac_rgi.query('(group == @group)').Area.sum()\n \n # Replace regional outliers with mean and std\n main_glac_rgi.loc[main_glac_rgi.query('(group == @group) & (abs(zscore) > 3)').index.values, 'mb_mwea'] = (\n group_stats['mean_weighted'])\n main_glac_rgi.loc[main_glac_rgi.query('(group == @group) & (abs(zscore) > 3)').index.values, 'mb_mwea_sigma'] = (\n group_stats['std_weighted'])\n \n # Replace missing values with mean and std\n main_glac_rgi.loc[(main_glac_rgi['group'] == group) & \n (main_glac_rgi.mb_mwea.isnull() == True), 'mb_mwea'] = group_stats['mean_weighted']\n main_glac_rgi.loc[(main_glac_rgi['group'] == group) & \n (main_glac_rgi.mb_mwea_sigma.isnull() == True), 'mb_mwea_sigma'] = group_stats['std_weighted']\n \n print('\\nHMA:\\n # glaciers removed:', removal_glaciers, 'area_removed[%]:', removal_area/total_area*100)\n \n # Glaciers without a region compare to all HMA\n all_mean_weighted = ((main_glac_rgi.query('(-3 <= zscore <= 3)').mb_mwea * \n main_glac_rgi.query('(-3 <= zscore <= 3)').Area).sum() / \n main_glac_rgi.query('(-3 <= zscore <= 3)').Area.sum())\n all_std_weighted = (((main_glac_rgi.query('(-3 <= zscore <= 3)').mb_mwea_sigma**2 * \n main_glac_rgi.query('(-3 <= zscore <= 3)').Area).sum() / \n main_glac_rgi.query('(-3 <= zscore <= 3)').Area.sum())**0.5)\n \n # Replace outliers with mean and std\n main_glac_rgi.loc[main_glac_rgi['group'].isnull() == True, 'zscore'] = (\n main_glac_rgi.loc[main_glac_rgi['group'].isnull() == True, 'mb_mwea'] - all_mean_weighted / all_std_weighted)\n main_glac_rgi.loc[(main_glac_rgi['group'].isnull() == True) & \n (abs(main_glac_rgi['zscore']) > 
3), 'mb_mwea'] = all_mean_weighted\n main_glac_rgi.loc[(main_glac_rgi['group'].isnull() == True) & \n (abs(main_glac_rgi['zscore']) > 3), 'mb_mwea_sigma'] = all_std_weighted\n \n # Replace missing values with mean and std\n main_glac_rgi.loc[(main_glac_rgi['group'].isnull() == True) & \n (main_glac_rgi['mb_mwea'].isnull() == True), 'mb_mwea'] = all_mean_weighted\n main_glac_rgi.loc[(main_glac_rgi['group'].isnull() == True) & \n (main_glac_rgi['mb_mwea_sigma'].isnull() == True), 'mb_mwea_sigma'] = all_std_weighted\n \n print('\\nHMA mean:', np.round(all_mean_weighted,2), 'std:', np.round(all_std_weighted,2))\n \n# # Export filled dataset\n# ds_export = pd.DataFrame(columns=ds.columns)\n# ds_export['RGIId'] = main_glac_rgi['RGIId_float']\n# export_cns = ds.columns.tolist()\n# remove_cns = ['RGIId', 'rgi_regO1', 'rgi_str', 'mb_mwea', 'mb_mwea_sigma', 'mb_m3wea', 'mb_m3wea_sigma']\n# for cn in remove_cns:\n# export_cns.remove(cn)\n# for cn in export_cns:\n# export_dict = dict(zip(main_glac_rgi_wdata.RGIId, ds[cn]))\n# ds_export[cn] = main_glac_rgi.RGIId.map(export_dict)\n# \n# ds_export['mb_mwea'] = main_glac_rgi['mb_mwea']\n# ds_export['mb_mwea_sigma'] = main_glac_rgi['mb_mwea_sigma']\n# nodata_idx = np.where(ds_export['z_min'].isnull() == True)[0]\n# ds_export.loc[nodata_idx, 'area_m2'] = main_glac_rgi.loc[nodata_idx, 'Area'] * 10**6\n# ds_export['mb_m3wea'] = ds_export['mb_mwea'] * ds_export['area_m2']\n# ds_export['mb_m3wea_sigma'] = ds_export['mb_mwea_sigma'] * ds_export['area_m2']\n# ds_export.loc[nodata_idx, 't1'] = ds_export['t1'].min()\n# ds_export.loc[nodata_idx, 't2'] = ds_export['t2'].max()\n# ds_export.loc[nodata_idx, 'dt'] = ds_export['t2'] - ds_export['t1']\n# ds_export.loc[nodata_idx, 'z_med'] = main_glac_rgi.loc[nodata_idx, 'Zmed']\n# ds_export.loc[nodata_idx, 'z_min'] = main_glac_rgi.loc[nodata_idx, 'Zmin']\n# ds_export.loc[nodata_idx, 'z_max'] = main_glac_rgi.loc[nodata_idx, 'Zmax']\n# ds_export.loc[nodata_idx, 'z_slope'] = main_glac_rgi.loc[nodata_idx, 'Slope']\n# ds_export.loc[nodata_idx, 'z_aspect'] = main_glac_rgi.loc[nodata_idx, 'Aspect']\n# output_fn = ds_fn.replace('.csv', '_all_filled.csv')\n# ds_export.to_csv(ds_fp + output_fn, index=False)\n\n#%% COAWST Climate Data\nif args.option_coawstmerge == 1:\n print('Merging COAWST climate data...')\n\n def coawst_merge_netcdf(vn, coawst_fp, coawst_fn_prefix):\n \"\"\"\n Merge COAWST products to form a timeseries\n\n Parameters\n ----------\n vn : str\n variable name\n coawst_fp : str\n filepath of COAWST climate data\n \n Returns\n -------\n exports netcdf of merged climate data\n \"\"\"\n # Sorted list of files to merge\n ds_list = []\n for i in os.listdir(coawst_fp):\n if i.startswith(coawst_fn_prefix):\n ds_list.append(i)\n ds_list = sorted(ds_list)\n # Merge files\n count = 0\n for i in ds_list:\n count += 1\n ds = xr.open_dataset(coawst_fp + i)\n var = ds[vn].values\n lat = ds.LAT.values\n lon = ds.LON.values\n if vn == 'HGHT':\n var_all = var\n elif count == 1:\n var_all = var\n month_start_str = i.split('_')[3].split('.')[0].split('-')[0]\n elif count == len(ds_list):\n var_all = np.append(var_all, var, axis=0)\n month_end_str = i.split('_')[3].split('.')[0].split('-')[1]\n else:\n var_all = np.append(var_all, var, axis=0)\n \n print('Max TOTPRECIP:', ds.TOTPRECIP.values.max())\n print('Max TOTRAIN:', ds.TOTRAIN.values.max())\n print('Max TOTSNOW:', ds.TOTSNOW.values.max())\n \n # Merged dataset\n if vn == 'HGHT':\n ds_all_fn = coawst_fn_prefix + vn + '.nc'\n ds_all = xr.Dataset({vn: (['x', 'y'], var)},\n 
coords={'LON': (['x', 'y'], lon),\n 'LAT': (['x', 'y'], lat)},\n attrs=ds[vn].attrs)\n ds_all[vn].attrs = ds[vn].attrs\n else:\n # reference time in format for pd.date_range\n time_ref = month_start_str[0:4] + '-' + month_start_str[4:6] + '-' + month_start_str[6:8]\n ds_all_fn = coawst_fn_prefix + vn + '_' + month_start_str + '-' + month_end_str + '.nc'\n ds_all = xr.Dataset({vn: (['time', 'x', 'y'], var_all)},\n coords={'LON': (['x', 'y'], lon),\n 'LAT': (['x', 'y'], lat),\n 'time': pd.date_range(time_ref, periods=len(ds_list), freq='MS'),\n 'reference_time': pd.Timestamp(time_ref)})\n ds_all[vn].attrs = ds[vn].attrs\n # Export to netcdf\n ds_all.to_netcdf(coawst_fp + '../' + ds_all_fn)\n ds_all.close()\n \n # Load climate data\n gcm = class_climate.GCM(name='COAWST')\n # Process each variable\n for vn in pygem_prms.coawst_vns:\n coawst_merge_netcdf(vn, pygem_prms.coawst_fp_unmerged, pygem_prms.coawst_fn_prefix_d02)\n# coawst_merge_netcdf(vn, pygem_prms.coawst_fp_unmerged, pygem_prms.coawst_fn_prefix_d01)\n \n\n#%% WGMS PRE-PROCESSING\nif args.option_wgms == 1:\n print('Processing WGMS datasets...')\n # Connect the WGMS mass balance datasets with the RGIIds and relevant elevation bands\n # Note: WGMS reports the RGI in terms of V5 as opposed to V6. Some of the glaciers have changed their RGIId between\n # the two versions, so need to convert WGMS V5 Ids to V6 Ids using the GLIMSID.\n # PROBLEMS WITH DATASETS:\n # - need to be careful with information describing dataset as some descriptions appear to be incorrect.\n \n # ===== Dictionaries (WGMS --> RGIID V6) =====\n # Load RGI version 5 & 6 and create dictionary linking the two\n # -required to avoid errors associated with changes in RGIId between the two versions in some regions\n rgiv6_fn_all = glob.glob(pygem_prms.rgiv6_fn_prefix)\n rgiv5_fn_all = glob.glob(pygem_prms.rgiv5_fn_prefix)\n # Create dictionary of all regions\n # - regions that didn't change between versions (ex. 13, 14, 15) will all the be same. 
Others that have changed\n # may vary greatly.\n for n in range(len(rgiv6_fn_all)):\n print('Region', n+1)\n rgiv6_fn = glob.glob(pygem_prms.rgiv6_fn_prefix)[n]\n rgiv6 = pd.read_csv(rgiv6_fn, encoding='latin1')\n rgiv5_fn = glob.glob(pygem_prms.rgiv5_fn_prefix)[n]\n rgiv5 = pd.read_csv(rgiv5_fn, encoding='latin1')\n # Dictionary to link versions 5 & 6\n rgi_version_compare = rgiv5[['RGIId', 'GLIMSId']].copy()\n rgi_version_compare['RGIIdv6'] = np.nan\n # Link versions 5 & 6 based on GLIMSID\n for r in range(rgiv5.shape[0]):\n try:\n # Use GLIMSID\n rgi_version_compare.iloc[r,2] = (\n rgiv6.iloc[rgiv6['GLIMSId'].values == rgiv5.loc[r,'GLIMSId'],0].values[0])\n # # Use Lat/Lon\n # latlon_dif = abs(rgiv6[['CenLon', 'CenLat']].values - rgiv5[['CenLon', 'CenLat']].values[r,:])\n # latlon_dif[abs(latlon_dif) < 1e-6] = 0\n # rgi_version_compare.iloc[r,2] = rgiv6.iloc[np.where(latlon_dif[:,0] + latlon_dif[:,1] < 0.001)[0][0],0]\n except:\n rgi_version_compare.iloc[r,2] = np.nan\n rgiv56_dict_reg = dict(zip(rgi_version_compare['RGIId'], rgi_version_compare['RGIIdv6']))\n latdict_reg = dict(zip(rgiv6['RGIId'], rgiv6['CenLat']))\n londict_reg = dict(zip(rgiv6['RGIId'], rgiv6['CenLon']))\n rgiv56_dict = {}\n latdict = {}\n londict = {}\n rgiv56_dict.update(rgiv56_dict_reg)\n latdict.update(latdict_reg)\n londict.update(londict_reg)\n # RGI Lookup table\n rgilookup = pd.read_csv(pygem_prms.rgilookup_fullfn, skiprows=2)\n rgidict = dict(zip(rgilookup['FoGId'], rgilookup['RGIId']))\n # WGMS Lookup table\n wgmslookup = pd.read_csv(pygem_prms.wgms_fp + pygem_prms.wgms_lookup_fn, encoding='latin1')\n wgmsdict = dict(zip(wgmslookup['WGMS_ID'], wgmslookup['RGI_ID']))\n # Manual lookup table\n mandict = {10402: 'RGI60-13.10093',\n 10401: 'RGI60-15.03734',\n 6846: 'RGI60-15.12707'}\n #%%\n # ===== WGMS (D) Geodetic mass balance data =====\n if 'wgms_d' in pygem_prms.wgms_datasets:\n print('Processing geodetic thickness change data')\n wgms_mb_geo_all = pd.read_csv(pygem_prms.wgms_fp + pygem_prms.wgms_d_fn, encoding='latin1')\n wgms_mb_geo_all['RGIId_rgidict'] = wgms_mb_geo_all['WGMS_ID'].map(rgidict)\n wgms_mb_geo_all['RGIId_mandict'] = wgms_mb_geo_all['WGMS_ID'].map(mandict)\n wgms_mb_geo_all['RGIId_wgmsdict'] = wgms_mb_geo_all['WGMS_ID'].map(wgmsdict)\n wgms_mb_geo_all['RGIId_wgmsdictv6'] = wgms_mb_geo_all['RGIId_wgmsdict'].map(rgiv56_dict)\n # Use dictionaries to convert wgms data to RGIIds\n wgms_mb_geo_RGIIds_all_raw_wdicts = wgms_mb_geo_all[['RGIId_rgidict', 'RGIId_mandict','RGIId_wgmsdictv6']]\n wgms_mb_geo_RGIIds_all_raw = wgms_mb_geo_RGIIds_all_raw_wdicts.apply(lambda x: sorted(x, key=pd.isnull)[0], 1)\n # Determine regions and glacier numbers\n wgms_mb_geo_all['RGIId'] = wgms_mb_geo_RGIIds_all_raw.values\n wgms_mb_geo_all['version'], wgms_mb_geo_all['glacno'] = wgms_mb_geo_RGIIds_all_raw.str.split('-').dropna().str\n wgms_mb_geo_all['glacno'] = wgms_mb_geo_all['glacno'].apply(pd.to_numeric)\n wgms_mb_geo_all['region'] = wgms_mb_geo_all['glacno'].apply(np.floor)\n wgms_mb_geo = wgms_mb_geo_all[np.isfinite(wgms_mb_geo_all['glacno'])].sort_values('glacno')\n wgms_mb_geo.reset_index(drop=True, inplace=True)\n # Add latitude and longitude \n wgms_mb_geo['CenLat'] = wgms_mb_geo['RGIId'].map(latdict)\n wgms_mb_geo['CenLon'] = wgms_mb_geo['RGIId'].map(londict)\n\n # Export relevant information\n wgms_mb_geo_export = pd.DataFrame()\n export_cols_geo = ['RGIId', 'glacno', 'WGMS_ID', 'CenLat', 'CenLon', 'REFERENCE_DATE', 'SURVEY_DATE', \n 'LOWER_BOUND', 'UPPER_BOUND', 'AREA_SURVEY_YEAR', 'AREA_CHANGE', 
'AREA_CHANGE_UNC', \n 'THICKNESS_CHG', 'THICKNESS_CHG_UNC', 'VOLUME_CHANGE', 'VOLUME_CHANGE_UNC', \n 'SD_PLATFORM_METHOD', 'RD_PLATFORM_METHOD', 'REFERENCE', 'REMARKS', 'INVESTIGATOR', \n 'SPONS_AGENCY']\n wgms_mb_geo_export = wgms_mb_geo.loc[(np.isfinite(wgms_mb_geo['THICKNESS_CHG']) | \n (np.isfinite(wgms_mb_geo['VOLUME_CHANGE']))), export_cols_geo]\n # Add observation type for comparison (massbalance, snowline, etc.)\n wgms_mb_geo_export[pygem_prms.wgms_obs_type_cn] = 'mb_geo'\n wgms_mb_geo_export.reset_index(drop=True, inplace=True)\n wgms_mb_geo_export_fn = pygem_prms.wgms_fp + pygem_prms.wgms_d_fn_preprocessed\n wgms_mb_geo_export.to_csv(wgms_mb_geo_export_fn)\n \n # ===== WGMS (EE) Glaciological mass balance data =====\n if 'wgms_ee' in pygem_prms.wgms_datasets:\n print('Processing glaciological mass balance data')\n wgms_mb_glac_all = pd.read_csv(pygem_prms.wgms_fp + pygem_prms.wgms_ee_fn, encoding='latin1')\n wgms_mb_glac_all['RGIId_rgidict'] = wgms_mb_glac_all['WGMS_ID'].map(rgidict)\n wgms_mb_glac_all['RGIId_mandict'] = wgms_mb_glac_all['WGMS_ID'].map(mandict)\n wgms_mb_glac_all['RGIId_wgmsdict'] = wgms_mb_glac_all['WGMS_ID'].map(wgmsdict)\n wgms_mb_glac_all['RGIId_wgmsdictv6'] = wgms_mb_glac_all['RGIId_wgmsdict'].map(rgiv56_dict)\n # Use dictionaries to convert wgms data to RGIIds\n wgms_mb_glac_RGIIds_all_raw_wdicts = wgms_mb_glac_all[['RGIId_rgidict', 'RGIId_mandict','RGIId_wgmsdictv6']]\n wgms_mb_glac_RGIIds_all_raw = wgms_mb_glac_RGIIds_all_raw_wdicts.apply(lambda x: sorted(x, key=pd.isnull)[0], 1) \n # Determine regions and glacier numbers\n wgms_mb_glac_all['RGIId'] = wgms_mb_glac_RGIIds_all_raw.values\n wgms_mb_glac_all['version'], wgms_mb_glac_all['glacno'] = (\n wgms_mb_glac_RGIIds_all_raw.str.split('-').dropna().str)\n wgms_mb_glac_all['glacno'] = wgms_mb_glac_all['glacno'].apply(pd.to_numeric)\n wgms_mb_glac_all['region'] = wgms_mb_glac_all['glacno'].apply(np.floor)\n wgms_mb_glac = wgms_mb_glac_all[np.isfinite(wgms_mb_glac_all['glacno'])].sort_values('glacno')\n wgms_mb_glac.reset_index(drop=True, inplace=True)\n # Add latitude and longitude \n wgms_mb_glac['CenLat'] = wgms_mb_glac['RGIId'].map(latdict)\n wgms_mb_glac['CenLon'] = wgms_mb_glac['RGIId'].map(londict)\n # Import MB overview data to extract survey dates\n wgms_mb_overview = pd.read_csv(pygem_prms.wgms_fp + pygem_prms.wgms_e_fn, encoding='latin1')\n wgms_mb_glac['BEGIN_PERIOD'] = np.nan \n wgms_mb_glac['END_PERIOD'] = np.nan \n wgms_mb_glac['TIME_SYSTEM'] = np.nan\n wgms_mb_glac['END_WINTER'] = np.nan\n for x in range(wgms_mb_glac.shape[0]):\n wgms_mb_glac.loc[x,'BEGIN_PERIOD'] = (\n wgms_mb_overview[(wgms_mb_glac.loc[x,'WGMS_ID'] == wgms_mb_overview['WGMS_ID']) & \n (wgms_mb_glac.loc[x,'YEAR'] == wgms_mb_overview['Year'])]['BEGIN_PERIOD'].values)\n wgms_mb_glac.loc[x,'END_WINTER'] = (\n wgms_mb_overview[(wgms_mb_glac.loc[x,'WGMS_ID'] == wgms_mb_overview['WGMS_ID']) & \n (wgms_mb_glac.loc[x,'YEAR'] == wgms_mb_overview['Year'])]['END_WINTER'].values)\n wgms_mb_glac.loc[x,'END_PERIOD'] = (\n wgms_mb_overview[(wgms_mb_glac.loc[x,'WGMS_ID'] == wgms_mb_overview['WGMS_ID']) & \n (wgms_mb_glac.loc[x,'YEAR'] == wgms_mb_overview['Year'])]['END_PERIOD'].values)\n wgms_mb_glac.loc[x,'TIME_SYSTEM'] = (\n wgms_mb_overview[(wgms_mb_glac.loc[x,'WGMS_ID'] == wgms_mb_overview['WGMS_ID']) & \n (wgms_mb_glac.loc[x,'YEAR'] == wgms_mb_overview['Year'])]['TIME_SYSTEM'].values[0]) \n # Split summer, winter, and annual into separate rows so each becomes a data point in the calibration\n # if summer and winter exist, then discard 
annual to avoid double-counting the annual measurement\n export_cols_annual = ['RGIId', 'glacno', 'WGMS_ID', 'CenLat', 'CenLon', 'YEAR', 'TIME_SYSTEM', 'BEGIN_PERIOD', \n 'END_WINTER', 'END_PERIOD', 'LOWER_BOUND', 'UPPER_BOUND', 'ANNUAL_BALANCE', \n 'ANNUAL_BALANCE_UNC', 'REMARKS']\n export_cols_summer = ['RGIId', 'glacno', 'WGMS_ID', 'CenLat', 'CenLon', 'YEAR', 'TIME_SYSTEM', 'BEGIN_PERIOD', \n 'END_WINTER', 'END_PERIOD', 'LOWER_BOUND', 'UPPER_BOUND', 'SUMMER_BALANCE', \n 'SUMMER_BALANCE_UNC', 'REMARKS']\n export_cols_winter = ['RGIId', 'glacno', 'WGMS_ID', 'CenLat', 'CenLon', 'YEAR', 'TIME_SYSTEM', 'BEGIN_PERIOD', \n 'END_WINTER', 'END_PERIOD', 'LOWER_BOUND', 'UPPER_BOUND', 'WINTER_BALANCE', \n 'WINTER_BALANCE_UNC', 'REMARKS']\n wgms_mb_glac_annual = wgms_mb_glac.loc[((np.isnan(wgms_mb_glac['WINTER_BALANCE'])) & \n (np.isnan(wgms_mb_glac['SUMMER_BALANCE']))), export_cols_annual]\n wgms_mb_glac_summer = wgms_mb_glac.loc[np.isfinite(wgms_mb_glac['SUMMER_BALANCE']), export_cols_summer]\n wgms_mb_glac_winter = wgms_mb_glac.loc[np.isfinite(wgms_mb_glac['WINTER_BALANCE']), export_cols_winter]\n # Assign a time period to each of the measurements, which will be used for comparison with model data \n wgms_mb_glac_annual['period'] = 'annual'\n wgms_mb_glac_summer['period'] = 'summer'\n wgms_mb_glac_winter['period'] = 'winter'\n # Rename columns such that all rows are the same\n wgms_mb_glac_annual.rename(columns={'ANNUAL_BALANCE': 'BALANCE', 'ANNUAL_BALANCE_UNC': 'BALANCE_UNC'}, \n inplace=True)\n wgms_mb_glac_summer.rename(columns={'SUMMER_BALANCE': 'BALANCE', 'SUMMER_BALANCE_UNC': 'BALANCE_UNC'}, \n inplace=True)\n wgms_mb_glac_winter.rename(columns={'WINTER_BALANCE': 'BALANCE', 'WINTER_BALANCE_UNC': 'BALANCE_UNC'}, \n inplace=True)\n # Export relevant information\n wgms_mb_glac_export = (pd.concat([wgms_mb_glac_annual, wgms_mb_glac_summer, wgms_mb_glac_winter])\n .sort_values(['glacno', 'YEAR']))\n # Add observation type for comparison (massbalance, snowline, etc.)\n wgms_mb_glac_export[pygem_prms.wgms_obs_type_cn] = 'mb_glac'\n wgms_mb_glac_export.reset_index(drop=True, inplace=True)\n wgms_mb_glac_export_fn = pygem_prms.wgms_fp + pygem_prms.wgms_ee_fn_preprocessed\n wgms_mb_glac_export.to_csv(wgms_mb_glac_export_fn)\n\n\n#%% Create netcdf file of lapse rates from temperature pressure level data\nif args.option_createlapserates == 1:\n # Input data\n gcm_fp = pygem_prms.era5_fp\n gcm_fn = pygem_prms.era5_pressureleveltemp_fn\n \n tempname = 't'\n levelname = 'level'\n elev_idx_max = 0\n elev_idx_min = 20\n expver_idx = 0\n output_fn= 'ERA5_lapserates.nc'\n \n # Open dataset\n ds = xr.open_dataset(gcm_fp + gcm_fn) \n # extract the pressure levels [Pa]\n if ds[levelname].attrs['units'] == 'millibars':\n # convert pressure levels from millibars to Pa\n levels = ds[levelname].values * 100\n # Compute the elevation [m a.s.l] of the pressure levels using the barometric pressure formula (pressure in Pa)\n elev = (-pygem_prms.R_gas * pygem_prms.temp_std / (pygem_prms.gravity * pygem_prms.molarmass_air) * \n np.log(levels/pygem_prms.pressure_std))\n\n # Calculate lapse rates by year\n lr = np.zeros((ds.time.shape[0], ds.latitude.shape[0], ds.longitude.shape[0]))\n for ntime, t in enumerate(ds.time.values): \n print('time:', ntime, t)\n \n if 'expver' in ds.keys():\n ds_subset = ds[tempname][ntime, expver_idx, elev_idx_max:elev_idx_min+1, :, :].values\n else:\n ds_subset = ds[tempname][ntime, elev_idx_max:elev_idx_min+1, :, :].values\n ds_subset_reshape = ds_subset.reshape(ds_subset.shape[0],-1)\n 
lr[ntime,:,:] = (np.polyfit(elev[elev_idx_max:elev_idx_min+1], ds_subset_reshape, deg=1)[0]\n .reshape(ds_subset.shape[1:]))\n\n # Export lapse rates with attibutes\n output_ds = ds.copy()\n output_ds = output_ds.drop('t')\n str_max = str(ds['level'][elev_idx_max].values)\n try:\n str_min = str(ds['level'][elev_idx_min].values)\n except:\n str_min = str(ds['level'][elev_idx_min-1].values)\n levels_str = str_max + ' to ' + str_min\n output_ds['lapserate'] = (('time', 'latitude', 'longitude'), lr, \n {'long_name': 'lapse rate', \n 'units': 'degC m-1',\n 'levels': levels_str})\n encoding = {'lapserate':{'_FillValue': False,\n 'zlib':True,\n 'complevel':9}}\n \n output_ds.to_netcdf(gcm_fp + output_fn, encoding=encoding)\n \n \n#%%\nif args.option_createtempstd == 1:\n ds_fp = '/Volumes/LaCie/ERA5/'\n# ds_fn = 't2m_hourly_1979_1989.nc'\n# ds_fn = 't2m_hourly_1990_1999.nc'\n# ds_fn = 't2m_hourly_2000_2009.nc'\n# ds_fn = 't2m_hourly_2010_2019.nc'\n ds_all_fn = 'ERA5_tempstd_monthly.nc'\n option_merge_files = 1\n \n # Merge completed files together\n if option_merge_files == 1:\n \n #%%\n tempstd_fns = []\n for i in os.listdir(ds_fp):\n if i.startswith('ERA5_tempstd_monthly') and i.endswith('.nc'):\n tempstd_fns.append(i)\n tempstd_fns = sorted(tempstd_fns)\n\n # Open datasets and combine\n for nfile, tempstd_fn in enumerate(tempstd_fns):\n print(tempstd_fn)\n ds = xr.open_dataset(ds_fp + tempstd_fn)\n # Merge datasets of stats into one output\n if nfile == 0:\n ds_all = ds\n else:\n ds_all = xr.concat([ds_all, ds], dim='time')\n \n # Export to netcdf\n encoding = {'t2m_std':{'_FillValue': False}}\n ds_all.to_netcdf(ds_fp + ds_all_fn, encoding=encoding)\n \n else:\n \n output_fn= 'ERA5_tempstd_monthly_' + ds_fn.split('_')[2] + '_' + ds_fn.split('_')[3]\n \n ds = xr.open_dataset(ds_fp + ds_fn)\n \n # ds_subset = ds.t2m[0:30*24,:,:].values\n # t2m_daily = np.moveaxis(np.moveaxis(ds_subset, 0, -1).reshape(-1,24).mean(axis=1)\n # .reshape(ds_subset.shape[1],ds_subset.shape[2],int(ds_subset.shape[0]/24)), -1, 0)\n \n # Calculate daily mean temperature\n ndays = int(ds.time.shape[0] / 24)\n t2m_daily = np.zeros((ndays, ds.latitude.shape[0], ds.longitude.shape[0]))\n for nday in np.arange(ndays):\n if nday%50 == 0:\n print(str(nday) + ' out of ' + str(ndays))\n ds_subset = ds.t2m[nday*24:(nday+1)*24, :, :].values\n t2m_daily[nday,:,:] = (\n np.moveaxis(np.moveaxis(ds_subset, 0, -1).reshape(-1,24).mean(axis=1)\n .reshape(ds_subset.shape[1],ds_subset.shape[2],int(ds_subset.shape[0]/24)), -1, 0))\n \n # Calculate monthly temperature standard deviation\n date = ds.time[::24].values\n date_month = [pd.Timestamp(date[x]).month for x in np.arange(date.shape[0])]\n date_year = [pd.Timestamp(date[x]).year for x in np.arange(date.shape[0])]\n \n date_yyyymm = [str(date_year[x]) + '-' + str(date_month[x]).zfill(2) for x in np.arange(date.shape[0])]\n date_yyyymm_unique = sorted(list(set(date_yyyymm)))\n \n t2m_monthly_std = np.zeros((len(date_yyyymm_unique), ds.latitude.shape[0], ds.longitude.shape[0]))\n date_monthly = []\n for count, yyyymm in enumerate(date_yyyymm_unique):\n if count%12 == 0:\n print(yyyymm)\n date_idx = np.where(np.array(date_yyyymm) == yyyymm)[0]\n date_monthly.append(date[date_idx[0]])\n t2m_monthly_std[count,:,:] = t2m_daily[date_idx,:,:].std(axis=0)\n \n # Export lapse rates with attibutes\n output_ds = ds.copy()\n output_ds = output_ds.drop('t2m')\n output_ds = output_ds.drop('time')\n output_ds['time'] = date_monthly\n output_ds['t2m_std'] = (('time', 'latitude', 'longitude'), 
t2m_monthly_std, \n {'long_name': 'monthly 2m temperature standard deviation', \n 'units': 'K'})\n encoding = {'t2m_std':{'_FillValue': False}}\n output_ds.to_netcdf(ds_fp + output_fn, encoding=encoding)\n \n # Close dataset\n ds.close()\n \n\n#%%\nif args.option_frontalablation_cal == 1:\n region = [1]\n calving_data = pd.read_csv(pygem_prms.mcnabb_fp + '../alaska_gate_widths_flux.csv')\n \n glac_no = [x.split('-')[1] for x in list(calving_data.RGIId.values)]\n \n region_all = [int(x.split('.')[0]) for x in glac_no]\n rgi_glac_number_all = [x.split('.')[1] for x in glac_no]\n \n rgi_glac_number = []\n for n, reg in enumerate(region_all):\n if reg == region[0]:\n rgi_glac_number.append(rgi_glac_number_all[n])\n\n # Glacier RGI data\n main_glac_rgi = modelsetup.selectglaciersrgitable(rgi_regionsO1=region, rgi_regionsO2 = 'all',\n rgi_glac_number=rgi_glac_number)\n # Glacier hypsometry [km**2], total area\n main_glac_hyps = modelsetup.import_Husstable(main_glac_rgi, pygem_prms.hyps_filepath,\n pygem_prms.hyps_filedict, pygem_prms.hyps_colsdrop)\n # Ice thickness [m], average\n main_glac_icethickness = modelsetup.import_Husstable(main_glac_rgi, pygem_prms.thickness_filepath, \n pygem_prms.thickness_filedict, pygem_prms.thickness_colsdrop)\n main_glac_hyps[main_glac_icethickness == 0] = 0\n # Width [km], average\n main_glac_width = modelsetup.import_Husstable(main_glac_rgi, pygem_prms.width_filepath,\n pygem_prms.width_filedict, pygem_prms.width_colsdrop)\n # Elevation bins\n elev_bins = main_glac_hyps.columns.values.astype(int) \n # Select dates including future projections\n dates_table = modelsetup.datesmodelrun(startyear=2000, endyear=2005, spinupyears=0)\n \n # ===== LOAD CLIMATE DATA =====\n gcm = class_climate.GCM(name=pygem_prms.ref_gcm_name)\n # Air temperature [degC], Precipitation [m], Elevation [masl], Lapse rate [K m-1]\n gcm_temp, gcm_dates = gcm.importGCMvarnearestneighbor_xarray(gcm.temp_fn, gcm.temp_vn, main_glac_rgi, dates_table)\n gcm_prec, gcm_dates = gcm.importGCMvarnearestneighbor_xarray(gcm.prec_fn, gcm.prec_vn, main_glac_rgi, dates_table)\n gcm_elev = gcm.importGCMfxnearestneighbor_xarray(gcm.elev_fn, gcm.elev_vn, main_glac_rgi)\n # Air temperature standard deviation\n if pygem_prms.option_ablation != 2:\n gcm_tempstd = np.zeros(gcm_temp.shape)\n elif pygem_prms.ref_gcm_name in ['ERA5']:\n gcm_tempstd, gcm_dates = gcm.importGCMvarnearestneighbor_xarray(gcm.tempstd_fn, gcm.tempstd_vn, \n main_glac_rgi, dates_table)\n # Lapse rate [K m-1]\n gcm_lr, gcm_dates = gcm.importGCMvarnearestneighbor_xarray(gcm.lr_fn, gcm.lr_vn, main_glac_rgi, dates_table)\n #%%\n \n for n, glac_str_wRGI in enumerate([main_glac_rgi['RGIId'].values[0]]):\n# for n, glac_str_wRGI in enumerate(main_glac_rgi['RGIId'].values):\n # Glacier string\n glacier_str = glac_str_wRGI.split('-')[1]\n print(glacier_str)\n # Glacier number\n glacno = int(glacier_str.split('.')[1])\n # RGI information\n glacier_rgi_table = main_glac_rgi.loc[main_glac_rgi.index.values[n], :]\n\n # Select subsets of data\n glacier_gcm_elev = gcm_elev[n]\n glacier_gcm_temp = gcm_temp[n,:]\n glacier_gcm_tempstd = gcm_tempstd[n,:]\n glacier_gcm_lrgcm = gcm_lr[n,:]\n glacier_gcm_lrglac = glacier_gcm_lrgcm.copy()\n glacier_gcm_prec = gcm_prec[n,:]\n glacier_area_t0 = main_glac_hyps.iloc[n,:].values.astype(float)\n icethickness_t0 = main_glac_icethickness.iloc[n,:].values.astype(float)\n width_t0 = main_glac_width.iloc[n,:].values.astype(float)\n glac_idx_t0 = glacier_area_t0.nonzero()[0]\n # Set model parameters\n modelparameters = 
[pygem_prms.lrgcm, pygem_prms.lrglac, pygem_prms.precfactor, pygem_prms.precgrad, pygem_prms.ddfsnow, pygem_prms.ddfice,\n pygem_prms.tempsnow, pygem_prms.tempchange]\n frontalablation_k0 = pygem_prms.frontalablation_k0dict[int(glacier_str.split('.')[0])]\n \n (glac_bin_temp, glac_bin_prec, glac_bin_acc, glac_bin_refreeze, glac_bin_snowpack, glac_bin_melt,\n glac_bin_frontalablation, glac_bin_massbalclim, glac_bin_massbalclim_annual, glac_bin_area_annual,\n glac_bin_icethickness_annual, glac_bin_width_annual, glac_bin_surfacetype_annual,\n glac_wide_massbaltotal, glac_wide_runoff, glac_wide_snowline, glac_wide_snowpack,\n glac_wide_area_annual, glac_wide_volume_annual, glac_wide_ELA_annual, offglac_wide_prec, \n offglac_wide_refreeze, offglac_wide_melt, offglac_wide_snowpack, offglac_wide_runoff) = (\n massbalance.runmassbalance(modelparameters, glacier_rgi_table, glacier_area_t0, \n icethickness_t0, width_t0, elev_bins, glacier_gcm_temp, glacier_gcm_tempstd, \n glacier_gcm_prec, glacier_gcm_elev, glacier_gcm_lrgcm, glacier_gcm_lrglac, \n dates_table, option_areaconstant=0, frontalablation_k=None,\n debug=True))\n print('Add objective function and code ')\n \n#%% \nif args.option_farinotti2019_input == 1:\n print(\"\\nProcess the ice thickness and surface elevation data from Farinotti (2019) to produce area,\" + \n \"ice thickness, width, and length for each elevation bin\\n\")\n \n \n#%%\nif args.option_unh_climatedata == 1:\n climate_fp_hist = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/Climate_data/UNH_cmip5/CCSM4_RCP_85/'\n climate_fp_fut = climate_fp_hist + 'r1i1p1/'\n climate_fp_export = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/Climate_data/UNH_cmip5/'\n \n # Historical climate\n hist_fn_pr = []\n for i in os.listdir(climate_fp_hist):\n if i.startswith('pr') and i.endswith('.nc'):\n hist_fn_pr.append(climate_fp_hist + i)\n hist_fn_pr = sorted(hist_fn_pr)\n \n hist_fn_tas = []\n for i in os.listdir(climate_fp_hist):\n if i.startswith('tas') and i.endswith('.nc'):\n hist_fn_tas.append(climate_fp_hist + i)\n hist_fn_tas = sorted(hist_fn_tas)\n \n # Future climate\n fut_fn_pr = []\n for i in os.listdir(climate_fp_fut + 'pr/'):\n if (i.startswith('pr') and i.endswith('.nc') and \n not i.endswith('_y.nc') and not i.endswith('_mc.nc') and not i.endswith('_yc.nc')):\n fut_fn_pr.append(climate_fp_fut + 'pr/' + i)\n fut_fn_pr = sorted(fut_fn_pr)\n \n fut_fn_tas = []\n for i in os.listdir(climate_fp_fut + 'tas/'):\n if (i.startswith('tas') and i.endswith('.nc') and \n not i.endswith('_y.nc') and not i.endswith('_mc.nc') and not i.endswith('_yc.nc')):\n fut_fn_tas.append(climate_fp_fut + 'tas/' + i)\n fut_fn_tas = sorted(fut_fn_tas)\n \n # Merge lists\n tas_fn = hist_fn_tas.copy()\n tas_fn.extend(fut_fn_tas)\n pr_fn = hist_fn_pr.copy()\n pr_fn.extend(fut_fn_pr)\n \n # Example dataset\n ds_example = xr.open_dataset('/Users/davidrounce/Documents/Dave_Rounce/HiMAT/Climate_data/cmip5/' + \n 'rcp85_r1i1p1_monNG/tas_mon_CanESM2_rcp85_r1i1p1_native.nc')\n ds_example_pr = xr.open_dataset('/Users/davidrounce/Documents/Dave_Rounce/HiMAT/Climate_data/cmip5/' + \n 'rcp85_r1i1p1_monNG/pr_mon_CanESM2_rcp85_r1i1p1_native.nc')\n \n #%%\n # Merge datasets together\n ds_tas_all_fullfn = climate_fp_export + 'tas_mon_CCSM4_rcp85_r1i1p1_native.nc'\n \n data_tas = None\n if os.path.exists(ds_tas_all_fullfn) == False:\n ds_tas_all = None\n for fn in tas_fn[:301]:\n# for fn in tas_fn[:5]:\n print(fn)\n ds_tas = xr.open_dataset(fn)\n\n if data_tas is None:\n data_tas = ds_tas['tas'].values\n else:\n data_tas 
= np.concatenate((data_tas, ds_tas['tas'].values), axis=0)\n\n #%%\n# import collections\n# \n# lat_values = ds_tas.lat.values\n# lon_values = ds_tas.lon.values\n# time_values = pd.date_range('1960-01-01', '2260-12-15',freq='MS')\n# \n# # Create new dataset with correct format\n# output_coords_dict = collections.OrderedDict()\n# output_coords_dict['tas'] = collections.OrderedDict([('time', time_values), ('lat', lat_values), \n# ('lon', lon_values)])\n# output_attrs_dict = {\n# 'time': {'standard_name': 'time'},\n# 'lat': {'long_name': 'latitude',\n# 'units': 'degrees N'}, \n# 'lon': {'long_name': 'longitude',\n# 'units': 'degrees E'},\n# 'tas': {'long_name': 'air_temperature',\n# 'units': 'K'}}\n# \n# output_ds_all = None\n# encoding = {}\n# for vn in output_coords_dict.keys():\n# empty_holder = np.zeros([len(output_coords_dict[vn][i]) for i in list(output_coords_dict[vn].keys())])\n# output_ds = xr.Dataset({vn: (list(output_coords_dict[vn].keys()), empty_holder)},\n# coords=output_coords_dict[vn])\n# # Merge datasets of stats into one output\n# if output_ds_all is None:\n# output_ds_all = output_ds\n# else:\n# output_ds_all = xr.merge((output_ds_all, output_ds))\n# \n# # Add attributes\n# for vn in output_attrs_dict.keys():\n# try:\n# output_ds_all[vn].attrs = output_attrs_dict[vn]\n# except:\n# pass\n# # Encoding (specify _FillValue, offsets, etc.)\n# encoding[vn] = {'_FillValue': False,\n## 'zlib':True,\n## 'complevel':9\n# }\n# \n# output_ds_all['lon'].values = lon_values\n# output_ds_all['lat'].values = lat_values\n# output_ds_all['time'].values = time_values\n# output_ds_all['tas'].values = data_tas\n# \n# output_ds_all.attrs = {\n# 'source': 'University of New Hampshire - Alex Prusevich',\n# 'history': 'revised by David Rounce (drounce@alaska.edu) for PyGEM format'}\n# \n# # Export\n# output_ds_all.to_netcdf(ds_tas_all_fullfn)\n \n \n #%%\n # Merge datasets together\n ds_pr_all_fullfn = climate_fp_export + 'pr_mon_CCSM4_rcp85_r1i1p1_native.nc'\n \n data_pr = None\n if os.path.exists(ds_pr_all_fullfn) == False:\n ds_pr_all = None\n for fn in pr_fn[:301]:\n print(fn)\n ds_pr = xr.open_dataset(fn)\n\n if data_pr is None:\n data_pr = ds_pr['pr'].values\n else:\n data_pr = np.concatenate((data_pr, ds_pr['pr'].values), axis=0)\n \n #%%\n print(ds_pr['pr'].units)\n \n #%%\n import collections\n \n lat_values = ds_pr.lat.values\n lon_values = ds_pr.lon.values\n time_values = pd.date_range('1960-01-01', '2260-12-15',freq='MS')\n \n # Create new dataset with correct format\n output_coords_dict = collections.OrderedDict()\n output_coords_dict['pr'] = collections.OrderedDict([('time', time_values), ('lat', lat_values), \n ('lon', lon_values)])\n output_attrs_dict = {\n 'time': {'standard_name': 'time'},\n 'lat': {'long_name': 'latitude',\n 'units': 'degrees N'}, \n 'lon': {'long_name': 'longitude',\n 'units': 'degrees E'},\n 'pr': {'long_name': 'precipitation',\n 'units': ds_pr['pr'].units}}\n \n output_ds_all = None\n encoding = {}\n for vn in output_coords_dict.keys():\n empty_holder = np.zeros([len(output_coords_dict[vn][i]) for i in list(output_coords_dict[vn].keys())])\n output_ds = xr.Dataset({vn: (list(output_coords_dict[vn].keys()), empty_holder)},\n coords=output_coords_dict[vn])\n # Merge datasets of stats into one output\n if output_ds_all is None:\n output_ds_all = output_ds\n else:\n output_ds_all = xr.merge((output_ds_all, output_ds))\n \n # Add attributes\n for vn in output_attrs_dict.keys():\n try:\n output_ds_all[vn].attrs = output_attrs_dict[vn]\n except:\n pass\n # Encoding 
(specify _FillValue, offsets, etc.)\n encoding[vn] = {'_FillValue': False,\n# 'zlib':True,\n# 'complevel':9\n }\n \n output_ds_all['lon'].values = lon_values\n output_ds_all['lat'].values = lat_values\n output_ds_all['time'].values = time_values\n output_ds_all['pr'].values = data_pr\n \n output_ds_all.attrs = {\n 'source': 'University of New Hampshire - Alex Prusevich',\n 'history': 'revised by David Rounce (drounce@alaska.edu) for PyGEM format'}\n \n \n # Export\n output_ds_all.to_netcdf(ds_pr_all_fullfn)\n \n \n#%%\nif args.option_regional_meltfactors == 1:\n hd_fp = ('/Users/davidrounce/Documents/Dave_Rounce/DebrisGlaciers_WG/Melt_Intercomparison/output/mb_bins/csv/' + \n '_wdebris_hdts/')\n hd_extrap_fp = ('/Users/davidrounce/Documents/Dave_Rounce/DebrisGlaciers_WG/Melt_Intercomparison/output/mb_bins/' + \n 'csv/_wdebris_hdts_extrap/')\n \n main_glac_rgi = modelsetup.selectglaciersrgitable(rgi_regionsO1=pygem_prms.rgi_regionsO1, \n rgi_regionsO2=pygem_prms.rgi_regionsO2,\n rgi_glac_number=pygem_prms.rgi_glac_number)\n # Glacier hypsometry [km**2], total area\n main_glac_hyps = modelsetup.import_Husstable(main_glac_rgi, pygem_prms.hyps_filepath, pygem_prms.hyps_filedict,\n pygem_prms.hyps_colsdrop)\n # Ice thickness [m], average\n main_glac_icethickness = modelsetup.import_Husstable(main_glac_rgi, pygem_prms.thickness_filepath,\n pygem_prms.thickness_filedict, pygem_prms.thickness_colsdrop)\n main_glac_icethickness[main_glac_icethickness < 0] = 0\n main_glac_hyps[main_glac_icethickness == 0] = 0\n # Width [km], average\n main_glac_width = modelsetup.import_Husstable(main_glac_rgi, pygem_prms.width_filepath, pygem_prms.width_filedict,\n pygem_prms.width_colsdrop)\n elev_bins = main_glac_hyps.columns.values.astype(int)\n \n # Load debris thickness filenames\n # Glaciers optimized\n glac_hd_fullfns = []\n for i in os.listdir(hd_fp):\n if i.endswith('hd_hdts.csv'):\n region = int(i.split('.')[0])\n if region in pygem_prms.rgi_regionsO1: \n glac_hd_fullfns.append(hd_fp + i)\n \n # Glaciers extrapolated\n for i in os.listdir(hd_extrap_fp):\n if i.endswith('hdts_extrap.csv'):\n region = int(i.split('.')[0])\n if region in pygem_prms.rgi_regionsO1: \n glac_hd_fullfns.append(hd_extrap_fp + i)\n glac_hd_fullfns = sorted(glac_hd_fullfns)\n \n print('for each glacier, check for any z_offse due to the datasets?')\n \n \n \n #%%\n \n\n# if ds_tas_all is None:\n# ds_tas_all = ds_tas\n# else:\n# ds_tas_all = xr.concat((ds_tas_all, ds_tas), dim='time', coords='all')\n# ds_tas_all.to_netcdf(ds_tas_all_fullfn)\n#\n# ds_pr_all_fullfn = climate_fp_export + 'pr_mon_CCSM4_rcp85_r1i1p1_native.nc'\n# if os.path.exists(ds_pr_all_fullfn) == False:\n# ds_pr_all = None\n# for fn in pr_fn[:301]:\n# print(fn)\n# ds_pr = xr.open_dataset(fn)\n# \n# if ds_pr_all is None:\n# ds_pr_all = ds_pr\n# else:\n# ds_pr_all = xr.concat((ds_pr_all, ds_pr), dim='time', coords='all')\n# ds_pr_all.to_netcdf(ds_pr_all_fullfn)\n# \n# startyear = 1960\n# endyear = 2260\n# subtractyear = 100\n# time_values = pd.date_range(str(int(startyear-subtractyear)) + '-01-01',\n# str(int(endyear-subtractyear)) + '-12-15',freq='MS')\n## print(time_values)\n## ds_tas_all = xr.open_dataset('/Users/davidrounce/Documents/Dave_Rounce/HiMAT/Climate_data/UNH_cmip5/' + \n## 'rcp85_r1i1p1_monNG/tas_mon_CCSM4_rcp85_r1i1p1_native.nc')\n## ds_tas_all.time.values = time_values\n## ds_tas_all.to_netcdf('/Users/davidrounce/Documents/Dave_Rounce/HiMAT/Climate_data/UNH_cmip5/' + \n## 'rcp85_r1i1p1_monNG/tas_mon_CCSM4_rcp85_r1i1p1_native-v3.nc')\n# 
print(ds_tas_all.time)\n# print(ds_tas_all['tas'][9:3608,254,82])\n# \n## ds_pr_all = xr.open_dataset('/Users/davidrounce/Documents/Dave_Rounce/HiMAT/Climate_data/UNH_cmip5/' + \n## 'rcp85_r1i1p1_monNG/pr_mon_CCSM4_rcp85_r1i1p1_native.nc')\n## ds_pr_all.time.values = time_values\n## ds_pr_all.to_netcdf('/Users/davidrounce/Documents/Dave_Rounce/HiMAT/Climate_data/UNH_cmip5/' + \n## 'rcp85_r1i1p1_monNG/pr_mon_CCSM4_rcp85_r1i1p1_native-v3.nc')\n## print(ds_pr_all.time)\n\n\n#%% Write csv file from model results\n# Create csv such that not importing the air temperature each time (takes 90 seconds for 13,119 glaciers)\n#output_csvfullfilename = pygem_prms.main_directory + '/../Output/ERAInterim_elev_15_SouthAsiaEast.csv'\n#climate.createcsv_GCMvarnearestneighbor(pygem_prms.gcm_prec_filename, pygem_prms.gcm_prec_varname, dates_table, main_glac_rgi, \n# output_csvfullfilename)\n#np.savetxt(output_csvfullfilename, main_glac_gcmelev, delimiter=\",\") \n \n\n#%% NEAREST NEIGHBOR CALIBRATION PARAMETERS\n## Load csv\n#ds = pd.read_csv(pygem_prms.main_directory + '/../Output/calibration_R15_20180403_Opt02solutionspaceexpanding.csv', \n# index_col='GlacNo')\n## Select data of interest\n#data = ds[['CenLon', 'CenLat', 'lrgcm', 'lrglac', 'precfactor', 'precgrad', 'ddfsnow', 'ddfice', 'tempsnow', \n# 'tempchange']].copy()\n## Drop nan data to retain only glaciers with calibrated parameters\n#data_cal = data.dropna()\n#A = data_cal.mean(0)\n## Select latitude and longitude of calibrated parameters for distance estimate\n#data_cal_lonlat = data_cal.iloc[:,0:2].values\n## Loop through each glacier and select the parameters based on the nearest neighbor\n#for glac in range(data.shape[0]):\n# # Avoid applying this to any glaciers that already were optimized\n# if data.iloc[glac, :].isnull().values.any() == True:\n# # Select the latitude and longitude of the glacier's center\n# glac_lonlat = data.iloc[glac,0:2].values\n# # Set point to be compatible with cdist function (from scipy)\n# pt = [[glac_lonlat[0],glac_lonlat[1]]]\n# # scipy function to calculate distance\n# distances = cdist(pt, data_cal_lonlat)\n# # Find minimum index (could be more than one)\n# idx_min = np.where(distances == distances.min())[1]\n# # Set new parameters\n# data.iloc[glac,2:] = data_cal.iloc[idx_min,2:].values.mean(0)\n# # use mean in case multiple points are equidistant from the glacier\n## Remove latitude and longitude to create csv file\n#parameters_export = data.iloc[:,2:]\n## Export csv file\n#parameters_export.to_csv(pygem_prms.main_directory + '/../Calibration_datasets/calparams_R15_20180403_nearest.csv', \n# index=False) "
] |
[
[
"numpy.polyfit",
"pandas.Series",
"pandas.DataFrame",
"numpy.round",
"numpy.concatenate",
"numpy.nanmean",
"numpy.moveaxis",
"numpy.where",
"pandas.read_csv",
"numpy.arange",
"numpy.zeros",
"numpy.log",
"pandas.concat",
"numpy.isnan",
"numpy.append",
"pandas.date_range",
"numpy.array",
"numpy.isfinite",
"pandas.Timestamp"
]
] |
kipronokoech/Mask-R-CNN-for-Fruit-Detection
|
[
"8604dc0c617711be336cb67b1b929b310d785942"
] |
[
"evaluation/runMain-One.py"
] |
[
"import os\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport cv2 as cv\nfrom MaskReconstruction import MaskConstruction\nfrom Evaluation import MaskRCNN_Evaluation\nimport random\nimport json\nimport pandas as pd\n\n# changing the font size in matplotlib\nimport matplotlib\nfont = {'weight' : 'normal',\n 'size' : 14}\n\nmatplotlib.rc('font', **font)\n\n# Change the working dir into the root\nos.chdir(\"../\")\n\niou_threshold = 0.5\nconfidence = 0.90\n# Path to the images - both train and test set\n# there are two sub folders here - train and val\n\nset1 = random.choice([\"train\",\"val\",\"test\"])\nimages =\"datasets/fruits2\"\noutput_folder = \"fruits2-20210713T1300-0192\"\n\n#Path to train and test images respectively\nimages_path = os.path.join(images,set1)\n\n# Path to the annotation files - for train and test set.\n# annotations = os.path.join(images,\"{}/via_project_fruits.json\".format(set1))\n# print(\"Existence of {} annotation file: \".format(set1),os.path.exists(annotations))\n\n# pick an image at random to use it as a test \n# Skipping the annotation file. Annotation file is named via_project_fruits.json\nimage_name = random.choice([i for i in os.listdir(images_path) if not i.startswith(\"via\")])\nfilename , ext = os.path.splitext(image_name)\nprint(\"Filename: \",filename)\n#path to ground truth masks - genertaed from the annotation files\n# You cannot execute this before executing generate_truth-masks.py script\ntruth_masks = \"./evaluation/truth_masks/fruits2/{}_masks_truth\".format(set1)\nprint(\"Existence of {} masks truth: \".format(set1), os.path.exists(truth_masks))\n\n\n#path to prediction masks - the output of Mask R-CNN in output folder is enough for this\npred_masks = \"./output/{}/{}_masks2\".format(output_folder, set1)\nprint(\"Existence of {} masks pred: \".format(set1),os.path.exists(pred_masks))\n\n\n# example - just puicking one image for testing.\nexample_image = os.path.join(images_path,image_name)\nexample_truth = os.path.join(truth_masks,\"{}_truth.npy\".format(filename))\nexample_pred = os.path.join(pred_masks,\"{}_mask2.npy\".format(filename))\n\nprint(\"Example image: \", os.path.exists(example_image))\nprint(\"Example truth: \", os.path.exists(example_truth))\nprint(\"Example pred: \", os.path.exists(example_pred))\n\n\n\n# Call MaskConstruction class \n# This class contains all functions used to reconstruct and draw masks\n# Parameters: image, ground-truth masks,prediction masks and confidence\ns = MaskConstruction(example_image,example_truth,example_pred,confidence)\n\n# Draw prediction masks\n# if you want to view the output pass a parameter display = True\n# it is false by default\ns.draw_contours(display=True)\n\n# draw rectangular bounding boxes \ns.drawBbox(display=False)\n# Call MaskEvaluation class\n# This class contains all the function used to evaluate Mask-RCNN\n# Parameter: IoU threshold\nss= MaskRCNN_Evaluation(iou_threshold)\n\n# Confusion matrix\na = np.load(example_truth, allow_pickle=True)\nb = np.load(example_pred, allow_pickle=True)\nprint([i[\"box\"] for i in b if i[\"confidence\"]>=confidence])\nprint([i[\"confidence\"] for i in b if i[\"confidence\"]>=confidence])\n\ntp, fp, fn = ss.ConfusionMatrix(truth=a, preds=s.Contors())\nprint(\"True Positives\", tp)\nprint(\"False Positives\", fp)\nprint(\"False Negatives\", fn)\n# Draw ground-truth masks\n# passs a parameter display = True to view the output. 
False by default\ns.draw_truth_masks(display=True)\n\n\n\n\n\nexit()\n# Write precision and recall into a CSV file\nss.WriteAndDisplayPR(annotations,pred_masks,truth_masks,images_path,set1=set1,break_num=-1)\n\n\n# Print AP for all-point interpolation method, the function also saves the AP values to evaluation/results\nprint(ss.AP_NoInterpolation(set1=set1)[\"AP\"])\n\n# Print AP for 11-point interpolation\nprint(ss.AP_11PointInterpolation(set1=set1)[\"AP\"])\n\n\n# Write the confusion matrix into a JSON file\nss.WriteConfusionMatrix(images_path,truth_masks,pred_masks,set1=set1)\n\n# Plot PR curve, pass display = True parameter to display the plot. False by default\n# the function also returns precision, recall, set1(train or test) and IoU at \n# different level of confidence.\nprecision, recall,set1, iou = ss.PlotPrecisionRecallCurve(set1=set1, display=False)\n\n\nimg = cv.imread(example_image)\nbboxes = np.array([i[\"box\"] for i in b])\nconfs = np.array([i[\"confidence\"] for i in b])\nbb = np.array([i[\"mask\"] for i in b])\n\nd = []\nfor bbox, conf in zip(bboxes, confs):\n\tbbox_img = img[bbox[0]:bbox[2],bbox[1]:bbox[3]]\n\tr = bbox_img[:,:,0].mean().astype(\"uint8\")\n\tg = bbox_img[:,:,1].mean().astype(\"uint8\")\n\tb = bbox_img[:,:,2].mean().astype(\"uint8\")\n\trgb = np.mean([r,g,b]).astype(\"uint8\")\n\td.append(rgb+[conf])\n\tprint(rgb, conf)\n# [378 393 442 455]\n# 410 424\n# bbox[:,:,2] = 255\n\npd.DataFrame(d).to_csv(\"/home/kiprono/Desktop/data.csv\", index=False)\n\n\n\n\n###################### END ###############################\n\n# Everything written below is meant for testing purposes - not essentials\n\n\n\n\n\n\n\n\n# data2 = {\n# \t\t\t\"set\": set1,\n# \t\t\t\"iou\":iou,\n# \t\t\t\"precision\": list(precision), \n# \t\t\t\"recall\": list(recall)\n# \t\t}\n# if not os.path.exists(\"./evaluation/results/auc/{}{}pr.json\".format(set1,iou)):\n# \twith open(\"./evaluation/results/auc/{}{}pr.json\".format(set1,iou),\"w+\") as outfile:\n# \t\tjson.dump(data2, outfile, indent = 3)\n# else:\n# \tprint(\"./evaluation/results/auc/{}{}pr.json already exists\".format(set1,iou))\n\n\n# precision, recall,set1, iou = ss.PlotPrecisionRecallCurve(set1=\"test\")\n# data1 = {\n# \t\t\"set\": set1,\n# \t\t\"iou\":iou,\n# \t\t\"precision\": list(precision), \n# \t\t\"recall\": list(recall)\n# \t}\n# set1, iou = \"test\", iou_threshold\n# if not os.path.exists(\"./evaluation/results/auc/{}{}pr.json\".format(set1,iou)):\n# \twith open(\"./evaluation/results/auc/{}{}pr.json\".format(set1,iou),\"w+\") as outfile:\n# \t\tjson.dump(data1, outfile, indent = 3) \n# else:\n# \tprint(\"./evaluation/results/auc/{}{}pr.json already exists\".format(set1,iou))"
] |
[
[
"pandas.DataFrame",
"numpy.mean",
"numpy.load",
"numpy.array",
"matplotlib.rc"
]
] |
ZachMontgomery/AirfoilDatabase
|
[
"165ee387a7b8ae9642feedf64315081c7a1718c7"
] |
[
"zach/airfoil.py"
] |
[
"import numpy as np\nfrom numpy import pi, sqrt, cos, sin, tan, sign\nfrom numpy import arctan2 as atan2\nfrom numpy import arctan as atan\nfrom numpy import arcsinh as asinh\nfrom numpy import log as ln\n\nimport scipy.optimize as opt\nfrom timeit import default_timer as timer\n\nimport matplotlib.pyplot as plt\nplt.rcParams[\"font.family\"] = \"Times New Roman\"\nplt.rcParams[\"mathtext.fontset\"] = \"stix\"\n\nclass airfoil(object):\n def __init__(self, naca='0012', nPts=400, flapType=0, xf=1.0, yf=-100.0, delta=0.0):\n super(airfoil, self).__init__()\n\n self.naca = naca\n self.nPts = nPts\n self.flapType = flapType\n self.xf = xf\n self.yf = yf\n self.delta = delta\n\n self.setup()\n\n def setup(self):\n delta = self.delta*pi/180.0\n n = self.nPts\n\n # Set up theta evenly spaced (cosine clustering in x)\n theta = np.linspace(-pi, pi, n)\n\n # Compute x values along camber line\n s = 0.5 * (1.0 - cos(theta))\n\n # Compute nodal x and y coordinates for a symmetric airfoil\n t = float(self.naca[-2:]) * 0.01\n yt = 5.0 * t * (0.2969 * sqrt(s) - 0.1260 * s - 0.3516 * s**2 + 0.2843 * s**3 - 0.1015 * s**4) * sign(theta)\n\n # Get the max camber and location of max camber\n m = 0.01 * float(self.naca[0]) # Maximum camber (% chord)\n p = 0.1 * float(self.naca[1]) # Location of maximum chamber (% chord)\n\n # Compute x and y coordinates for a cambered airfoil\n ycamber = np.zeros(n)\n dydx = np.zeros(n)\n xcamber = np.copy(s)\n\n xf = self.xf\n yf = self.yf\n #Find vertical hinge point if not given\n if(yf==-100.0):\n if xf < p:\n yf = m / p**2 * (2.0 * p * xf - xf**2)\n else:\n yf = m / (1.0 - p)**2 * (1.0 - 2.0 * p + 2.0 * p * xf - xf**2)\n\n self.yf = yf\n #print(\"xf,yf = \",xf,yf)\n #Calculate camber line and slope\n for i in range(n):\n # Leading-edge Geometry\n if s[i] < p:\n ycamber[i] = m / p**2 * (2.0 * p * s[i] - s[i]**2)\n dydx[i] = 2.0 * m / p**2 * (p - s[i])\n # Trailing-edge Geometry\n else:\n ycamber[i] = m / (1.0 - p)**2 * (1.0 - 2.0 * p + 2.0 * p * s[i] - s[i]**2)\n dydx[i] = 2.0 * m / (1.0 - p)**2 * (p - s[i])\n\n #Flap Settings offset yc and dydx\n if((s[i]>xf) and (self.flapType>0) and (delta != 0.0)):\n if(self.flapType==1): #Traditional Flap\n r = sqrt((ycamber[i]-yf)**2 + (s[i]-xf)**2)\n psi = atan((ycamber[i]-yf)/(s[i]-xf))\n xcamber[i] = xf + r*cos(delta-psi)\n ycamber[i] = yf - r*sin(delta-psi)\n dydx[i] = (dydx[i] - tan(delta))/(1+dydx[i]*tan(delta))\n if(self.flapType==2): #Parabolic Flap\n length = sqrt(yf**2+(1-xf)**2)\n ghi = -atan(yf/(1-xf))\n R = sqrt(4*tan(delta)**2 + 1) + asinh(2*tan(delta))/2.0/tan(delta)\n# if(delta<0.01):\n# R = 1.0+sqrt(4*delta**2+1.0)\n xite = 2*length/R\n etate = -2*length/R*tan(delta)\n xio = (xcamber[i]-xf)*length/(1-xf)\n xip = opt.newton(self.f_eq_28,xite*xio/length,fprime=None,args=(xio,length,R,delta),tol=1.0e-12,maxiter=50,fprime2=None)\n etap = -xip**2/xite*tan(delta)\n detadxi = -2*xip/xite*tan(delta)\n xp = xf + xip*cos(ghi) - etap*sin(ghi)\n yp = yf + xip*sin(ghi) + etap*cos(ghi)\n yo = yf*(1-xio/length)\n dyc = ycamber[i] - yo\n xcamber[i] = xp + dyc*sin(atan(2*xip/xite*tan(delta)))\n ycamber[i] = yp + dyc*cos(atan(2*xip/xite*tan(delta)))\n dydx[i] = (dydx[i] - 2*xip*tan(delta)/xite)/(1 + 2*xip*tan(delta)/xite*dydx[i])\n\n # Add thickness offset to camber location for airfoil surface\n angle = atan(dydx)\n self.x = xcamber - yt * sin(angle)\n self.y = ycamber + yt * cos(angle)\n self.theta = theta\n self.xcamber = xcamber\n self.ycamber = ycamber\n self.dydx = dydx\n# for i in range(n):\n# print(i,self.x[i],self.y[i])\n\n 
def f_eq_28(self,x,xio,length,R,delta):\n return -xio + x/2.0*sqrt((x/length*R*tan(delta))**2 + 1) + length*asinh(x*R*tan(delta)/length)/(2*R*tan(delta))\n\n def panel_geom(self):\n x = self.x\n y = self.y\n\n # Calculate the length of each panel\n length = sqrt((x[1:] - x[:-1])**2 + (y[1:] - y[:-1])**2)\n\n # Calculate the normal vector for each panel\n xnorm = -(y[1:] - y[:-1]) / length\n ynorm = (x[1:] - x[:-1]) / length\n\n # Calculate the centroid (control point) for each panel\n xc = (x[:-1] + x[1:]) / 2.0\n yc = (y[:-1] + y[1:]) / 2.0\n\n return length, xc, yc, xnorm, ynorm\n"
] |
[
[
"numpy.sqrt",
"numpy.arctan",
"numpy.linspace",
"numpy.cos",
"numpy.sin",
"numpy.sign",
"numpy.tan",
"numpy.copy",
"numpy.zeros",
"scipy.optimize.newton"
]
] |
dendisuhubdy/Greedy_InfoMax
|
[
"8c9b0de0663d9eb2936e082250685b41710aedb8"
] |
[
"GreedyInfoMax/utils/logger.py"
] |
[
"import os\nimport torch\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport copy\n\n\nclass Logger:\n \"\"\"\n Logging class that keeps track of the training parameters and progress, and saves log files and model checkpoints\n \"\"\"\n def __init__(self, opt):\n self.opt = opt\n\n if opt.validate:\n self.val_loss = [[] for i in range(opt.model_splits)]\n else:\n self.val_loss = None\n\n if opt.start_epoch > 0:\n self.loss_last_training = np.load(\n os.path.join(opt.model_path, \"train_loss.npy\")\n ).tolist()\n self.train_loss = copy.deepcopy(self.loss_last_training)\n try:\n self.SVM_accuracy = np.load(\n os.path.join(opt.model_path, \"svm_accuracy.npy\")\n ).tolist()\n except:\n self.SVM_accuracy = []\n else:\n self.loss_last_training = None\n self.train_loss = [[] for i in range(opt.model_splits)]\n self.SVM_accuracy = []\n\n self.num_models_to_keep = 2\n assert self.num_models_to_keep > 0, \"Dont delete all models!!!\"\n\n def create_log(\n self,\n model,\n accuracy=None,\n epoch=0,\n optimizer=None,\n final_test=False,\n final_loss=None,\n acc5=None,\n predict_model=None\n ):\n\n print(\"Saving model and log-file to \" + self.opt.log_path)\n\n # Save the model checkpoint\n torch.save(\n model.state_dict(),\n os.path.join(self.opt.log_path, \"model_{}.ckpt\".format(epoch)),\n )\n\n # remove old model files to keep dir uncluttered\n try:\n os.remove(\n os.path.join(\n self.opt.log_path,\n \"model_{}.ckpt\".format(epoch - self.num_models_to_keep),\n )\n )\n except:\n print(\"not enough models there yet, nothing to delete\")\n\n if predict_model is not None:\n # Save the predict model checkpoint\n torch.save(\n predict_model.state_dict(),\n os.path.join(self.opt.log_path, \"predict_model_{}.ckpt\".format(epoch)),\n )\n\n # remove old model files to keep dir uncluttered\n try:\n os.remove(\n os.path.join(\n self.opt.log_path,\n \"predict_model_{}.ckpt\".format(epoch - self.num_models_to_keep),\n )\n )\n except:\n print(\"not enough models there yet, nothing to delete\")\n\n if optimizer is not None:\n for idx, optims in enumerate(optimizer):\n torch.save(\n optims.state_dict(),\n os.path.join(\n self.opt.log_path, \"optim_{}_{}.ckpt\".format(idx, epoch)\n ),\n )\n\n try:\n os.remove(\n os.path.join(\n self.opt.log_path,\n \"optim_{}_{}.ckpt\".format(\n idx, epoch - self.num_models_to_keep\n ),\n )\n )\n except:\n print(\"not enough models there yet, nothing to delete\")\n\n # Save hyper-parameters\n with open(os.path.join(self.opt.log_path, \"log.txt\"), \"w+\") as cur_file:\n cur_file.write(str(self.opt))\n if accuracy is not None:\n cur_file.write(\"Top 1 - accuracy: \" + str(accuracy))\n if acc5 is not None:\n cur_file.write(\"Top 5 - Accuracy: \" + str(acc5))\n if final_test and accuracy is not None:\n cur_file.write(\" Very Final testing accuracy: \" + str(accuracy))\n if final_test and acc5 is not None:\n cur_file.write(\" Very Final testing top 5 - accuracy: \" + str(acc5))\n\n # Save losses throughout training and plot\n np.save(\n os.path.join(self.opt.log_path, \"train_loss\"), np.array(self.train_loss)\n )\n np.save(\n os.path.join(self.opt.log_path, \"svm_accuracy\"),\n np.array(self.SVM_accuracy),\n )\n\n if self.val_loss is not None:\n np.save(\n os.path.join(self.opt.log_path, \"val_loss\"), np.array(self.val_loss)\n )\n\n self.draw_loss_curve()\n self.draw_svm_accuracies_curve(self.SVM_accuracy, \"accuracy\")\n\n if accuracy is not None:\n np.save(os.path.join(self.opt.log_path, \"accuracy\"), accuracy)\n\n if final_test:\n np.save(os.path.join(self.opt.log_path, 
\"final_accuracy\"), accuracy)\n np.save(os.path.join(self.opt.log_path, \"final_loss\"), final_loss)\n\n def draw_svm_accuracies_curve(self, svm_accuracies, label):\n lst_iter = np.arange(len(svm_accuracies))\n plt.plot(lst_iter, np.array(svm_accuracies), \"-b\", label=\"SVM \" + label)\n\n plt.xlabel(\"epoch\")\n plt.ylabel(\"accuracy\")\n plt.legend(loc=\"upper right\")\n\n # save image\n plt.savefig(os.path.join(self.opt.log_path, \"svm_accuracy_\" + label + \".png\"))\n plt.close()\n\n def draw_loss_curve(self):\n for idx, loss in enumerate(self.train_loss):\n lst_iter = np.arange(len(loss))\n plt.plot(lst_iter, np.array(loss), \"-b\", label=\"train loss\")\n\n if self.loss_last_training is not None:\n lst_iter = np.arange(len(self.loss_last_training[idx]))\n plt.plot(lst_iter, self.loss_last_training[idx], \"-g\")\n\n if self.val_loss is not None:\n lst_iter = np.arange(len(self.val_loss[idx]))\n plt.plot(lst_iter, np.array(self.val_loss[idx]), \"-r\", label=\"val loss\")\n\n plt.xlabel(\"epoch\")\n plt.ylabel(\"loss\")\n plt.legend(loc=\"upper right\")\n\n # save image\n plt.savefig(os.path.join(self.opt.log_path, \"loss_{}.png\".format(idx)))\n plt.close()\n\n def append_SVM_acc(self, SVM_acc):\n self.SVM_accuracy.append(SVM_acc)\n\n def append_train_loss(self, train_loss):\n for idx, elem in enumerate(train_loss):\n self.train_loss[idx].append(elem)\n\n def append_val_loss(self, val_loss):\n for idx, elem in enumerate(val_loss):\n self.val_loss[idx].append(elem)\n"
] |
[
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"matplotlib.pyplot.xlabel",
"numpy.array",
"matplotlib.pyplot.ylabel"
]
] |
fang2hou/ABGAN
|
[
"518a959e5d1e6191721d859cff02971e1a3105ea"
] |
[
"abganlibs/models/sagan.py"
] |
[
"import torch\nimport torch.nn as nn\nimport torch.nn.parallel\n\n\nclass Self_Attn(nn.Module):\n \"\"\" Self attention Layer\"\"\"\n\n def __init__(self, in_dim, activation):\n super(Self_Attn, self).__init__()\n self.chanel_in = in_dim\n self.activation = activation\n\n self.query_conv = nn.Conv2d(\n in_channels=in_dim, out_channels=in_dim//8, kernel_size=1)\n self.key_conv = nn.Conv2d(\n in_channels=in_dim, out_channels=in_dim//8, kernel_size=1)\n self.value_conv = nn.Conv2d(\n in_channels=in_dim, out_channels=in_dim, kernel_size=1)\n self.gamma = nn.Parameter(torch.zeros(1))\n\n self.softmax = nn.Softmax(dim=-1)\n\n def forward(self, x):\n \"\"\"\n inputs :\n x : input feature maps( B X C X W X H)\n returns :\n out : self attention value + input feature \n attention: B X N X N (N is Width*Height)\n \"\"\"\n m_batchsize, C, width, height = x.size()\n proj_query = self.query_conv(x).view(\n m_batchsize, -1, width*height).permute(0, 2, 1) # B X CX(N)\n proj_key = self.key_conv(x).view(\n m_batchsize, -1, width*height) # B X C x (*W*H)\n energy = torch.bmm(proj_query, proj_key) # transpose check\n attention = self.softmax(energy) # BX (N) X (N)\n proj_value = self.value_conv(x).view(\n m_batchsize, -1, width*height) # B X C X N\n\n out = torch.bmm(proj_value, attention.permute(0, 2, 1))\n out = out.view(m_batchsize, C, width, height)\n\n out = self.gamma*out + x\n return out # , attention\n\n\nclass DCGAN_D(nn.Module):\n def __init__(self, isize, nz, nc, ndf, ngpu, n_extra_layers=0):\n super(DCGAN_D, self).__init__()\n self.ngpu = ngpu\n assert isize % 16 == 0, \"isize has to be a multiple of 16\"\n\n main = nn.Sequential()\n # input is nc x isize x isize\n main.add_module('initial_conv_{0}-{1}'.format(nc, ndf),\n nn.Conv2d(nc, ndf, 4, 2, 1, bias=False))\n main.add_module('initial_relu_{0}'.format(ndf),\n nn.LeakyReLU(0.2, inplace=True))\n csize, cndf = isize / 2, ndf\n\n # Extra layers\n for t in range(n_extra_layers):\n main.add_module('extra-layers-{0}_{1}_conv'.format(t, cndf),\n nn.Conv2d(cndf, cndf, 3, 1, 1, bias=False))\n # main.add_module('extra-layers-{0}_{1}_batchnorm'.format(t, cndf),\n # nn.BatchNorm2d(cndf))\n main.add_module('extra-layers-{0}_{1}_relu'.format(t, cndf),\n nn.LeakyReLU(0.2, inplace=True))\n\n while csize > 4:\n in_feat = cndf\n out_feat = cndf * 2\n main.add_module('pyramid_{0}_{1}_conv'.format(in_feat, out_feat),\n nn.Conv2d(in_feat, out_feat, 4, 2, 1, bias=False))\n # main.add_module('pyramid_{0}_batchnorm'.format(out_feat),\n # nn.BatchNorm2d(out_feat))\n main.add_module('pyramid_{0}_relu'.format(out_feat),\n nn.LeakyReLU(0.2, inplace=True))\n cndf = cndf * 2\n csize = csize / 2\n\n main.add_module('attn', Self_Attn(512, 'relu'))\n\n # state size. 
K x 4 x 4\n main.add_module('final_{0}-{1}_conv'.format(cndf, 1),\n nn.Conv2d(cndf, 1, 4, 1, 0, bias=False))\n self.main = main\n\n def forward(self, input):\n if isinstance(input.data, torch.cuda.FloatTensor) and self.ngpu > 1:\n output = nn.parallel.data_parallel(\n self.main, input, range(self.ngpu))\n else:\n output = self.main(input)\n\n output = output.mean(0)\n return output.view(1)\n\n\nclass DCGAN_G(nn.Module):\n def __init__(self, isize, nz, nc, ngf, ngpu, n_extra_layers=0):\n super(DCGAN_G, self).__init__()\n self.ngpu = ngpu\n assert isize % 16 == 0, \"isize has to be a multiple of 16\"\n\n cngf, tisize = ngf // 2, 4\n while tisize != isize:\n cngf = cngf * 2\n tisize = tisize * 2\n\n main = nn.Sequential()\n # input is Z, going into a convolution\n main.add_module('initial_{0}-{1}_convt'.format(nz, cngf),\n nn.ConvTranspose2d(nz, cngf, 4, 1, 0, bias=False))\n main.add_module('initial_{0}_batchnorm'.format(cngf),\n nn.BatchNorm2d(cngf))\n main.add_module('initial_{0}_relu'.format(cngf),\n nn.ReLU(True))\n\n csize, cndf = 4, cngf\n while csize < isize // 2:\n main.add_module('pyramid_{0}-{1}_convt'.format(cngf, cngf // 2),\n nn.ConvTranspose2d(cngf, cngf // 2, 4, 2, 1, bias=False))\n main.add_module('pyramid_{0}_batchnorm'.format(cngf // 2),\n nn.BatchNorm2d(cngf // 2))\n main.add_module('pyramid_{0}_relu'.format(cngf // 2),\n nn.ReLU(True))\n cngf = cngf // 2\n csize = csize * 2\n\n # Extra layers\n for t in range(n_extra_layers):\n main.add_module('extra-layers-{0}_{1}_conv'.format(t, cngf),\n nn.Conv2d(cngf, cngf, 3, 1, 1, bias=False))\n main.add_module('extra-layers-{0}_{1}_batchnorm'.format(t, cngf),\n nn.BatchNorm2d(cngf))\n main.add_module('extra-layers-{0}_{1}_relu'.format(t, cngf),\n nn.ReLU(True))\n\n main.add_module('attn', Self_Attn(32, 'relu'))\n\n main.add_module('final_{0}-{1}_convt'.format(cngf, nc),\n nn.ConvTranspose2d(cngf, nc, 4, 2, 1, bias=False))\n main.add_module('final_{0}_tanh'.format(nc),\n nn.ReLU()) # nn.Softmax(1)) #Was TANH nn.Tanh())#\n self.main = main\n\n def forward(self, input):\n if isinstance(input.data, torch.cuda.FloatTensor) and self.ngpu > 1:\n output = nn.parallel.data_parallel(\n self.main, input, range(self.ngpu))\n else:\n output = self.main(input)\n\n #print (output[0,:,0,0])\n # exit()\n return output\n###############################################################################\n\n\nclass DCGAN_D_nobn(nn.Module):\n def __init__(self, isize, nz, nc, ndf, ngpu, n_extra_layers=0):\n super(DCGAN_D_nobn, self).__init__()\n self.ngpu = ngpu\n assert isize % 16 == 0, \"isize has to be a multiple of 16\"\n\n main = nn.Sequential()\n # input is nc x isize x isize\n # input is nc x isize x isize\n main.add_module('initial_conv_{0}-{1}'.format(nc, ndf),\n nn.Conv2d(nc, ndf, 4, 2, 1, bias=False))\n main.add_module('initial_relu_{0}'.format(ndf),\n nn.LeakyReLU(0.2, inplace=True))\n csize, cndf = isize / 2, ndf\n\n # Extra layers\n for t in range(n_extra_layers):\n main.add_module('extra-layers-{0}_{1}_conv'.format(t, cndf),\n nn.Conv2d(cndf, cndf, 3, 1, 1, bias=False))\n main.add_module('extra-layers-{0}_{1}_relu'.format(t, cndf),\n nn.LeakyReLU(0.2, inplace=True))\n\n while csize > 4:\n in_feat = cndf\n out_feat = cndf * 2\n main.add_module('pyramid_{0}-{1}_conv'.format(in_feat, out_feat),\n nn.Conv2d(in_feat, out_feat, 4, 2, 1, bias=False))\n main.add_module('pyramid_{0}_relu'.format(out_feat),\n nn.LeakyReLU(0.2, inplace=True))\n cndf = cndf * 2\n csize = csize / 2\n\n # state size. 
K x 4 x 4\n main.add_module('final_{0}-{1}_conv'.format(cndf, 1),\n nn.Conv2d(cndf, 1, 4, 1, 0, bias=False))\n self.main = main\n\n def forward(self, input):\n if isinstance(input.data, torch.cuda.FloatTensor) and self.ngpu > 1:\n output = nn.parallel.data_parallel(\n self.main, input, range(self.ngpu))\n else:\n output = self.main(input)\n\n output = output.mean(0)\n return output.view(1)\n\n\nclass DCGAN_G_nobn(nn.Module):\n def __init__(self, isize, nz, nc, ngf, ngpu, n_extra_layers=0):\n super(DCGAN_G_nobn, self).__init__()\n self.ngpu = ngpu\n assert isize % 16 == 0, \"isize has to be a multiple of 16\"\n\n cngf, tisize = ngf // 2, 4\n while tisize != isize:\n cngf = cngf * 2\n tisize = tisize * 2\n\n main = nn.Sequential()\n main.add_module('initial_{0}-{1}_convt'.format(nz, cngf),\n nn.ConvTranspose2d(nz, cngf, 4, 1, 0, bias=False))\n main.add_module('initial_{0}_relu'.format(cngf),\n nn.ReLU(True))\n\n csize, cndf = 4, cngf\n while csize < isize // 2:\n main.add_module('pyramid_{0}-{1}_convt'.format(cngf, cngf // 2),\n nn.ConvTranspose2d(cngf, cngf // 2, 4, 2, 1, bias=False))\n main.add_module('pyramid_{0}_relu'.format(cngf // 2),\n nn.ReLU(True))\n cngf = cngf // 2\n csize = csize * 2\n\n # Extra layers\n for t in range(n_extra_layers):\n main.add_module('extra-layers-{0}_{1}_conv'.format(t, cngf),\n nn.Conv2d(cngf, cngf, 3, 1, 1, bias=False))\n main.add_module('extra-layers-{0}_{1}_relu'.format(t, cngf),\n nn.ReLU(True))\n\n main.add_module('final_{0}-{1}_convt'.format(cngf, nc),\n nn.ConvTranspose2d(cngf, nc, 4, 2, 1, bias=False))\n main.add_module('final_{0}_tanh'.format(nc),\n nn.Softmax()) # Tanh())\n self.main = main\n\n def forward(self, input):\n if isinstance(input.data, torch.cuda.FloatTensor) and self.ngpu > 1:\n output = nn.parallel.data_parallel(\n self.main, input, range(self.ngpu))\n else:\n output = self.main(input)\n return output\n"
] |
[
[
"torch.nn.Softmax",
"torch.nn.Sequential",
"torch.nn.ConvTranspose2d",
"torch.zeros",
"torch.nn.Conv2d",
"torch.bmm",
"torch.nn.LeakyReLU",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU"
]
] |
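The torch.nn calls listed in the apis column above can be exercised outside the full DCGAN_G / DCGAN_D classes. A minimal sketch (my own, not part of the dataset row; channel sizes are hypothetical) pairing a generator-style stage with a discriminator-style stage:

import torch
import torch.nn as nn

# Generator-style stage, as in DCGAN_G: ConvTranspose2d -> BatchNorm2d -> ReLU
gen_block = nn.Sequential(
    nn.ConvTranspose2d(100, 64, 4, 1, 0, bias=False),
    nn.BatchNorm2d(64),
    nn.ReLU(True),
)
# Discriminator-style stage, as in DCGAN_D: Conv2d -> LeakyReLU
disc_block = nn.Sequential(
    nn.Conv2d(64, 1, 4, 1, 0, bias=False),
    nn.LeakyReLU(0.2, inplace=True),
)

z = torch.zeros(1, 100, 1, 1)   # latent input shaped as the generators expect
feat = gen_block(z)             # -> (1, 64, 4, 4)
score = disc_block(feat)        # -> (1, 1, 1, 1)
print(feat.shape, score.shape)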
loco-3d/quadruped-walkgen
|
[
"d78c205bcfcc69919eacb7cb4b51e426e2bc626f"
] |
[
"exemple/exemple_simple.py"
] |
[
"# coding: utf8\n\nimport time\n\nimport numpy as np\nimport matplotlib.pylab as plt\n\nfrom GaitProblem import GaitProblem\n\n####################\n# Initialization #\n####################\n\n# Time step of the MPC\ndt_mpc = 0.02\n\n# Period of the MPC\nT_mpc = 0.32\n\n# Creation crocoddyl problem\ngaitProblem = GaitProblem(mu=0.7)\ngaitProblem.createProblem()\n\n# MpcInterface object that contains information about the current state of the robot\n# Change the initial conditions here\nlC = np.array([[0.0, 0.0, 0.2]]).T # CoM centered and at 20 cm above the ground\nabg = np.array([[0.0, 0.0, 0.0]]).T # horizontal base (roll, pitch, 0.0)\nlV = np.array([[0.2, 0.0, 0.0]]).T # motionless base (linear velocity)\nlW = np.array([[0.0, 0.0, 0.0]]).T # motionless base (angular velocity)\nl_feet = np.array([\n [0.19, 0.19, -0.19, -0.19],\n [0.15005, -0.15005, 0.15005, -0.15005],\n [0.0, 0.0, 0.0, 0.0],\n]) # position of feet in local frame\n\nx0 = np.vstack((lC, abg, lV, lW)) # Current state vector\n# The reference state, copy of the initial position,\nxref = np.repeat(x0, np.int(T_mpc / dt_mpc) + 1, axis=1) # Desired future state vectors\nxref[6, :] = 0.0 # Target linear velocity to Vx = 0\n\nfsteps = np.array([\n [\n 1.00000000e00,\n 1.90776486e-01,\n 1.48962816e-01,\n 4.22498932e-03,\n 1.90060159e-01,\n -1.50265109e-01,\n 0.00000000e00,\n -1.89740429e-01,\n 1.50467686e-01,\n 1.78713224e-06,\n -1.90692335e-01,\n -1.48946056e-01,\n 4.22561856e-03,\n ],\n [\n 7.00000000e00,\n 1.90776486e-01,\n 1.48962816e-01,\n 4.22498932e-03,\n np.nan,\n np.nan,\n np.nan,\n np.nan,\n np.nan,\n np.nan,\n -1.90692335e-01,\n -1.48946056e-01,\n 4.22561856e-03,\n ],\n [\n 1.00000000e00,\n 1.90776486e-01,\n 1.48962816e-01,\n 4.22498932e-03,\n 1.90000000e-01,\n -1.50050000e-01,\n 0.00000000e00,\n -1.90000000e-01,\n 1.50050000e-01,\n 0.00000000e00,\n -1.90692335e-01,\n -1.48946056e-01,\n 4.22561856e-03,\n ],\n [\n 7.00000000e00,\n np.nan,\n np.nan,\n np.nan,\n 1.90000000e-01,\n -1.50050000e-01,\n 0.00000000e00,\n -1.90000000e-01,\n 1.50050000e-01,\n 0.00000000e00,\n np.nan,\n np.nan,\n np.nan,\n ],\n [\n 0.00000000e00,\n np.nan,\n np.nan,\n np.nan,\n np.nan,\n np.nan,\n np.nan,\n np.nan,\n np.nan,\n np.nan,\n np.nan,\n np.nan,\n np.nan,\n ],\n [\n 0.00000000e00,\n np.nan,\n np.nan,\n np.nan,\n np.nan,\n np.nan,\n np.nan,\n np.nan,\n np.nan,\n np.nan,\n np.nan,\n np.nan,\n np.nan,\n ],\n])\n\n#############\n# Run MPC #\n#############\n\nnp.set_printoptions(linewidth=250, precision=3)\nprint(xref)\nprint(fsteps)\n# Crocoddyl solver without constraints on fx, fy and fz\ngaitProblem.updateProblem(fsteps, xref, x0)\n\ngaitProblem.max_iter = 2\n\nstart_time = time.time()\ngaitProblem.runProblem()\n\nprint(\"Temps d execution : %s secondes ---\" % (time.time() - start_time))\n\n# Rearrange the output of the crocoddyl solver\nXs = np.zeros((12, 16))\nUs = np.zeros((12, 16))\nfor i in range(0, 16):\n Xs[:, i] = gaitProblem.ddp.xs[i + 1]\n Us[:, i] = gaitProblem.ddp.us[i]\n\n# Predicted evolution of state variables\nl_t = np.linspace(dt_mpc, T_mpc, np.int(T_mpc / dt_mpc))\nl_str = [\n \"X_osqp\",\n \"Y_osqp\",\n \"Z_osqp\",\n \"Roll_osqp\",\n \"Pitch_osqp\",\n \"Yaw_osqp\",\n \"Vx_osqp\",\n \"Vy_osqp\",\n \"Vz_osqp\",\n \"VRoll_osqp\",\n \"VPitch_osqp\",\n \"VYaw_osqp\",\n]\nl_str2 = [\n \"X_ddp\",\n \"Y_ddp\",\n \"Z_ddp\",\n \"Roll_ddp\",\n \"Pitch_ddp\",\n \"Yaw_ddp\",\n \"Vx_ddp\",\n \"Vy_ddp\",\n \"Vz_ddp\",\n \"VRoll_ddp\",\n \"VPitch_ddp\",\n \"VYaw_ddp\",\n]\n\nindex = [1, 5, 9, 2, 6, 10, 3, 7, 11, 4, 8, 
12]\nplt.figure()\nfor i in range(12):\n plt.subplot(3, 4, index[i])\n plt.plot(l_t, Xs[i, :], linewidth=2, marker=\"x\")\n plt.legend([l_str[i]])\n\n# Desired evolution of contact forces\nl_t = np.linspace(dt_mpc, T_mpc, np.int(T_mpc / dt_mpc))\nl_str = [\n \"FL_X_osqp\",\n \"FL_Y_osqp\",\n \"FL_Z_osqp\",\n \"FR_X_osqp\",\n \"FR_Y_osqp\",\n \"FR_Z_osqp\",\n \"HL_X_osqp\",\n \"HL_Y_osqp\",\n \"HL_Z_osqp\",\n \"HR_X_osqp\",\n \"HR_Y_osqp\",\n \"HR_Z_osqp\",\n]\nl_str2 = [\n \"FL_X_ddp\",\n \"FL_Y_ddp\",\n \"FL_Z_ddp\",\n \"FR_X_ddp\",\n \"FR_Y_ddp\",\n \"FR_Z_ddp\",\n \"HL_X_ddp\",\n \"HL_Y_ddp\",\n \"HL_Z_ddp\",\n \"HR_X_ddp\",\n \"HR_Y_ddp\",\n \"HR_Z_ddp\",\n]\nindex = [1, 5, 9, 2, 6, 10, 3, 7, 11, 4, 8, 12]\nplt.figure()\nfor i in range(12):\n plt.subplot(3, 4, index[i])\n plt.plot(l_t, Us[i, :], linewidth=2, marker=\"x\")\n plt.legend([l_str2[i]])\n\nplt.show(block=True)\n"
] |
[
[
"matplotlib.pylab.show",
"numpy.set_printoptions",
"numpy.int",
"matplotlib.pylab.subplot",
"matplotlib.pylab.figure",
"matplotlib.pylab.plot",
"matplotlib.pylab.legend",
"numpy.array",
"numpy.zeros",
"numpy.vstack"
]
] |
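A standalone sketch (mine, with placeholder data) of the matplotlib.pylab figure/subplot/plot/legend pattern used in exemple_simple.py above. Note that np.int, used there for the step count, was removed in NumPy 1.24, so the builtin int() is used here instead:

import numpy as np
import matplotlib.pylab as plt

dt_mpc, T_mpc = 0.02, 0.32
l_t = np.linspace(dt_mpc, T_mpc, int(T_mpc / dt_mpc))  # 16 MPC time steps
Xs = np.zeros((12, len(l_t)))                          # stand-in for gaitProblem.ddp.xs

plt.figure()
for i in range(12):
    plt.subplot(3, 4, i + 1)
    plt.plot(l_t, Xs[i, :], linewidth=2, marker="x")
    plt.legend(["state_%d" % i])
plt.show(block=False)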
haibinzheng/NeuronFair
|
[
"5f6affd6fb378058bb0d2a0fd0ea413d2c8bd3cf"
] |
[
"nf_model/model_train.py"
] |
[
"import copy\nimport os\nimport numpy as np\nimport sys\n# sys.path.append(\"../\")\nimport tensorflow as tf\nfrom tensorflow.python.platform import flags\nfrom nf_data.census import census_data\nfrom nf_data.bank import bank_data\nfrom nf_data.credit import credit_data\nfrom nf_data.compas import compas_data\nfrom nf_data.meps import meps_data\nfrom utils.utils_tf import model_train, model_eval\nfrom nf_model.dnn_models import dnn\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"2\"\nFLAGS = flags.FLAGS\n\ndef training(dataset, model_path, nb_epochs, batch_size,learning_rate):\n \"\"\"\n Train the model\n :param dataset: the name of testing dataset\n :param model_path: the path to save trained model\n \"\"\"\n data = {\"census\": census_data, \"credit\": credit_data, \"bank\": bank_data,\"compas\":compas_data, \"meps\":meps_data}\n # prepare the data and model\n X, Y, input_shape, nb_classes = data[dataset]()\n tf.set_random_seed(1234)\n config = tf.ConfigProto()\n config.gpu_options.per_process_gpu_memory_fraction = 0.8\n sess = tf.Session(config=config)\n x = tf.placeholder(tf.float32, shape=input_shape)\n y = tf.placeholder(tf.float32, shape=(None, nb_classes))\n model = dnn(input_shape, nb_classes)\n preds = model(x)\n sess.run(tf.global_variables_initializer())\n\n # training parameters\n train_params = {\n 'nb_epochs': nb_epochs,\n 'batch_size': batch_size,\n 'learning_rate': learning_rate,\n 'train_dir': model_path + dataset + \"/dnn/\",\n 'filename': 'best.model'\n }\n\n rng = np.random.RandomState([2021, 3, 15])\n model_train(sess, x, y, preds, X, Y, args=train_params, rng=rng, save=True)\n\n # evaluate the accuracy of trained model\n eval_params = {'batch_size': 128}\n accuracy = model_eval(sess, x, y, preds, X, Y, args=eval_params)\n print('Test accuracy on legitimate test examples: {0}'.format(accuracy))\n\ndef main(argv=None):\n training(dataset = FLAGS.dataset,\n model_path = FLAGS.model_path,\n nb_epochs=FLAGS.nb_epochs,\n batch_size=FLAGS.batch_size,\n learning_rate=FLAGS.learning_rate)\n\nif __name__ == '__main__':\n flags.DEFINE_string(\"dataset\", 'census', \"the name of dataset\")\n flags.DEFINE_string(\"model_path\", \"../models/\", \"the name of path for saving model\")\n flags.DEFINE_integer('nb_epochs', 1000, 'Number of epochs to train model')\n flags.DEFINE_integer('batch_size', 64, 'Size of training batches')\n flags.DEFINE_float('learning_rate', 0.001, 'Learning rate for training')\n tf.app.run()"
] |
[
[
"tensorflow.placeholder",
"tensorflow.ConfigProto",
"tensorflow.python.platform.flags.DEFINE_float",
"tensorflow.global_variables_initializer",
"tensorflow.python.platform.flags.DEFINE_integer",
"tensorflow.Session",
"tensorflow.set_random_seed",
"tensorflow.python.platform.flags.DEFINE_string",
"numpy.random.RandomState",
"tensorflow.app.run"
]
] |
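model_train.py above is written against the TensorFlow 1.x graph API; under TensorFlow 2.x those same calls are reachable through the tf.compat.v1 shim. A hedged sketch of just the session boilerplate (the input/output widths here are hypothetical, not those of the census data):

import numpy as np
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()
tf.set_random_seed(1234)

config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.8
sess = tf.Session(config=config)

x = tf.placeholder(tf.float32, shape=(None, 13))  # hypothetical feature width
y = tf.placeholder(tf.float32, shape=(None, 2))   # hypothetical class count
sess.run(tf.global_variables_initializer())

rng = np.random.RandomState([2021, 3, 15])        # same seeding style as the script
print(sess.run(tf.shape(x), feed_dict={x: np.zeros((4, 13), dtype=np.float32)}))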
NejcHirci/material-addon
|
[
"c08e2081413c3319b712c2f7193ac8013f601382",
"c08e2081413c3319b712c2f7193ac8013f601382"
] |
[
"materialGAN/tools/generate_fake_inputs.py",
"materialGAN/PerceptualSimilarity/models/__init__.py"
] |
[
"import os\nimport glob\nimport sys\nimport numpy as np\nsys.path.insert(1, 'materialGAN/src/')\nfrom optim import loadLightAndCamera\nfrom render import *\n\ndef gyListNames(in_dir):\n dir_list = sorted(glob.glob(in_dir))\n fn_list = []\n for dir in dir_list:\n fn_list.append(os.path.split(dir)[1])\n return fn_list\n\ndef renderTexFromTex(textures, tex_res, res, size, lp, cp, L, fn_im):\n if res > tex_res:\n print(\"[Warning in render.py::renderTex()]: request resolution is larger than texture resolution\")\n exit()\n renderObj = Microfacet(res=tex_res, size=size)\n light = th.from_numpy(L).cuda() if th.cuda.is_available() else th.from_numpy(L)\n im = renderObj.eval(textures, lightPos=lp, \\\n cameraPos=cp, light=light)\n im = gyApplyGamma(gyTensor2Array(im[0,:].permute(1,2,0)), 1/2.2)\n im = gyArray2PIL(im)\n if res < tex_res:\n im = im.resize((res, res), Image.LANCZOS)\n if fn_im is not None:\n im.save(fn_im)\n return im\n\ndef renderAndSave(tex, res, size, lp, cp, li, num_render, save_dir):\n for i in range(num_render):\n fn_this = save_dir + '/%02d.png' % i\n render_this = renderTexFromTex(tex, res, res, size, lp[i,:], cp[i,:], li, fn_im=fn_this)\n\n\n\nroot_dir = 'materialGAN/data/'\nin_dir = 'D:\\\\materials\\\\NeuralMaterial\\\\'\nout_dir = 'D:\\\\materials\\\\MaterialGAN\\\\'\n\n\nmat_list = gyListNames(in_dir + '*')\n\n# load light and camera position\nlight_pos, camera_pos, im_size, light = loadLightAndCamera(root_dir)\n\nfor id, mat in enumerate(mat_list):\n\n mat_in_dir = os.path.join(in_dir, mat)\n mat_in_dir = os.path.join(mat_in_dir, 'out')\n mat_out_dir = os.path.join(out_dir, mat)\n\n if os.path.isfile(os.path.join(mat_in_dir, 'tex.jpg')):\n textures, res = png2tex(os.path.join(mat_in_dir, 'tex.jpg'))\n else:\n textures, res = pngs2tex(mat_in_dir)\n\n # save initial texture and rendering\n renderAndSave(textures, res, im_size, light_pos, camera_pos, light, 9,\n mat_out_dir)\n\n np.savetxt(os.path.join(mat_out_dir, 'camera_pos.txt'), camera_pos, delimiter=',', fmt='%.4f')\n np.savetxt(os.path.join(mat_out_dir, 'image_size.txt'), np.array([im_size]), delimiter=',', fmt='%.4f')\n np.savetxt(os.path.join(mat_out_dir, 'light_power.txt'), np.array([1500,1500,1500]).reshape(1,3), delimiter=',', fmt='%.4f')\n\n print(\"Rendered \" + mat)\n \n",
"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nfrom skimage.metrics import structural_similarity\nimport torch\nfrom torch.autograd import Variable\n\nfrom models import dist_model\n\nclass PerceptualLoss(torch.nn.Module):\n def __init__(self, model='net-lin', net='alex', colorspace='rgb', spatial=False, use_gpu=True, gpu_ids=[0], version='0.1'): # VGG using our perceptually-learned weights (LPIPS metric)\n # def __init__(self, model='net', net='vgg', use_gpu=True): # \"default\" way of using VGG as a perceptual loss\n super(PerceptualLoss, self).__init__()\n self.use_gpu = use_gpu\n self.spatial = spatial\n self.gpu_ids = gpu_ids\n self.model = dist_model.DistModel()\n self.model.initialize(model=model, net=net, use_gpu=use_gpu, colorspace=colorspace, spatial=self.spatial, gpu_ids=gpu_ids, version=version)\n\n def forward(self, pred, target, normalize=False):\n \"\"\"\n Pred and target are Variables.\n If normalize is True, assumes the images are between [0,1] and then scales them between [-1,+1]\n If normalize is False, assumes the images are already between [-1,+1]\n\n Inputs pred and target are Nx3xHxW\n Output pytorch Variable N long\n \"\"\"\n\n if normalize:\n target = 2 * target - 1\n pred = 2 * pred - 1\n\n return self.model.forward(target, pred)\n\ndef normalize_tensor(in_feat,eps=1e-10):\n norm_factor = torch.sqrt(torch.sum(in_feat**2,dim=1,keepdim=True))\n return in_feat/(norm_factor+eps)\n\ndef l2(p0, p1, range=255.):\n return .5*np.mean((p0 / range - p1 / range)**2)\n\ndef psnr(p0, p1, peak=255.):\n return 10*np.log10(peak**2/np.mean((1.*p0-1.*p1)**2))\n\ndef dssim(p0, p1, range=255.):\n return (1 - structural_similarity(p0, p1, data_range=range, multichannel=True)) / 2.\n\ndef rgb2lab(in_img,mean_cent=False):\n from skimage import color\n img_lab = color.rgb2lab(in_img)\n if(mean_cent):\n img_lab[:,:,0] = img_lab[:,:,0]-50\n return img_lab\n\ndef tensor2np(tensor_obj):\n # change dimension of a tensor object into a numpy array\n return tensor_obj[0].cpu().float().numpy().transpose((1,2,0))\n\ndef np2tensor(np_obj):\n # change dimenion of np array into tensor array\n return torch.Tensor(np_obj[:, :, :, np.newaxis].transpose((3, 2, 0, 1)))\n\ndef tensor2tensorlab(image_tensor,to_norm=True,mc_only=False):\n # image tensor to lab tensor\n from skimage import color\n\n img = tensor2im(image_tensor)\n img_lab = color.rgb2lab(img)\n if(mc_only):\n img_lab[:,:,0] = img_lab[:,:,0]-50\n if(to_norm and not mc_only):\n img_lab[:,:,0] = img_lab[:,:,0]-50\n img_lab = img_lab/100.\n\n return np2tensor(img_lab)\n\ndef tensorlab2tensor(lab_tensor,return_inbnd=False):\n from skimage import color\n import warnings\n warnings.filterwarnings(\"ignore\")\n\n lab = tensor2np(lab_tensor)*100.\n lab[:,:,0] = lab[:,:,0]+50\n\n rgb_back = 255.*np.clip(color.lab2rgb(lab.astype('float')),0,1)\n if(return_inbnd):\n # convert back to lab, see if we match\n lab_back = color.rgb2lab(rgb_back.astype('uint8'))\n mask = 1.*np.isclose(lab_back,lab,atol=2.)\n mask = np2tensor(np.prod(mask,axis=2)[:,:,np.newaxis])\n return (im2tensor(rgb_back),mask)\n else:\n return im2tensor(rgb_back)\n\ndef rgb2lab(input):\n from skimage import color\n return color.rgb2lab(input / 255.)\n\ndef tensor2im(image_tensor, imtype=np.uint8, cent=1., factor=255./2.):\n image_numpy = image_tensor[0].cpu().float().numpy()\n image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + cent) * factor\n return image_numpy.astype(imtype)\n\ndef 
im2tensor(image, imtype=np.uint8, cent=1., factor=255./2.):\n return torch.Tensor((image / factor - cent)\n [:, :, :, np.newaxis].transpose((3, 2, 0, 1)))\n\ndef tensor2vec(vector_tensor):\n return vector_tensor.data.cpu().numpy()[:, :, 0, 0]\n\ndef voc_ap(rec, prec, use_07_metric=False):\n \"\"\" ap = voc_ap(rec, prec, [use_07_metric])\n Compute VOC AP given precision and recall.\n If use_07_metric is true, uses the\n VOC 07 11 point method (default:False).\n \"\"\"\n if use_07_metric:\n # 11 point metric\n ap = 0.\n for t in np.arange(0., 1.1, 0.1):\n if np.sum(rec >= t) == 0:\n p = 0\n else:\n p = np.max(prec[rec >= t])\n ap = ap + p / 11.\n else:\n # correct AP calculation\n # first append sentinel values at the end\n mrec = np.concatenate(([0.], rec, [1.]))\n mpre = np.concatenate(([0.], prec, [0.]))\n\n # compute the precision envelope\n for i in range(mpre.size - 1, 0, -1):\n mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])\n\n # to calculate area under PR curve, look for points\n # where X axis (recall) changes value\n i = np.where(mrec[1:] != mrec[:-1])[0]\n\n # and sum (\\Delta recall) * prec\n ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])\n return ap\n\ndef tensor2im(image_tensor, imtype=np.uint8, cent=1., factor=255./2.):\n# def tensor2im(image_tensor, imtype=np.uint8, cent=1., factor=1.):\n image_numpy = image_tensor[0].cpu().float().numpy()\n image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + cent) * factor\n return image_numpy.astype(imtype)\n\ndef im2tensor(image, imtype=np.uint8, cent=1., factor=255./2.):\n# def im2tensor(image, imtype=np.uint8, cent=1., factor=1.):\n return torch.Tensor((image / factor - cent)\n [:, :, :, np.newaxis].transpose((3, 2, 0, 1)))\n"
] |
[
[
"numpy.array"
],
[
"numpy.maximum",
"numpy.arange",
"torch.sum",
"numpy.concatenate",
"numpy.max",
"numpy.mean",
"numpy.prod",
"numpy.transpose",
"numpy.where",
"numpy.sum",
"numpy.isclose"
]
] |
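The tensor2im / im2tensor helpers in the PerceptualSimilarity __init__.py above stand alone; restated here as a self-contained round trip (H x W x 3 uint8 image to a 1 x 3 x H x W tensor in [-1, 1] and back):

import numpy as np
import torch

def im2tensor(image, cent=1., factor=255. / 2.):
    # (H, W, 3) uint8 -> (1, 3, H, W) float scaled to [-1, 1]
    return torch.Tensor((image / factor - cent)
                        [:, :, :, np.newaxis].transpose((3, 2, 0, 1)))

def tensor2im(image_tensor, imtype=np.uint8, cent=1., factor=255. / 2.):
    # (1, 3, H, W) float in [-1, 1] -> (H, W, 3) uint8
    image_numpy = image_tensor[0].cpu().float().numpy()
    return ((np.transpose(image_numpy, (1, 2, 0)) + cent) * factor).astype(imtype)

img = (np.random.rand(8, 8, 3) * 255).astype(np.uint8)
back = tensor2im(im2tensor(img))
print(np.abs(img.astype(int) - back.astype(int)).max())  # off by at most 1 from rounding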
shgold/apls
|
[
"4ac612e5d26580268e76d8b1a3721de2d7d18c38"
] |
[
"apls/apls.py"
] |
[
"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 29 12:32:19 2017\n\n@author: avanetten\n\n\"\"\"\n\n# from __future__ import print_function\n# from . import apls_utils\n# from . import apls_plots\n# from . import osmnx_funcs\n# import sp_metric\n# import topo_metric\n# import apls_tools\n# import graphTools\nimport networkx as nx\nimport scipy.spatial\nimport scipy.stats\nimport numpy as np\nimport random\nimport utm # pip install utm\nimport copy\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom shapely.geometry import Point, LineString\nimport time\nimport math\nimport os\nimport sys\n\nimport argparse\nimport pandas as pd\nimport shapely.wkt\n# import osmnx as ox # https://github.com/gboeing/osmnx\n# import pickle\n# import shutil\n\npath_apls_src = os.path.dirname(os.path.realpath(__file__))\npath_apls = os.path.dirname(path_apls_src)\n# print(\"path_apls:\", path_apls)\n# add path and import graphTools\nsys.path.append(path_apls_src)\nimport apls_utils\nimport apls_plots\nimport osmnx_funcs\nimport graphTools\nimport wkt_to_G\nimport topo_metric\nimport sp_metric\n\n# if in docker, the line below may be necessary\n# matplotlib.use('agg')\n\n\n###############################################################################\ndef add_travel_time(G_, speed_key='inferred_speed_mps', length_key='length',\n travel_time_key='travel_time_s', default_speed=13.41,\n verbose=False):\n \"\"\"\n Compute and add travel time estimaes to each graph edge.\n\n Arguments\n ---------\n G_ : networkx graph\n Input networkx graph, with edges assumed to have a dictioary of\n properties that includes speed.\n speed_key : str\n Key in the edge properties dictionary to use for the edge speed.\n Defaults to ``'inferred_speed_mps'``.\n length_key : str\n Key in the edge properties dictionary to use for the edge length.\n Defaults to ``'length'`` (asumed to be in meters).\n travel_time_key : str\n Name to assign travel time in the edge properties dictionary.\n Defaults to ``'travel_time_s'``.\n default_speed : float\n Default speed to use if speed_key is not found in edge properties\n Defaults to ``13.41`` (this is in m/s, and corresponds to 30 mph).\n verbose : boolean\n Switch to print relevant values to screen. 
Defaults to ``False``.\n\n Returns\n -------\n G_ : networkx graph\n Updated graph with travel time attached to each edge.\n \"\"\"\n\n for i, (u, v, data) in enumerate(G_.edges(data=True)):\n if speed_key in data:\n speed = data[speed_key]\n if type(speed) == list:\n speed = np.mean(speed)\n # print(\"speed:\", speed)\n else:\n print(\"speed_key not found:\", speed_key)\n return\n# data['inferred_speed'] = default_speed\n# data[speed_key] = default_speed\n# speed = default_speed\n if verbose:\n print(\"data[length_key]:\", data[length_key])\n print(\"speed:\", speed)\n travel_time_seconds = data[length_key] / speed\n data[travel_time_key] = travel_time_seconds\n\n return G_\n\n\n###############################################################################\ndef create_edge_linestrings(G_, remove_redundant=True, verbose=False):\n \"\"\"\n Ensure all edges have the 'geometry' tag, use shapely linestrings.\n\n Notes\n -----\n If identical edges exist, remove extras.\n\n Arguments\n ---------\n G_ : networkx graph\n Input networkx graph, with edges assumed to have a dictioary of\n properties that may or may not include 'geometry'.\n remove_redundant : boolean\n Switch to remove identical edges, if they exist.\n verbose : boolean\n Switch to print relevant values to screen. Defaults to ``False``.\n\n Returns\n -------\n G_ : networkx graph\n Updated graph with every edge containing the 'geometry' tag.\n \"\"\"\n\n # clean out redundant edges with identical geometry\n edge_seen_set = set([])\n geom_seen = []\n bad_edges = []\n\n # G_ = G_.copy()\n # for i,(u, v, key, data) in enumerate(G_.edges(keys=True, data=True)):\n for i, (u, v, data) in enumerate(G_.edges(data=True)):\n # create linestring if no geometry reported\n if 'geometry' not in data:\n sourcex, sourcey = G_.nodes[u]['x'], G_.nodes[u]['y']\n targetx, targety = G_.nodes[v]['x'], G_.nodes[v]['y']\n line_geom = LineString([Point(sourcex, sourcey),\n Point(targetx, targety)])\n data['geometry'] = line_geom\n\n # get reversed line\n coords = list(data['geometry'].coords)[::-1]\n line_geom_rev = LineString(coords)\n # G_.edges[u][v]['geometry'] = lstring\n else:\n # check which direction linestring is travelling (it may be going\n # from v -> u, which means we need to reverse the linestring)\n # otherwise new edge is tangled\n line_geom = data['geometry']\n # print (u,v,key,\"create_edge_linestrings() line_geom:\", line_geom)\n u_loc = [G_.nodes[u]['x'], G_.nodes[u]['y']]\n v_loc = [G_.nodes[v]['x'], G_.nodes[v]['y']]\n geom_p0 = list(line_geom.coords)[0]\n dist_to_u = scipy.spatial.distance.euclidean(u_loc, geom_p0)\n dist_to_v = scipy.spatial.distance.euclidean(v_loc, geom_p0)\n # print \"dist_to_u, dist_to_v:\", dist_to_u, dist_to_v\n coords = list(data['geometry'].coords)[::-1]\n line_geom_rev = LineString(coords)\n if dist_to_u > dist_to_v:\n # data['geometry'].coords = list(line_geom.coords)[::-1]\n data['geometry'] = line_geom_rev\n # else:\n # continue\n\n # flag redundant edges\n if remove_redundant:\n if i == 0:\n edge_seen_set = set([(u, v)])\n edge_seen_set.add((v, u))\n geom_seen.append(line_geom)\n\n else:\n if ((u, v) in edge_seen_set) or ((v, u) in edge_seen_set):\n # test if geoms have already been seen\n for geom_seen_tmp in geom_seen:\n if (line_geom == geom_seen_tmp) \\\n or (line_geom_rev == geom_seen_tmp):\n bad_edges.append((u, v)) # , key))\n if verbose:\n print(\"\\nRedundant edge:\", u, v) # , key)\n else:\n edge_seen_set.add((u, v))\n geom_seen.append(line_geom)\n geom_seen.append(line_geom_rev)\n\n if 
remove_redundant:\n if verbose:\n print(\"\\nedge_seen_set:\", edge_seen_set)\n print(\"redundant edges:\", bad_edges)\n for (u, v) in bad_edges:\n if G_.has_edge(u, v):\n G_.remove_edge(u, v) # , key)\n # # for (u,v,key) in bad_edges:\n # try:\n # G_.remove_edge(u, v) # , key)\n # except:\n # if verbose:\n # print(\"Edge DNE:\", u, v) # ,key)\n # pass\n\n return G_\n\n\n###############################################################################\ndef cut_linestring(line, distance, verbose=False):\n \"\"\"\n Cuts a shapely linestring at a specified distance from its starting point.\n\n Notes\n ----\n Return orignal linestring if distance <= 0 or greater than the length of\n the line.\n Reference:\n http://toblerity.org/shapely/manual.html#linear-referencing-methods\n\n Arguments\n ---------\n line : shapely linestring\n Input shapely linestring to cut.\n distanct : float\n Distance from start of line to cut it in two.\n verbose : boolean\n Switch to print relevant values to screen. Defaults to ``False``.\n\n Returns\n -------\n [line1, line2] : list\n Cut linestrings. If distance <= 0 or greater than the length of\n the line, return input line.\n \"\"\"\n\n if verbose:\n print(\"Cutting linestring at distance\", distance, \"...\")\n if distance <= 0.0 or distance >= line.length:\n return [LineString(line)]\n\n # iterate through coorda and check if interpolated point has been passed\n # already or not\n coords = list(line.coords)\n for i, p in enumerate(coords):\n pdl = line.project(Point(p))\n if verbose:\n print(i, p, \"line.project point:\", pdl)\n if pdl == distance:\n return [\n LineString(coords[:i+1]),\n LineString(coords[i:])]\n if pdl > distance:\n cp = line.interpolate(distance)\n return [\n LineString(coords[:i] + [(cp.x, cp.y)]),\n LineString([(cp.x, cp.y)] + coords[i:])]\n\n # if we've reached here then that means we've encountered a self-loop and\n # the interpolated point is between the final midpoint and the the original\n # node\n i = len(coords) - 1\n cp = line.interpolate(distance)\n return [\n LineString(coords[:i] + [(cp.x, cp.y)]),\n LineString([(cp.x, cp.y)] + coords[i:])]\n\n\n###############################################################################\ndef get_closest_edge_from_G(G_, point, nearby_nodes_set=set([]),\n verbose=False):\n \"\"\"\n Return closest edge to point, and distance to said edge.\n\n Notes\n -----\n Just discovered a similar function:\n https://github.com/gboeing/osmnx/blob/master/osmnx/utils.py#L501\n\n Arguments\n ---------\n G_ : networkx graph\n Input networkx graph, with edges assumed to have a dictioary of\n properties that includes the 'geometry' key.\n point : shapely Point\n Shapely point containing (x, y) coordinates.\n nearby_nodes_set : set\n Set of possible edge endpoints to search. If nearby_nodes_set is not\n empty, only edges with a node in this set will be checked (this can\n greatly speed compuation on large graphs). If nearby_nodes_set is\n empty, check all possible edges in the graph.\n Defaults to ``set([])``.\n verbose : boolean\n Switch to print relevant values to screen. 
Defaults to ``False``.\n\n Returns\n -------\n best_edge, min_dist, best_geom : tuple\n best_edge is the closest edge to the point\n min_dist is the distance to that edge\n best_geom is the geometry of the ege\n \"\"\"\n\n # get distances from point to lines\n dist_list = []\n edge_list = []\n geom_list = []\n p = point # Point(point_coords)\n for i, (u, v, key, data) in enumerate(G_.edges(keys=True, data=True)):\n # print((\" in get_closest_edge(): u,v,key,data:\", u,v,key,data))\n # print (\" in get_closest_edge(): data:\", data)\n\n # skip if u,v not in nearby nodes\n if len(nearby_nodes_set) > 0:\n if (u not in nearby_nodes_set) and (v not in nearby_nodes_set):\n continue\n if verbose:\n print((\"u,v,key,data:\", u, v, key, data))\n print((\" type data['geometry']:\", type(data['geometry'])))\n try:\n line = data['geometry']\n except KeyError:\n line = data['attr_dict']['geometry']\n geom_list.append(line)\n dist_list.append(p.distance(line))\n edge_list.append([u, v, key])\n # get closest edge\n min_idx = np.argmin(dist_list)\n min_dist = dist_list[min_idx]\n best_edge = edge_list[min_idx]\n best_geom = geom_list[min_idx]\n\n return best_edge, min_dist, best_geom\n\n\n###############################################################################\ndef insert_point_into_G(G_, point, node_id=100000, max_distance_meters=5,\n nearby_nodes_set=set([]), allow_renaming=True,\n verbose=False, super_verbose=False):\n \"\"\"\n Insert a new node in the graph closest to the given point.\n\n Notes\n -----\n If the point is too far from the graph, don't insert a node.\n Assume all edges have a linestring geometry\n http://toblerity.org/shapely/manual.html#object.simplify\n Sometimes the point to insert will have the same coordinates as an\n existing point. If allow_renaming == True, relabel the existing node.\n convert linestring to multipoint?\n https://github.com/Toblerity/Shapely/issues/190\n\n TODO : Implement a version without renaming that tracks which node is\n closest to the desired point.\n\n Arguments\n ---------\n G_ : networkx graph\n Input networkx graph, with edges assumed to have a dictioary of\n properties that includes the 'geometry' key.\n point : shapely Point\n Shapely point containing (x, y) coordinates\n node_id : int\n Unique identifier of node to insert. Defaults to ``100000``.\n max_distance_meters : float\n Maximum distance in meters between point and graph. Defaults to ``5``.\n nearby_nodes_set : set\n Set of possible edge endpoints to search. If nearby_nodes_set is not\n empty, only edges with a node in this set will be checked (this can\n greatly speed compuation on large graphs). If nearby_nodes_set is\n empty, check all possible edges in the graph.\n Defaults to ``set([])``.\n allow_renameing : boolean\n Switch to allow renaming of an existing node with node_id if the\n existing node is closest to the point. Defaults to ``False``.\n verbose : boolean\n Switch to print relevant values to screen. Defaults to ``False``.\n super_verbose : boolean\n Switch to print mucho values to screen. 
Defaults to ``False``.\n\n Returns\n -------\n G_, node_props, min_dist : tuple\n G_ is the updated graph\n node_props gives the properties of the inserted node\n min_dist is the distance from the point to the graph\n \"\"\"\n\n # check if node_id already exists in G\n # if node_id in set(G_.nodes()):\n # print (\"node_id:\", node_id, \"already in G, cannot insert node!\")\n # return\n\n best_edge, min_dist, best_geom = get_closest_edge_from_G(\n G_, point, nearby_nodes_set=nearby_nodes_set,\n verbose=super_verbose)\n [u, v, key] = best_edge\n G_node_set = set(G_.nodes())\n\n if verbose:\n print(\"Inserting point:\", node_id)\n print(\"best edge:\", best_edge)\n print(\" best edge dist:\", min_dist)\n u_loc = [G_.nodes[u]['x'], G_.nodes[u]['y']]\n v_loc = [G_.nodes[v]['x'], G_.nodes[v]['y']]\n print(\"ploc:\", (point.x, point.y))\n print(\"uloc:\", u_loc)\n print(\"vloc:\", v_loc)\n\n if min_dist > max_distance_meters:\n if verbose:\n print(\"min_dist > max_distance_meters, skipping...\")\n return G_, {}, -1, -1\n\n else:\n # updated graph\n\n # skip if node exists already\n if node_id in G_node_set:\n if verbose:\n print(\"Node ID:\", node_id, \"already exists, skipping...\")\n return G_, {}, -1, -1\n\n # G_.edges[best_edge[0]][best_edge[1]][0]['geometry']\n line_geom = best_geom\n\n # Length along line that is closest to the point\n line_proj = line_geom.project(point)\n\n # Now combine with interpolated point on line\n new_point = line_geom.interpolate(line_geom.project(point))\n x, y = new_point.x, new_point.y\n\n #################\n # create new node\n \n try:\n # first get zone, then convert to latlon\n _, _, zone_num, zone_letter = utm.from_latlon(G_.nodes[u]['lat'],\n G_.nodes[u]['lon'])\n # convert utm to latlon\n lat, lon = utm.to_latlon(x, y, zone_num, zone_letter)\n except:\n lat, lon = y, x\n\n # set properties\n # props = G_.nodes[u]\n node_props = {'highway': 'insertQ',\n 'lat': lat,\n 'lon': lon,\n 'osmid': node_id,\n 'x': x,\n 'y': y}\n # add node\n G_.add_node(node_id, **node_props)\n\n # assign, then update edge props for new edge\n _, _, edge_props_new = copy.deepcopy(\n list(G_.edges([u, v], data=True))[0])\n # remove extraneous 0 key\n\n # print (\"edge_props_new.keys():\", edge_props_new)\n # if list(edge_props_new.keys()) == [0]:\n # edge_props_new = edge_props_new[0]\n\n # cut line\n split_line = cut_linestring(line_geom, line_proj)\n # line1, line2, cp = cut_linestring(line_geom, line_proj)\n if split_line is None:\n print(\"Failure in cut_linestring()...\")\n print(\"type(split_line):\", type(split_line))\n print(\"split_line:\", split_line)\n print(\"line_geom:\", line_geom)\n print(\"line_geom.length:\", line_geom.length)\n print(\"line_proj:\", line_proj)\n print(\"min_dist:\", min_dist)\n return G_, {}, 0, 0\n\n if verbose:\n print(\"split_line:\", split_line)\n\n # if cp.is_empty:\n if len(split_line) == 1:\n if verbose:\n print(\"split line empty, min_dist:\", min_dist)\n # get coincident node\n outnode = ''\n outnode_x, outnode_y = -1, -1\n x_p, y_p = new_point.x, new_point.y\n x_u, y_u = G_.nodes[u]['x'], G_.nodes[u]['y']\n x_v, y_v = G_.nodes[v]['x'], G_.nodes[v]['y']\n # if verbose:\n # print \"x_p, y_p:\", x_p, y_p\n # print \"x_u, y_u:\", x_u, y_u\n # print \"x_v, y_v:\", x_v, y_v\n\n # sometimes it seems that the nodes aren't perfectly coincident,\n # so see if it's within a buffer\n buff = 0.05 # meters\n if (abs(x_p - x_u) <= buff) and (abs(y_p - y_u) <= buff):\n outnode = u\n outnode_x, outnode_y = x_u, y_u\n elif (abs(x_p - x_v) <= buff) and 
(abs(y_p - y_v) <= buff):\n outnode = v\n outnode_x, outnode_y = x_v, y_v\n # original method with exact matching\n # if (x_p == x_u) and (y_p == y_u):\n # outnode = u\n # outnode_x, outnode_y = x_u, y_u\n # elif (x_p == x_v) and (y_p == y_v):\n # outnode = v\n # outnode_x, outnode_y = x_v, y_v\n else:\n print(\"Error in determining node coincident with node: \"\n + str(node_id) + \" along edge: \" + str(best_edge))\n print(\"x_p, y_p:\", x_p, y_p)\n print(\"x_u, y_u:\", x_u, y_u)\n print(\"x_v, y_v:\", x_v, y_v)\n # return\n return G_, {}, 0, 0\n\n # if the line cannot be split, that means that the new node\n # is coincident with an existing node. Relabel, if desired\n if allow_renaming:\n node_props = G_.nodes[outnode]\n # A dictionary with the old labels as keys and new labels\n # as values. A partial mapping is allowed.\n mapping = {outnode: node_id}\n Gout = nx.relabel_nodes(G_, mapping)\n if verbose:\n print(\"Swapping out node ids:\", mapping)\n return Gout, node_props, x_p, y_p\n\n else:\n # new node is already added, presumably at the exact location\n # of an existing node. So just remove the best edge and make\n # an edge from new node to existing node, length should be 0.0\n\n line1 = LineString([new_point, Point(outnode_x, outnode_y)])\n edge_props_line1 = edge_props_new.copy()\n edge_props_line1['length'] = line1.length\n edge_props_line1['geometry'] = line1\n # make sure length is zero\n if line1.length > buff:\n print(\"Nodes should be coincident and length 0!\")\n print(\" line1.length:\", line1.length)\n print(\" x_u, y_u :\", x_u, y_u)\n print(\" x_v, y_v :\", x_v, y_v)\n print(\" x_p, y_p :\", x_p, y_p)\n print(\" new_point:\", new_point)\n print(\" Point(outnode_x, outnode_y):\",\n Point(outnode_x, outnode_y))\n return\n\n # add edge of length 0 from new node to neareest existing node\n G_.add_edge(node_id, outnode, **edge_props_line1)\n return G_, node_props, x, y\n\n # originally, if not renaming nodes,\n # just ignore this complication and return the orignal\n # return G_, node_props, 0, 0\n\n else:\n # else, create new edges\n line1, line2 = split_line\n\n # get distances\n # print (\"insert_point(), G_.nodes[v]:\", G_.nodes[v])\n u_loc = [G_.nodes[u]['x'], G_.nodes[u]['y']]\n v_loc = [G_.nodes[v]['x'], G_.nodes[v]['y']]\n # compare to first point in linestring\n geom_p0 = list(line_geom.coords)[0]\n # or compare to inserted point? 
[this might fail if line is very\n # curved!]\n # geom_p0 = (x,y)\n dist_to_u = scipy.spatial.distance.euclidean(u_loc, geom_p0)\n dist_to_v = scipy.spatial.distance.euclidean(v_loc, geom_p0)\n # reverse edge order if v closer than u\n if dist_to_v < dist_to_u:\n line2, line1 = split_line\n\n if verbose:\n print(\"Creating two edges from split...\")\n print(\" original_length:\", line_geom.length)\n print(\" line1_length:\", line1.length)\n print(\" line2_length:\", line2.length)\n print(\" u, dist_u_to_point:\", u, dist_to_u)\n print(\" v, dist_v_to_point:\", v, dist_to_v)\n print(\" min_dist:\", min_dist)\n\n # add new edges\n edge_props_line1 = edge_props_new.copy()\n edge_props_line1['length'] = line1.length\n edge_props_line1['geometry'] = line1\n # remove geometry?\n # edge_props_line1.pop('geometry', None)\n # line2\n edge_props_line2 = edge_props_new.copy()\n edge_props_line2['length'] = line2.length\n edge_props_line2['geometry'] = line2\n # remove geometry?\n # edge_props_line1.pop('geometry', None)\n\n # insert edge regardless of direction\n # G_.add_edge(u, node_id, **edge_props_line1)\n # G_.add_edge(node_id, v, **edge_props_line2)\n\n # check which direction linestring is travelling (it may be going\n # from v -> u, which means we need to reverse the linestring)\n # otherwise new edge is tangled\n geom_p0 = list(line_geom.coords)[0]\n dist_to_u = scipy.spatial.distance.euclidean(u_loc, geom_p0)\n dist_to_v = scipy.spatial.distance.euclidean(v_loc, geom_p0)\n # if verbose:\n # print \"dist_to_u, dist_to_v:\", dist_to_u, dist_to_v\n if dist_to_u < dist_to_v:\n G_.add_edge(u, node_id, **edge_props_line1)\n G_.add_edge(node_id, v, **edge_props_line2)\n else:\n G_.add_edge(node_id, u, **edge_props_line1)\n G_.add_edge(v, node_id, **edge_props_line2)\n\n if verbose:\n print(\"insert edges:\", u, '-', node_id, 'and', node_id, '-', v)\n\n # remove initial edge\n G_.remove_edge(u, v, key)\n\n return G_, node_props, x, y\n\n\n###############################################################################\ndef insert_control_points(G_, control_points, max_distance_meters=10,\n allow_renaming=True,\n n_nodes_for_kd=1000, n_neighbors=20,\n x_coord='x', y_coord='y',\n verbose=True, super_verbose=False):\n \"\"\"\n Wrapper around insert_point_into_G() for all control_points.\n\n Notes\n -----\n control_points are assumed to be of the format:\n [[node_id, x, y], ... ]\n\n TODO : Implement a version without renaming that tracks which node is\n closest to the desired point.\n\n Arguments\n ---------\n G_ : networkx graph\n Input networkx graph, with edges assumed to have a dictioary of\n properties that includes the 'geometry' key.\n control_points : array\n Points to insert in the graph, assumed to the of the format:\n [[node_id, x, y], ... ]\n max_distance_meters : float\n Maximum distance in meters between point and graph. Defaults to ``5``.\n allow_renameing : boolean\n Switch to allow renaming of an existing node with node_id if the\n existing node is closest to the point. Defaults to ``False``.\n n_nodes_for_kd : int\n Minumu size of graph to render to kdtree to speed node placement.\n Defaults to ``1000``.\n n_neighbors : int\n Number of neigbors to return if building a kdtree. Defaults to ``20``.\n x_coord : str\n Name of x_coordinate, can be 'x' or 'lon'. Defaults to ``'x'``.\n y_coord : str\n Name of y_coordinate, can be 'y' or 'lat'. Defaults to ``'y'``.\n verbose : boolean\n Switch to print relevant values to screen. 
Defaults to ``False``.\n super_verbose : boolean\n Switch to print mucho values to screen. Defaults to ``False``.\n\n Returns\n -------\n Gout, new_xs, new_ys : tuple\n Gout is the updated graph\n new_xs, new_ys are coordinates of the inserted points\n \"\"\"\n\n t0 = time.time()\n\n # insertion can be super slow so construct kdtree if a large graph\n if len(G_.nodes()) > n_nodes_for_kd:\n # construct kdtree of ground truth\n kd_idx_dic, kdtree, pos_arr = apls_utils.G_to_kdtree(G_)\n # print(\"kd_idx_dic:\", kd_idx_dic)\n # print(\"kdtree:\", kdtree)\n # print(\"pos_arr:\", pos_arr)\n\n Gout = G_.copy()\n new_xs, new_ys = [], []\n if len(G_.nodes()) == 0:\n return Gout, new_xs, new_ys\n\n for i, [node_id, x, y] in enumerate(control_points):\n \n if math.isinf(x) or math.isinf(y):\n print(\"Infinity in coords!:\", x, y)\n return\n \n # if verbose:\n if (i % 20) == 0:\n print(i, \"/\", len(control_points),\n \"Insert control point:\", node_id, \"x =\", x, \"y =\", y)\n point = Point(x, y)\n\n # if large graph, determine nearby nodes\n if len(G_.nodes()) > n_nodes_for_kd:\n # get closest nodes\n node_names, dists_m_refine = apls_utils.nodes_near_point(\n x, y, kdtree, kd_idx_dic, x_coord=x_coord, y_coord=y_coord,\n # radius_m=radius_m,\n n_neighbors=n_neighbors,\n verbose=False)\n nearby_nodes_set = set(node_names)\n else:\n nearby_nodes_set = set([])\n\n # insert point\n Gout, node_props, xnew, ynew = insert_point_into_G(\n Gout, point, node_id=node_id,\n max_distance_meters=max_distance_meters,\n nearby_nodes_set=nearby_nodes_set,\n allow_renaming=allow_renaming,\n verbose=super_verbose)\n # xnew = node_props['x']\n # ynew = node_props['y']\n if (x != 0) and (y != 0):\n new_xs.append(xnew)\n new_ys.append(ynew)\n\n t1 = time.time()\n print(\"Time to run insert_control_points():\", t1-t0, \"seconds\")\n return Gout, new_xs, new_ys\n\n\n###############################################################################\ndef create_graph_midpoints(G_, linestring_delta=50, is_curved_eps=0.03,\n n_id_add_val=1, allow_renaming=True,\n figsize=(0, 0),\n verbose=False, super_verbose=False):\n \"\"\"\n Insert midpoint nodes into long edges on the graph.\n\n Arguments\n ---------\n G_ : networkx graph\n Input networkx graph, with edges assumed to have a dictioary of\n properties that includes the 'geometry' key.\n linestring_delta : float\n Distance in meters between linestring midpoints. Defaults to ``50``.\n is_curved_eps : float\n Minumum curvature for injecting nodes (if curvature is less than this\n value, no midpoints will be injected). If < 0, always inject points\n on line, regardless of curvature. Defaults to ``0.3``.\n n_id_add_val : int\n Sets min midpoint id above existing nodes\n e.g.: G.nodes() = [1,2,4], if n_id_add_val = 5, midpoints will\n be [9,10,11,...]\n allow_renameing : boolean\n Switch to allow renaming of an existing node with node_id if the\n existing node is closest to the point. Defaults to ``False``.\n figsize : tuple\n Figure size for optional plot. Defaults to ``(0,0)`` (no plot).\n verbose : boolean\n Switch to print relevant values to screen. Defaults to ``False``.\n super_verbose : boolean\n Switch to print mucho values to screen. 
Defaults to ``False``.\n\n Returns\n -------\n Gout, xms, yms : tuple\n Gout is the updated graph\n xms, yms are coordinates of the inserted points\n \"\"\"\n\n # midpoint_loc = 0.5 # take the central midpoint for straight lines\n if len(G_.nodes()) == 0:\n return G_, [], []\n\n # midpoints\n xms, yms = [], []\n Gout = G_.copy()\n # midpoint_name_val, midpoint_name_inc = 0.01, 0.01\n midpoint_name_val, midpoint_name_inc = np.max(G_.nodes())+n_id_add_val, 1\n # for u, v, key, data in G_.edges(keys=True, data=True):\n for u, v, data in G_.edges(data=True):\n\n # curved line\n if 'geometry' in data:\n\n # first edge props and get utm zone and letter\n edge_props_init = G_.edges([u, v])\n # _, _, zone_num, zone_letter = utm.from_latlon(G_.nodes[u]['lat'],\n # G_.nodes[u]['lon'])\n\n linelen = data['length']\n line = data['geometry']\n\n xs, ys = line.xy # for plotting\n\n #################\n # check if curved or not\n minx, miny, maxx, maxy = line.bounds\n # get euclidean distance\n dst = scipy.spatial.distance.euclidean([minx, miny], [maxx, maxy])\n # ignore if almost straight\n if np.abs(dst - linelen) / linelen < is_curved_eps:\n # print \"Line straight, skipping...\"\n continue\n #################\n\n #################\n # also ignore super short lines\n if linelen < 0.75*linestring_delta:\n # print \"Line too short, skipping...\"\n continue\n #################\n\n if verbose:\n print(\"create_graph_midpoints()...\")\n print(\" u,v:\", u, v)\n print(\" data:\", data)\n print(\" edge_props_init:\", edge_props_init)\n\n # interpolate midpoints\n # if edge is short, use midpoint, else get evenly spaced points\n if linelen <= linestring_delta:\n interp_dists = [0.5 * line.length]\n else:\n # get evenly spaced points\n npoints = len(np.arange(0, linelen, linestring_delta)) + 1\n interp_dists = np.linspace(0, linelen, npoints)[1:-1]\n if verbose:\n print(\" interp_dists:\", interp_dists)\n\n # create nodes\n node_id_new_list = []\n xms_tmp, yms_tmp = [], []\n for j, d in enumerate(interp_dists):\n if verbose:\n print(\" \", j, \"interp_dist:\", d)\n\n midPoint = line.interpolate(d)\n xm0, ym0 = midPoint.xy\n xm = xm0[-1]\n ym = ym0[-1]\n point = Point(xm, ym)\n xms.append(xm)\n yms.append(ym)\n xms_tmp.append(xm)\n yms_tmp.append(ym)\n if verbose:\n print(\" midpoint:\", xm, ym)\n\n # add node to graph, with properties of u\n node_id = midpoint_name_val\n # node_id = np.round(u + midpoint_name_val,2)\n midpoint_name_val += midpoint_name_inc\n node_id_new_list.append(node_id)\n if verbose:\n print(\" node_id:\", node_id)\n\n # if j > 3:\n # continue\n\n # add to graph\n Gout, node_props, _, _ = insert_point_into_G(\n Gout, point, node_id=node_id,\n allow_renaming=allow_renaming,\n verbose=super_verbose)\n\n # plot, if desired\n if figsize != (0, 0):\n fig, (ax) = plt.subplots(1, 1, figsize=(1*figsize[0], figsize[1]))\n ax.plot(xs, ys, color='#6699cc', alpha=0.7,\n linewidth=3, solid_capstyle='round', zorder=2)\n ax.scatter(xm, ym, color='red')\n ax.set_title('Line Midpoint')\n plt.axis('equal')\n\n return Gout, xms, yms\n\n\n###############################################################################\ndef _clean_sub_graphs(G_, min_length=80, max_nodes_to_skip=100,\n weight='length', verbose=True,\n super_verbose=False):\n \"\"\"\n Remove subgraphs with a max path length less than min_length,\n if the subgraph has more than max_noxes_to_skip, don't check length\n (this step great reduces processing time)\n \"\"\"\n\n if len(G_.nodes()) == 0:\n return G_\n\n if verbose:\n print(\"Running 
clean_sub_graphs...\")\n sub_graphs = list(nx.connected_component_subgraphs(G_))\n bad_nodes = []\n if verbose:\n print(\" len(G_.nodes()):\", len(G_.nodes()))\n print(\" len(G_.edges()):\", len(G_.edges()))\n if super_verbose:\n print(\"G_.nodes:\", G_.nodes())\n edge_tmp = G_.edges()[np.random.randint(len(G_.edges()))]\n print(edge_tmp, \"G.edge props:\", G_.edges[edge_tmp[0]][edge_tmp[1]])\n\n for G_sub in sub_graphs:\n # don't check length if too many nodes in subgraph\n if len(G_sub.nodes()) > max_nodes_to_skip:\n continue\n\n else:\n all_lengths = dict(\n nx.all_pairs_dijkstra_path_length(G_sub, weight=weight))\n if super_verbose:\n print(\" \\nGs.nodes:\", G_sub.nodes())\n print(\" all_lengths:\", all_lengths)\n # get all lenghts\n lens = []\n\n # for u,v in all_lengths.iteritems():\n for u in all_lengths.keys():\n v = all_lengths[u]\n # for uprime, vprime in v.iteritems():\n for uprime in v.keys():\n vprime = v[uprime]\n lens.append(vprime)\n if super_verbose:\n print(\" u, v\", u, v)\n print(\" uprime, vprime:\", uprime, vprime)\n max_len = np.max(lens)\n if super_verbose:\n print(\" Max length of path:\", max_len)\n if max_len < min_length:\n bad_nodes.extend(G_sub.nodes())\n if super_verbose:\n print(\" appending to bad_nodes:\", G_sub.nodes())\n\n # remove bad_nodes\n G_.remove_nodes_from(bad_nodes)\n if verbose:\n print(\" num bad_nodes:\", len(bad_nodes))\n # print (\"bad_nodes:\", bad_nodes)\n print(\" len(G'.nodes()):\", len(G_.nodes()))\n print(\" len(G'.edges()):\", len(G_.edges()))\n if super_verbose:\n print(\" G_.nodes:\", G_.nodes())\n\n return G_\n\n\n###############################################################################\ndef _create_gt_graph(geoJson, im_test_file, network_type='all_private',\n # linestring_delta=50, is_curved_eps=0.012,\n valid_road_types=set([]),\n osmidx=0, osmNodeidx=0,\n # weight='length',\n subgraph_filter_weight='length',\n min_subgraph_length=5,\n travel_time_key='travel_time_s',\n speed_key='inferred_speed_mps',\n use_pix_coords=False,\n verbose=False,\n super_verbose=False):\n '''Ingest graph from geojson file and refine'''\n\n t0 = time.time()\n if verbose:\n print(\"Executing graphTools.create_graphGeoJson()...\")\n G0gt_init = graphTools.create_graphGeoJson(\n geoJson, name='unnamed', retain_all=True,\n network_type=network_type, valid_road_types=valid_road_types,\n osmidx=osmidx, osmNodeidx=osmNodeidx, verbose=verbose)\n t1 = time.time()\n if verbose:\n print(\"Time to run create_graphGeoJson():\", t1 - t0, \"seconds\")\n\n # refine graph\n G_gt = _refine_gt_graph(G0gt_init, im_test_file, \n subgraph_filter_weight=subgraph_filter_weight,\n min_subgraph_length=min_subgraph_length,\n travel_time_key=travel_time_key,\n speed_key=speed_key,\n use_pix_coords=use_pix_coords,\n verbose=verbose,\n super_verbose=super_verbose)\n \n# # save latlon geometry (osmnx overwrites the 'geometry' tag)\n# # also compute pixel geom\n# for i, (u, v, key, data) in enumerate(G0gt_init.edges(keys=True, data=True)):\n# # print (\"apsl.create_gt_graph(): data:\", data)\n# if 'geometry' not in data:\n# # print (\"G0gt_init.nodes[u]:\", G0gt_init.nodes[u])\n# sourcex, sourcey = G0gt_init.nodes[u]['x'], G0gt_init.nodes[u]['y']\n# targetx, targety = G0gt_init.nodes[v]['x'], G0gt_init.nodes[v]['y']\n# line_geom = LineString([Point(sourcex, sourcey),\n# Point(targetx, targety)])\n# else:\n# line_geom = data['geometry']\n# data['geometry_latlon'] = line_geom.wkt\n# \n# # print (\"im_test_file:\", im_test_file)\n# if os.path.exists(im_test_file):\n# # get pixel 
geom (do this after simplify so that we don't have to\n# # collapse the lines (see wkt_to_G.wkt_to_G)\n# geom_pix = apls_utils.geomGeo2geomPixel(line_geom,\n# input_raster=im_test_file)\n# data['geometry_pix'] = geom_pix.wkt\n# data['length_pix'] = geom_pix.length \n#\n# # print coords\n# # n = G0gt_init.nodes()[0]\n# # print \"n, G0gt_init.nodes[n]:\", n, G0gt_init.nodes[n]\n#\n# if len(G0gt_init.nodes()) == 0:\n# return G0gt_init, G0gt_init # , [], [], [], []\n#\n# G0gt = osmnx_funcs.project_graph(G0gt_init)\n# if verbose:\n# print(\"len G0gt.nodes():\", len(G0gt.nodes()))\n# print(\"len G0gt.edges:\", len(G0gt.edges()))\n# # print an edge?\n# # edge_tmp = list(G0gt.edges())[-1]\n# # print (edge_tmp, \"random edge props:\", G0gt.edges[edge_tmp[0], edge_tmp[1]]) #G.edge[edge_tmp[0]][edge_tmp[1]])\n# # print (edge_tmp, \"random edge props:\", G0gt.edges([edge_tmp[0], edge_tmp[1]])) #G.edge[edge_tmp[0]][edge_tmp[1]])\n#\n# # print coords\n# # n = G0gt.nodes()[0]\n# # print \"n, G0gt.nodes[n]:\", n, G0gt.nodes[n]\n#\n# # G1gt = osmnx_funcs.simplify_graph(G0gt)\n# # G2gt_init = G1gt.to_undirected()\n#\n# if verbose:\n# print(\"Simplifying graph...\")\n# G2gt_init0 = osmnx_funcs.simplify_graph(G0gt).to_undirected()\n# # G2gt_init = osmnx_funcs.simplify_graph(G0gt.to_undirected())\n#\n# # make sure all edges have a geometry assigned to them\n# G2gt_init1 = create_edge_linestrings(\n# G2gt_init0.copy(), remove_redundant=True)\n# t2 = time.time()\n# if verbose:\n# print(\"Time to project, simplify, and create linestrings:\",\n# t2 - t1, \"seconds\")\n#\n# # clean up connected components\n# G2gt_init2 = _clean_sub_graphs(\n# G2gt_init1.copy(), min_length=min_subgraph_length,\n# weight=subgraph_filter_weight,\n# verbose=verbose, super_verbose=super_verbose)\n#\n# # add pixel coords\n# try:\n# if os.path.exists(im_test_file):\n# G_gt_almost, _, gt_graph_coords = apls_utils._set_pix_coords(\n# G2gt_init2.copy(), im_test_file)\n# else:\n# G_gt_almost = G2gt_init2\n# except:\n# pass\n#\n# # !!!!!!!!!!!!!!!\n# # ensure nodes have coorect xpix and ypix since _set_pix_coords is faulty!\n# for j, n in enumerate(G_gt_almost.nodes()):\n# x, y = G_gt_almost.nodes[n]['x'], G_gt_almost.nodes[n]['y']\n# geom_pix = apls_utils.geomGeo2geomPixel(Point(x, y),\n# input_raster=im_test_file)\n# [(xp, yp)] = list(geom_pix.coords)\n# G_gt_almost.nodes[n]['x_pix'] = xp\n# G_gt_almost.nodes[n]['y_pix'] = yp\n#\n#\n# # update pixel and lat lon geometries that get turned into lists upon\n# # simplify() that produces a 'geometry' tag in wmp\n# if verbose:\n# print(\"Merge 'geometry' linestrings...\")\n# keys_tmp = ['geometry_pix', 'geometry_latlon']\n# for i, (u, v, attr_dict) in enumerate(G_gt_almost.edges(data=True)):\n# # if verbose and (i % 10000) == 0:\n# # print (i, u , v)\n# for key_tmp in keys_tmp:\n#\n# if key_tmp not in attr_dict.keys():\n# continue\n#\n# if super_verbose:\n# print(\"Merge\", key_tmp, \"...\")\n# geom = attr_dict[key_tmp]\n#\n# if type(geom) == list:\n# # check if the list items are wkt strings, if so, create\n# # linestrigs\n# # or (type(geom_pix[0]) == unicode):\n# if (type(geom[0]) == str):\n# geom = [shapely.wkt.loads(ztmp) for ztmp in geom]\n# # merge geoms\n# # geom = shapely.ops.linemerge(geom)\n# # attr_dict[key_tmp] = geom\n# attr_dict[key_tmp] = shapely.ops.linemerge(geom)\n# elif type(geom) == str:\n# attr_dict[key_tmp] = shapely.wkt.loads(geom)\n# else:\n# pass\n#\n# # update wkt_pix?\n# if 'wkt_pix' in attr_dict.keys():\n# # print (\"attr_dict['geometry_pix':\", 
attr_dict['geometry_pix'])\n# attr_dict['wkt_pix'] = attr_dict['geometry_pix'].wkt\n#\n# # update 'length_pix'\n# if 'length_pix' in attr_dict.keys():\n# attr_dict['length_pix'] = np.sum([attr_dict['length_pix']])\n#\n# # check if simplify created various speeds on an edge\n# speed_keys = ['speed_mph', 'inferred_speed_mps']\n# for sk in speed_keys:\n# if sk not in attr_dict.keys():\n# continue\n# if type(attr_dict[sk]) == list:\n# if verbose:\n# print(\" Taking mean of multiple speeds on edge:\", u, v)\n# attr_dict[sk] = np.mean(attr_dict[sk])\n# if verbose:\n# print(\"u, v, speed_key, attr_dict)[speed_key]:\",\n# u, v, sk, attr_dict[sk])\n#\n# # add travel time\n# G_gt = add_travel_time(G_gt_almost.copy(),\n# speed_key=speed_key,\n# travel_time_key=travel_time_key)\n\n return G_gt, G0gt_init\n\n\n###############################################################################\ndef _refine_gt_graph(G0gt_init, im_test_file, \n subgraph_filter_weight='length',\n min_subgraph_length=5,\n travel_time_key='travel_time_s',\n speed_key='inferred_speed_mps',\n use_pix_coords=False,\n verbose=False,\n super_verbose=False):\n \"\"\"refine ground truth graph\"\"\"\n \n t1 = time.time()\n # save latlon geometry (osmnx overwrites the 'geometry' tag)\n # also compute pixel geom\n for i, (u, v, key, data) in enumerate(G0gt_init.edges(keys=True, data=True)):\n # print (\"apsl.create_gt_graph(): data:\", data)\n if 'geometry' not in data:\n # print (\"G0gt_init.nodes[u]:\", G0gt_init.nodes[u])\n sourcex, sourcey = G0gt_init.nodes[u]['x'], G0gt_init.nodes[u]['y']\n targetx, targety = G0gt_init.nodes[v]['x'], G0gt_init.nodes[v]['y']\n line_geom = LineString([Point(sourcex, sourcey),\n Point(targetx, targety)])\n else:\n line_geom = data['geometry']\n data['geometry_latlon'] = line_geom.wkt\n\n # print (\"im_test_file:\", im_test_file)\n if os.path.exists(im_test_file):\n # get pixel geom (do this after simplify so that we don't have to\n # collapse the lines (see wkt_to_G.wkt_to_G)\n geom_pix = apls_utils.geomGeo2geomPixel(line_geom,\n input_raster=im_test_file)\n data['geometry_pix'] = geom_pix.wkt\n data['length_pix'] = geom_pix.length\n\n # print coords\n # n = G0gt_init.nodes()[0]\n # print \"n, G0gt_init.nodes[n]:\", n, G0gt_init.nodes[n]\n\n if len(G0gt_init.nodes()) == 0:\n return G0gt_init\n\n G0gt = osmnx_funcs.project_graph(G0gt_init)\n if verbose:\n print(\"len G0gt.nodes():\", len(G0gt.nodes()))\n print(\"len G0gt.edges:\", len(G0gt.edges()))\n # print an edge?\n # edge_tmp = list(G0gt.edges())[-1]\n # print (edge_tmp, \"random edge props:\", G0gt.edges[edge_tmp[0], edge_tmp[1]]) #G.edge[edge_tmp[0]][edge_tmp[1]])\n # print (edge_tmp, \"random edge props:\", G0gt.edges([edge_tmp[0], edge_tmp[1]])) #G.edge[edge_tmp[0]][edge_tmp[1]])\n\n # print coords\n # n = G0gt.nodes()[0]\n # print \"n, G0gt.nodes[n]:\", n, G0gt.nodes[n]\n\n # G1gt = osmnx_funcs.simplify_graph(G0gt)\n # G2gt_init = G1gt.to_undirected()\n\n if verbose:\n print(\"Simplifying graph...\")\n try:\n G2gt_init0 = osmnx_funcs.simplify_graph(G0gt).to_undirected()\n # G2gt_init = osmnx_funcs.simplify_graph(G0gt.to_undirected())\n except:\n print(\"Cannot simplify graph, using original\")\n G2gt_init0 = G0gt\n \n # make sure all edges have a geometry assigned to them\n G2gt_init1 = create_edge_linestrings(\n G2gt_init0.copy(), remove_redundant=True)\n t2 = time.time()\n if verbose:\n print(\"Time to project, simplify, and create linestrings:\",\n t2 - t1, \"seconds\")\n\n # clean up connected components\n G2gt_init2 = _clean_sub_graphs(\n 
G2gt_init1.copy(), min_length=min_subgraph_length,\n weight=subgraph_filter_weight,\n verbose=verbose, super_verbose=super_verbose)\n\n # add pixel coords\n try:\n if os.path.exists(im_test_file):\n G_gt_almost, _, gt_graph_coords = apls_utils._set_pix_coords(\n G2gt_init2.copy(), im_test_file)\n else:\n G_gt_almost = G2gt_init2\n except:\n pass\n\n # !!!!!!!!!!!!!!!\n # ensure nodes have coorect xpix and ypix since _set_pix_coords is faulty!\n for j, n in enumerate(G_gt_almost.nodes()):\n x, y = G_gt_almost.nodes[n]['x'], G_gt_almost.nodes[n]['y']\n geom_pix = apls_utils.geomGeo2geomPixel(Point(x, y),\n input_raster=im_test_file)\n [(xp, yp)] = list(geom_pix.coords)\n G_gt_almost.nodes[n]['x_pix'] = xp\n G_gt_almost.nodes[n]['y_pix'] = yp\n\n # update pixel and lat lon geometries that get turned into lists upon\n # simplify() that produces a 'geometry' tag in wmp\n if verbose:\n print(\"Merge 'geometry' linestrings...\")\n keys_tmp = ['geometry_pix', 'geometry_latlon']\n for i, (u, v, attr_dict) in enumerate(G_gt_almost.edges(data=True)):\n # if verbose and (i % 10000) == 0:\n # print (i, u , v)\n for key_tmp in keys_tmp:\n\n if key_tmp not in attr_dict.keys():\n continue\n\n if super_verbose:\n print(\"Merge\", key_tmp, \"...\")\n geom = attr_dict[key_tmp]\n\n if type(geom) == list:\n # check if the list items are wkt strings, if so, create\n # linestrigs\n # or (type(geom_pix[0]) == unicode):\n if (type(geom[0]) == str):\n geom = [shapely.wkt.loads(ztmp) for ztmp in geom]\n # merge geoms\n # geom = shapely.ops.linemerge(geom)\n # attr_dict[key_tmp] = geom\n attr_dict[key_tmp] = shapely.ops.linemerge(geom)\n elif type(geom) == str:\n attr_dict[key_tmp] = shapely.wkt.loads(geom)\n else:\n pass\n\n # update wkt_pix?\n if 'wkt_pix' in attr_dict.keys():\n # print (\"attr_dict['geometry_pix':\", attr_dict['geometry_pix'])\n attr_dict['wkt_pix'] = attr_dict['geometry_pix'].wkt\n\n # update 'length_pix'\n if 'length_pix' in attr_dict.keys():\n attr_dict['length_pix'] = np.sum([attr_dict['length_pix']])\n\n # check if simplify created various speeds on an edge\n speed_keys = [speed_key, 'inferred_speed_mph', 'inferred_speed_mps']\n for sk in speed_keys:\n if sk not in attr_dict.keys():\n continue\n if type(attr_dict[sk]) == list:\n if verbose:\n print(\" Taking mean of multiple speeds on edge:\", u, v)\n attr_dict[sk] = np.mean(attr_dict[sk])\n if verbose:\n print(\"u, v, speed_key, attr_dict)[speed_key]:\",\n u, v, sk, attr_dict[sk])\n\n # add travel time\n G_gt = add_travel_time(G_gt_almost.copy(),\n speed_key=speed_key,\n travel_time_key=travel_time_key)\n\n return G_gt\n\n\n###############################################################################\ndef make_graphs(G_gt_, G_p_,\n weight='length',\n speed_key='inferred_speed_mps',\n travel_time_key='travel_time_s',\n max_nodes_for_midpoints=500,\n linestring_delta=50,\n is_curved_eps=0.012,\n max_snap_dist=4,\n allow_renaming=True,\n verbose=False,\n super_verbose=False):\n \"\"\"\n Match nodes in ground truth and propsal graphs, and get paths.\n\n Notes\n -----\n The path length dictionaries returned by this function will be fed into\n compute_metric().\n\n Arguments\n ---------\n G_gt_ : networkx graph\n Ground truth graph.\n G_p_ : networkd graph\n Proposal graph over the same region.\n weight : str\n Key in the edge properties dictionary to use for the path length\n weight. 
Defaults to ``'length'``.\n speed_key : str\n Key in the edge properties dictionary to use for the edge speed.\n Defaults to ``'inferred_speed_mps'``.\n travel_time_key : str\n Name to assign travel time in the edge properties dictionary.\n Defaults to ``'travel_time_s'``.\n max_nodes_for_midpoints : int\n Maximum number of gt nodes to inject midpoints. If there are more\n gt nodes than this, skip midpoints and use this number of points\n to comput APLS.\n linestring_delta : float\n Distance in meters between linestring midpoints.\n If len gt nodes > max_nodes_for_midppoints this argument is ignored.\n Defaults to ``50``.\n is_curved_eps : float\n Minumum curvature for injecting nodes (if curvature is less than this\n value, no midpoints will be injected). If < 0, always inject points\n on line, regardless of curvature.\n If len gt nodes > max_nodes_for_midppoints this argument is ignored.\n Defaults to ``0.012``.\n max_snap_dist : float\n Maximum distance a node can be snapped onto a graph.\n Defaults to ``4``.\n allow_renameing : boolean\n Switch to allow renaming of an existing node with node_id if the\n existing node is closest to the point. Defaults to ``False``.\n verbose : boolean\n Switch to print relevant values to screen. Defaults to ``False``.\n super_verbose : boolean\n Switch to print mucho values to screen. Defaults to ``False``.\n\n Return\n ------\n G_gt_cp, G_p_cp, G_gt_cp_prime, G_p_cp_prime, \\\n control_points_gt, control_points_prop, \\\n all_pairs_lengths_gt_native, all_pairs_lengths_prop_native, \\\n all_pairs_lengths_gt_prime, all_pairs_lengths_prop_prime : tuple\n G_gt_cp is ground truth with control points inserted\n G_p_cp is proposal with control points inserted\n G_gt_cp_prime is ground truth with control points from prop inserted\n G_p_cp_prime is proposal with control points from gt inserted\n all_pairs_lengths_gt_native is path length dict corresponding to G_gt_cp\n all_pairs_lengths_prop_native is path length dict corresponding to G_p_cp\n all_pairs_lengths_gt_prime is path length dict corresponding to G_gt_cp_prime\n all_pairs_lenfgths_prop_prime is path length dict corresponding to G_p_cp_prime \n \"\"\"\n\n t0 = time.time()\n print(\"Executing make_graphs()...\")\n\n print(\"Ensure\", weight, \"in gt graph prpperties\")\n print(\"type(G_gt_)\", type(G_gt_))\n for i, (u, v, data) in enumerate(G_gt_.edges(keys=False, data=True)):\n # print(\"G_gt_.edges[u, v]:\", G_gt_.edges[u, v])\n # print(\"G_gt_.edges[u, v][weight]:\", G_gt_.edges[u, v][weight])\n if weight not in data.keys():\n print(\"Error!\", weight, \"not in G_gt_ edge u, v, data:\", u, v, data)\n return\n\n print(\"Ensure G_gt 'geometry' is a shapely geometry, not a linestring...\")\n for i, (u, v, key, data) in enumerate(G_gt_.edges(keys=True, data=True)):\n if i == 0:\n print((\"u,v,key,data:\", u, v, key, data))\n print((\" type data['geometry']:\", type(data['geometry'])))\n try:\n line = data['geometry']\n except KeyError:\n line = data[0]['geometry']\n if type(line) == str: # or type(line) == unicode:\n data['geometry'] = shapely.wkt.loads(line)\n\n # create graph with midpoints\n G_gt0 = create_edge_linestrings(G_gt_.to_undirected())\n # create graph with linestrings?\n # G_gt_cp = G_gt.to_undirected()\n # G_gt_cp = create_edge_linestrings(G_gt.to_undirected())\n\n if verbose:\n print(\"len G_gt.nodes():\", len(list(G_gt0.nodes())))\n print(\"len G_gt.edges():\", len(list(G_gt0.edges())))\n\n if verbose:\n print(\"Creating gt midpoints\")\n G_gt_cp0, xms, yms = create_graph_midpoints(\n 
G_gt0.copy(),\n linestring_delta=linestring_delta,\n figsize=(0, 0),\n is_curved_eps=is_curved_eps,\n verbose=False)\n # add travel time\n G_gt_cp = add_travel_time(G_gt_cp0.copy(),\n speed_key=speed_key,\n travel_time_key=travel_time_key)\n\n # get ground truth control points\n control_points_gt = []\n for n in G_gt_cp.nodes():\n u_x, u_y = G_gt_cp.nodes[n]['x'], G_gt_cp.nodes[n]['y']\n control_points_gt.append([n, u_x, u_y])\n if verbose:\n print(\"len control_points_gt:\", len(control_points_gt))\n\n # get ground truth paths\n if verbose:\n print(\"Get ground truth paths...\")\n all_pairs_lengths_gt_native = dict(\n nx.shortest_path_length(G_gt_cp, weight=weight))\n # all_pairs_lengths_gt_native = dict(\n # nx.all_pairs_dijkstra_path_length(G_gt_cp, weight=weight))\n ###############\n\n ###############\n # Proposal\n\n print(\"Ensure\", weight, \"in prop graph prpperties\")\n print(\"type(G_p_)\", type(G_p_))\n for i, (u, v, data) in enumerate(G_p_.edges(keys=False, data=True)):\n if weight not in data.keys():\n print(\"Error!\", weight, \"not in G_p_ edge u, v, data:\", u, v, data)\n return\n\n # get proposal graph with native midpoints\n print(\"Ensure G_p 'geometry' is a shapely geometry, not a linestring...\")\n for i, (u, v, key, data) in enumerate(G_p_.edges(keys=True, data=True)):\n if i == 0:\n print((\"u,v,key,data:\", u, v, key, data))\n print((\" type data['geometry']:\", type(data['geometry'])))\n try:\n line = data['geometry']\n except:\n line = data[0]['geometry']\n if type(line) == str: # or type(line) == unicode:\n data['geometry'] = shapely.wkt.loads(line)\n\n G_p0 = create_edge_linestrings(G_p_.to_undirected())\n # add travel time\n G_p = add_travel_time(G_p0.copy(),\n speed_key=speed_key,\n travel_time_key=travel_time_key)\n\n if verbose:\n print(\"len G_p.nodes():\", len(G_p.nodes()))\n print(\"len G_p.edges():\", len(G_p.edges()))\n\n if verbose:\n print(\"Creating proposal midpoints\")\n G_p_cp0, xms_p, yms_p = create_graph_midpoints(\n G_p.copy(),\n linestring_delta=linestring_delta,\n figsize=(0, 0),\n is_curved_eps=is_curved_eps,\n verbose=False)\n # add travel time\n G_p_cp = add_travel_time(G_p_cp0.copy(),\n speed_key=speed_key,\n travel_time_key=travel_time_key)\n if verbose:\n print(\"len G_p_cp.nodes():\", len(G_p_cp.nodes()))\n print(\"len G_p_cp.edges():\", len(G_p_cp.edges()))\n\n # set proposal control nodes, originally just all nodes in G_p_cp\n # original method sets proposal control points as all nodes in G_p_cp\n # get proposal control points\n control_points_prop = []\n for n in G_p_cp.nodes():\n u_x, u_y = G_p_cp.nodes[n]['x'], G_p_cp.nodes[n]['y']\n control_points_prop.append([n, u_x, u_y])\n\n # get paths\n all_pairs_lengths_prop_native = dict(\n nx.shortest_path_length(G_p_cp, weight=weight))\n # nx.all_pairs_dijkstra_path_length(G_p_cp, weight=weight))\n\n ###############\n # insert gt control points into proposal\n if verbose:\n print(\"Inserting\", len(control_points_gt),\n \"control points into G_p...\")\n print(\"G_p.nodes():\", G_p.nodes())\n G_p_cp_prime0, xn_p, yn_p = insert_control_points(\n G_p.copy(), control_points_gt,\n max_distance_meters=max_snap_dist,\n allow_renaming=allow_renaming,\n verbose=super_verbose)\n # add travel time\n G_p_cp_prime = add_travel_time(G_p_cp_prime0.copy(),\n speed_key=speed_key,\n travel_time_key=travel_time_key)\n# G_p_cp, xn_p, yn_p = insert_control_points(G_p_cp, control_points_gt,\n# max_distance_meters=max_snap_dist,\n# allow_renaming=allow_renaming,\n# verbose=verbose)\n\n ###############\n # now 
insert control points into ground truth\n if verbose:\n print(\"\\nInserting\", len(control_points_prop),\n \"control points into G_gt...\")\n # permit renaming of inserted nodes if coincident with existing node\n G_gt_cp_prime0, xn_gt, yn_gt = insert_control_points(\n G_gt_,\n control_points_prop,\n max_distance_meters=max_snap_dist,\n allow_renaming=allow_renaming,\n verbose=super_verbose)\n # add travel time\n G_gt_cp_prime = add_travel_time(G_gt_cp_prime0.copy(),\n speed_key=speed_key,\n travel_time_key=travel_time_key)\n\n ###############\n # get paths\n all_pairs_lengths_gt_prime = dict(\n nx.shortest_path_length(G_gt_cp_prime, weight=weight))\n # nx.all_pairs_dijkstra_path_length(G_gt_cp_prime, weight=weight))\n all_pairs_lengths_prop_prime = dict(\n nx.shortest_path_length(G_p_cp_prime, weight=weight))\n # nx.all_pairs_dijkstra_path_length(G_p_cp_prime, weight=weight))\n\n tf = time.time()\n print(\"Time to run make_graphs in apls.py:\", tf - t0, \"seconds\")\n\n return G_gt_cp, G_p_cp, G_gt_cp_prime, G_p_cp_prime, \\\n control_points_gt, control_points_prop, \\\n all_pairs_lengths_gt_native, all_pairs_lengths_prop_native, \\\n all_pairs_lengths_gt_prime, all_pairs_lengths_prop_prime\n\n\n###############################################################################\ndef make_graphs_yuge(G_gt, G_p,\n weight='length',\n speed_key='inferred_speed_mps',\n travel_time_key='travel_time_s',\n max_nodes=500,\n max_snap_dist=4,\n allow_renaming=True,\n verbose=True, super_verbose=False):\n \"\"\"\n Match nodes in large ground truth and propsal graphs, and get paths.\n\n Notes\n -----\n Skip midpoint injection and only select a subset of routes to compare.\n The path length dictionaries returned by this function will be fed into\n compute_metric().\n\n Arguments\n ---------\n G_gt : networkx graph\n Ground truth graph.\n G_p : networkd graph\n Proposal graph over the same region.\n weight : str\n Key in the edge properties dictionary to use for the path length\n weight. Defaults to ``'length'``.\n speed_key : str\n Key in the edge properties dictionary to use for the edge speed.\n Defaults to ``'inferred_speed_mps'``.\n travel_time_key : str\n Name to assign travel time in the edge properties dictionary.\n Defaults to ``'travel_time_s'``.\n max_nodess : int\n Maximum number of gt nodes to inject midpoints. If there are more\n gt nodes than this, skip midpoints and use this number of points\n to comput APLS.\n max_snap_dist : float\n Maximum distance a node can be snapped onto a graph.\n Defaults to ``4``.\n allow_renameing : boolean\n Switch to allow renaming of an existing node with node_id if the\n existing node is closest to the point. Defaults to ``False``.\n verbose : boolean\n Switch to print relevant values to screen. Defaults to ``False``.\n super_verbose : boolean\n Switch to print mucho values to screen. 
Defaults to ``False``.\n\n Return\n ------\n G_gt_cp, G_p_cp, G_gt_cp_prime, G_p_cp_prime, \\\n control_points_gt, control_points_prop, \\\n all_pairs_lengths_gt_native, all_pairs_lengths_prop_native, \\\n all_pairs_lengths_gt_prime, all_pairs_lengths_prop_prime : tuple\n G_gt_cp is ground truth with control points inserted\n G_p_cp is proposal with control points inserted\n G_gt_cp_prime is ground truth with control points from prop inserted\n G_p_cp_prime is proposal with control points from gt inserted\n all_pairs_lengths_gt_native is path length dict corresponding to G_gt_cp\n all_pairs_lengths_prop_native is path length dict corresponding to G_p_cp\n all_pairs_lengths_gt_prime is path length dict corresponding to G_gt_cp_prime\n all_pairs_lenfgths_prop_prime is path length dict corresponding to G_p_cp_prime \n \"\"\"\n \n t0 = time.time()\n print(\"Executing make_graphs_yuge()...\")\n\n print(\"Ensure G_gt 'geometry' is a shapely geometry, not a linestring...\")\n for i, (u, v, key, data) in enumerate(G_gt.edges(keys=True, data=True)):\n if i == 0:\n try:\n print((\"u,v,key,data:\", u, v, key, data))\n except:\n pass\n print((\" type data['geometry']:\", type(data['geometry'])))\n try:\n line = data['geometry']\n except:\n line = data[0]['geometry']\n if type(line) == str: # or type(line) == unicode:\n data['geometry'] = shapely.wkt.loads(line)\n\n print(\"Ensure G_p 'geometry' is a shapely geometry, not a linestring...\")\n for i, (u, v, key, data) in enumerate(G_p.edges(keys=True, data=True)):\n if i == 0:\n print((\"u,v,key,data:\", u, v, key, data))\n print((\" type data['geometry']:\", type(data['geometry'])))\n try:\n line = data['geometry']\n except:\n line = data[0]['geometry']\n if type(line) == str: # or type(line) == unicode:\n data['geometry'] = shapely.wkt.loads(line)\n\n # create graph with linestrings?\n G_gt_cp = G_gt.to_undirected()\n # G_gt_cp = create_edge_linestrings(G_gt.to_undirected())\n if verbose:\n print(\"len(G_gt.nodes()):\", len(G_gt_cp.nodes()))\n print(\"len(G_gt.edges()):\", len(G_gt_cp.edges()))\n # print(\"G_gt.nodes():\", G_gt_cp.nodes())\n # print(\"G_gt.edges()):\", G_gt_cp.edges())\n # gt node and edge props\n node = random.choice(list(G_gt.nodes()))\n print(\"node:\", node, \"G_gt random node props:\", G_gt.nodes[node])\n edge_tmp = random.choice(list(G_gt.edges()))\n print(\"G_gt edge_tmp:\", edge_tmp)\n try:\n print(\"edge:\", edge_tmp, \"G_gt random edge props:\",\n G_gt.edges[edge_tmp[0]][edge_tmp[1]])\n except:\n try:\n print(\"edge:\", edge_tmp, \"G_gt random edge props:\",\n G_gt.edges[edge_tmp[0], edge_tmp[1], 0])\n except:\n pass\n # prop node and edge props\n node = random.choice(list(G_p.nodes()))\n print(\"node:\", node, \"G_p random node props:\", G_p.nodes[node])\n edge_tmp = random.choice(list(G_p.edges()))\n print(\"G_p edge_tmp:\", edge_tmp)\n try:\n print(\"edge:\", edge_tmp, \"G_p random edge props:\",\n G_p.edges[edge_tmp[0]][edge_tmp[1]])\n except:\n try:\n print(\"edge:\", edge_tmp, \"G_p random edge props:\",\n G_p.edges[edge_tmp[0], edge_tmp[1], 0])\n except:\n pass\n\n # get ground truth control points, which will be a subset of nodes\n sample_size = min(max_nodes, len(G_gt_cp.nodes()))\n rand_nodes_gt = random.sample(G_gt_cp.nodes(), sample_size)\n rand_nodes_gt_set = set(rand_nodes_gt)\n control_points_gt = []\n for itmp,n in enumerate(rand_nodes_gt):\n if verbose and (i % 20) == 0:\n print (\"control_point\", itmp, \":\", n, \":\", G_gt_cp.nodes[n])\n u_x, u_y = G_gt_cp.nodes[n]['x'], G_gt_cp.nodes[n]['y']\n 
control_points_gt.append([n, u_x, u_y])\n if verbose:\n print(\"len control_points_gt:\", len(control_points_gt))\n # add travel time\n G_gt_cp = add_travel_time(G_gt_cp,\n speed_key=speed_key,\n travel_time_key=travel_time_key)\n\n # get route lengths between all control points\n # gather all paths from nodes of interest, keep only routes to control nodes\n tt = time.time()\n if verbose:\n print(\"Computing all_pairs_lengths_gt_native...\")\n all_pairs_lengths_gt_native = {}\n for itmp, source in enumerate(rand_nodes_gt):\n if verbose and ((itmp % 50) == 0):\n print((itmp, \"source:\", source))\n paths_tmp = nx.single_source_dijkstra_path_length(\n G_gt_cp, source, weight=weight)\n # delete items\n for k in list(paths_tmp.keys()):\n if k not in rand_nodes_gt_set:\n del paths_tmp[k]\n all_pairs_lengths_gt_native[source] = paths_tmp\n if verbose:\n print((\"Time to compute all source routes for\",\n sample_size, \"nodes:\", time.time() - tt, \"seconds\"))\n\n # get individual routes (super slow!)\n #t0 = time.time()\n #all_pairs_lengths_gt_native = {}\n # for source in rand_nodes_gt:\n # print (\"source:\", source)\n # source_dic = {}\n # for target in rand_nodes_gt:\n # print (\"target:\", target)\n # p = nx.dijkstra_path_length(G_gt_init, source, target, weight=weight)\n # source_dic[target] = p\n # all_pairs_lengths_gt_native[source] = source_dic\n #print (\"Time to compute all source routes:\", time.time() - t0, \"seconds\")\n ## ('Time to compute all source routes:', 9.418055057525635, 'seconds')\n\n #all_pairs_lengths_gt_native = nx.all_pairs_dijkstra_path_length(G_gt_cp, weight=weight)\n ###############\n\n ###############\n # get proposal graph with native midpoints\n G_p_cp = G_p.to_undirected()\n #G_p_cp = create_edge_linestrings(G_p.to_undirected())\n if verbose:\n print(\"len G_p_cp.nodes():\", len(G_p_cp.nodes()))\n print(\"G_p_cp.edges():\", len(G_p_cp.edges()))\n\n # get control points, which will be a subset of nodes\n # (original method sets proposal control points as all nodes in G_p_cp)\n sample_size = min(max_nodes, len(G_p_cp.nodes()))\n rand_nodes_p = random.sample(G_p_cp.nodes(), sample_size)\n rand_nodes_p_set = set(rand_nodes_p)\n control_points_prop = []\n for n in rand_nodes_p:\n u_x, u_y = G_p_cp.nodes[n]['x'], G_p_cp.nodes[n]['y']\n control_points_prop.append([n, u_x, u_y])\n # add travel time\n G_p_cp = add_travel_time(G_p_cp,\n speed_key=speed_key,\n travel_time_key=travel_time_key)\n\n # get paths\n # gather all paths from nodes of interest, keep only routes to control nodes\n tt = time.time()\n if verbose:\n print(\"Computing all_pairs_lengths_prop_native...\")\n all_pairs_lengths_prop_native = {}\n for itmp, source in enumerate(rand_nodes_p):\n if verbose and ((itmp % 50) == 0):\n print((itmp, \"source:\", source))\n paths_tmp = nx.single_source_dijkstra_path_length(\n G_p_cp, source, weight=weight)\n # delete items\n for k in list(paths_tmp.keys()):\n if k not in rand_nodes_p_set:\n del paths_tmp[k]\n all_pairs_lengths_prop_native[source] = paths_tmp\n if verbose:\n print((\"Time to compute all source routes for\",\n max_nodes, \"nodes:\", time.time() - tt, \"seconds\"))\n\n ###############\n # insert gt control points into proposal\n if verbose:\n print(\"Inserting\", len(control_points_gt),\n \"control points into G_p...\")\n print(\"len G_p.nodes():\", len(G_p.nodes()))\n G_p_cp_prime, xn_p, yn_p = insert_control_points(\n G_p.copy(), control_points_gt, max_distance_meters=max_snap_dist,\n allow_renaming=allow_renaming, verbose=super_verbose)\n # add 
travel time\n G_p_cp_prime = add_travel_time(G_p_cp_prime,\n speed_key=speed_key,\n travel_time_key=travel_time_key)\n\n ###############\n # now insert control points into ground truth\n if verbose:\n print(\"\\nInserting\", len(control_points_prop),\n \"control points into G_gt...\")\n # permit renaming of inserted nodes if coincident with existing node\n G_gt_cp_prime, xn_gt, yn_gt = insert_control_points(\n G_gt, control_points_prop, max_distance_meters=max_snap_dist,\n allow_renaming=allow_renaming, verbose=super_verbose)\n G_gt_cp_prime = add_travel_time(G_gt_cp_prime,\n speed_key=speed_key,\n travel_time_key=travel_time_key)\n\n ###############\n # get paths for graphs_prime\n # gather all paths from nodes of interest, keep only routes to control nodes\n # gt_prime\n tt = time.time()\n all_pairs_lengths_gt_prime = {}\n if verbose:\n print(\"Computing all_pairs_lengths_gt_prime...\")\n G_gt_cp_prime_nodes_set = set(G_gt_cp_prime.nodes())\n# for source in G_gt_cp_prime_nodes_set:\n# if source in G_gt_cp_prime_nodes_set:\n# paths_tmp = nx.single_source_dijkstra_path_length(G_gt_cp_prime, source, weight=weight)\n for itmp, source in enumerate(rand_nodes_p_set):\n if verbose and ((itmp % 50) == 0):\n print((itmp, \"source:\", source))\n if source in G_gt_cp_prime_nodes_set:\n paths_tmp = nx.single_source_dijkstra_path_length(\n G_gt_cp_prime, source, weight=weight)\n # delete items\n for k in list(paths_tmp.keys()):\n if k not in rand_nodes_p_set:\n del paths_tmp[k]\n all_pairs_lengths_gt_prime[source] = paths_tmp\n if verbose:\n print((\"Time to compute all source routes for\",\n max_nodes, \"nodes:\", time.time() - tt, \"seconds\"))\n\n # prop_prime\n tt = time.time()\n all_pairs_lengths_prop_prime = {}\n if verbose:\n print(\"Computing all_pairs_lengths_prop_prime...\")\n G_p_cp_prime_nodes_set = set(G_p_cp_prime.nodes())\n# for source in G_p_cp_prime_nodes_set:\n# if source in G_p_cp_prime_nodes_set:\n# paths_tmp = nx.single_source_dijkstra_path_length(G_p_cp_prime, source, weight=weight)\n for itmp, source in enumerate(rand_nodes_gt_set):\n if verbose and ((itmp % 50) == 0):\n print((itmp, \"source:\", source))\n if source in G_p_cp_prime_nodes_set:\n paths_tmp = nx.single_source_dijkstra_path_length(\n G_p_cp_prime, source, weight=weight)\n # delete items\n for k in list(paths_tmp.keys()):\n if k not in rand_nodes_gt_set:\n del paths_tmp[k]\n all_pairs_lengths_prop_prime[source] = paths_tmp\n if verbose:\n print((\"Time to compute all source routes for\",\n max_nodes, \"nodes:\", time.time() - tt, \"seconds\"))\n\n #all_pairs_lengths_gt_prime = nx.all_pairs_dijkstra_path_length(G_gt_cp_prime, weight=weight)\n #all_pairs_lengths_prop_prime = nx.all_pairs_dijkstra_path_length(G_p_cp_prime, weight=weight)\n\n ###############\n tf = time.time()\n print(\"Time to run make_graphs_yuge in apls.py:\", tf - t0, \"seconds\")\n\n return G_gt_cp, G_p_cp, G_gt_cp_prime, G_p_cp_prime, \\\n control_points_gt, control_points_prop, \\\n all_pairs_lengths_gt_native, all_pairs_lengths_prop_native, \\\n all_pairs_lengths_gt_prime, all_pairs_lengths_prop_prime\n\n\n###############################################################################\ndef single_path_metric(len_gt, len_prop, diff_max=1):\n \"\"\"\n Compute APLS metric for single path.\n\n Notes\n -----\n Compute normalize path difference metric, if len_prop < 0, return diff_max\n\n Arguments\n ---------\n len_gt : float\n Length of ground truth edge.\n len_prop : float\n Length of proposal edge.\n diff_max : float\n Maximum value to return. 
Defaults to ``1``.\n\n Returns\n -------\n metric : float\n Normalized path difference.\n \"\"\"\n\n if len_gt <= 0:\n return 0\n elif len_prop < 0 and len_gt > 0:\n return diff_max\n else:\n diff_raw = np.abs(len_gt - len_prop) / len_gt\n return np.min([diff_max, diff_raw])\n\n\n###############################################################################\ndef path_sim_metric(all_pairs_lengths_gt, all_pairs_lengths_prop,\n control_nodes=[], min_path_length=10,\n diff_max=1, missing_path_len=-1, normalize=True,\n verbose=False):\n \"\"\"\n Compute metric for multiple paths.\n\n Notes\n -----\n Assume nodes in ground truth and proposed graph have the same names.\n Assume graph is undirected so don't evaluate routes in both directions\n control_nodes is the list of nodes to actually evaluate; if empty do all\n in all_pairs_lenghts_gt\n min_path_length is the minimum path length to evaluate\n https://networkx.github.io/documentation/networkx-2.2/reference/algorithms/shortest_paths.html\n\n Parameters\n ----------\n all_pairs_lengths_gt : dict\n Dictionary of path lengths for ground truth graph.\n all_pairs_lengths_prop : dict\n Dictionary of path lengths for proposal graph.\n control_nodes : list\n List of control nodes to evaluate.\n min_path_length : float\n Minimum path length to evaluate.\n diff_max : float\n Maximum value to return. Defaults to ``1``.\n missing_path_len : float\n Value to assign a missing path. Defaults to ``-1``.\n normalize : boolean\n Switch to normalize outputs. Defaults to ``True``.\n verbose : boolean\n Switch to print relevant values to screen. Defaults to ``False``.\n\n Returns\n -------\n C, diffs, routes, diff_dic\n C is the APLS score\n diffs is a list of the the route differences\n routes is a list of routes\n diff_dic is a dictionary of path differences\n \"\"\"\n\n diffs = []\n routes = []\n diff_dic = {}\n gt_start_nodes_set = set(all_pairs_lengths_gt.keys())\n prop_start_nodes_set = set(all_pairs_lengths_prop.keys())\n t0 = time.time()\n\n print()\n if len(gt_start_nodes_set) == 0:\n return 0, [], [], {}\n\n # set nodes to inspect\n if len(control_nodes) == 0:\n good_nodes = list(all_pairs_lengths_gt.keys())\n else:\n good_nodes = control_nodes\n\n if verbose:\n print(\"\\nComputing path_sim_metric()...\")\n print(\"good_nodes:\", good_nodes)\n\n # iterate overall start nodes\n # for start_node, paths in all_pairs_lengths.iteritems():\n for start_node in good_nodes:\n if verbose:\n print(\"start node:\", start_node)\n node_dic_tmp = {}\n\n # if we are not careful with control nodes, it's possible that the\n # start node will not be in all_pairs_lengths_gt, in this case use max\n # diff for all routes to that node\n # if the start node is missing from proposal, use maximum diff for\n # all possible routes to that node\n if start_node not in gt_start_nodes_set:\n print(\"for ss, node\", start_node, \"not in set\")\n print(\" skipping N paths:\", len(\n list(all_pairs_lengths_prop[start_node].keys())))\n for end_node, len_prop in all_pairs_lengths_prop[start_node].items():\n diffs.append(diff_max)\n routes.append([start_node, end_node])\n node_dic_tmp[end_node] = diff_max\n return\n\n paths = all_pairs_lengths_gt[start_node]\n\n # CASE 1\n # if the start node is missing from proposal, use maximum diff for\n # all possible routes to the start node\n if start_node not in prop_start_nodes_set:\n for end_node, len_gt in paths.items():\n if (end_node != start_node) and (end_node in good_nodes):\n diffs.append(diff_max)\n routes.append([start_node, end_node])\n 
node_dic_tmp[end_node] = diff_max\n diff_dic[start_node] = node_dic_tmp\n # print (\"start_node missing:\", start_node)\n continue\n\n # else get proposed paths\n else:\n paths_prop = all_pairs_lengths_prop[start_node]\n\n # get set of all nodes in paths_prop, and missing_nodes\n end_nodes_gt_set = set(paths.keys()).intersection(good_nodes)\n # end_nodes_gt_set = set(paths.keys()) # old version with all nodes\n\n end_nodes_prop_set = set(paths_prop.keys())\n missing_nodes = end_nodes_gt_set - end_nodes_prop_set\n if verbose:\n print(\"missing nodes:\", missing_nodes)\n\n # iterate over all paths from node\n for end_node in end_nodes_gt_set:\n # for end_node, len_gt in paths.iteritems():\n\n len_gt = paths[end_node]\n # skip if too short\n if len_gt < min_path_length:\n continue\n\n # get proposed path\n if end_node in end_nodes_prop_set:\n # CASE 2, end_node in both paths and paths_prop, so\n # valid path exists\n len_prop = paths_prop[end_node]\n else:\n # CASE 3: end_node in paths but not paths_prop, so assign\n # length as diff_max\n len_prop = missing_path_len\n\n if verbose:\n print(\"end_node:\", end_node)\n print(\" len_gt:\", len_gt)\n print(\" len_prop:\", len_prop)\n\n # compute path difference metric\n diff = single_path_metric(len_gt, len_prop, diff_max=diff_max)\n diffs.append(diff)\n routes.append([start_node, end_node])\n node_dic_tmp[end_node] = diff\n\n diff_dic[start_node] = node_dic_tmp\n\n if len(diffs) == 0:\n return 0, [], [], {}\n\n # compute Cost\n diff_tot = np.sum(diffs)\n if normalize:\n norm = len(diffs)\n diff_norm = diff_tot / norm\n C = 1. - diff_norm\n else:\n C = diff_tot\n\n print(\"Time to compute metric (score = \", C, \") for \", len(diffs),\n \"routes:\", time.time() - t0, \"seconds\")\n\n return C, diffs, routes, diff_dic\n\n\n###############################################################################\ndef compute_apls_metric(all_pairs_lengths_gt_native,\n all_pairs_lengths_prop_native,\n all_pairs_lengths_gt_prime,\n all_pairs_lengths_prop_prime,\n control_points_gt, control_points_prop,\n res_dir='', min_path_length=10,\n verbose=False, super_verbose=False):\n \"\"\"\n Compute APLS metric and plot results (optional)\n\n Notes\n -----\n Computes APLS and creates plots in res_dir (if it is not empty)\n\n Arguments\n ---------\n all_pairs_lengths_gt_native : dict\n Dict of paths for gt graph.\n all_pairs_lengths_prop_native : dict\n Dict of paths for prop graph.\n all_pairs_lengths_gt_prime : dict\n Dict of paths for gt graph with control points from prop.\n all_pairs_lengths_prop_prime : dict\n Dict of paths for prop graph with control points from gt.\n control_points_gt : list\n Array of control points.\n control_points_prop : list\n Array of control points.\n res_dir : str\n Output dir for plots. Defaults to ``''`` (no plotting).\n min_path_length : float\n Minimum path length to evaluate.\n verbose : boolean\n Switch to print relevant values to screen. Defaults to ``False``.\n super_verbose : boolean\n Switch to print mucho values to screen. 
Defaults to ``False``.\n\n Returns\n -------\n C_tot, C_gt_onto_prop, C_prop_onto_gt : tuple\n C_tot is the total APLS score\n C_gt_onto_prop is the score when inserting gt control nodes onto prop\n C_prop_onto_gt is the score when inserting prop control nodes onto gt\n \"\"\"\n\n t0 = time.time()\n\n # return 0 if no paths\n if (len(list(all_pairs_lengths_gt_native.keys())) == 0) \\\n or (len(list(all_pairs_lengths_prop_native.keys())) == 0):\n print(\"len(all_pairs_lengths_gt_native.keys()) == 0)\")\n return 0, 0, 0\n\n ####################\n # compute metric (gt to prop)\n # if verbose:\n print(\"Compute metric (gt snapped onto prop)\")\n # control_nodes = all_pairs_lengths_gt_native.keys()\n control_nodes = [z[0] for z in control_points_gt]\n if verbose:\n print((\"control_nodes_gt:\", control_nodes))\n C_gt_onto_prop, diffs, routes, diff_dic = path_sim_metric(\n all_pairs_lengths_gt_native,\n all_pairs_lengths_prop_prime,\n control_nodes=control_nodes,\n min_path_length=min_path_length,\n diff_max=1, missing_path_len=-1, normalize=True,\n verbose=super_verbose)\n dt1 = time.time() - t0\n if verbose:\n print(\"len(diffs):\", len(diffs))\n if len(diffs) > 0:\n print(\" max(diffs):\", np.max(diffs))\n print(\" min(diffs)\", np.min(diffs))\n if len(res_dir) > 0:\n scatter_png = os.path.join(\n res_dir, 'all_pairs_paths_diffs_gt_to_prop.png')\n hist_png = os.path.join(\n res_dir, 'all_pairs_paths_diffs_hist_gt_to_prop.png')\n # can't plot route names if there are too many...\n if len(routes) > 100:\n routes_str = []\n else:\n routes_str = [str(z[0]) + '-' + str(z[1]) for z in routes]\n apls_plots.plot_metric(\n C_gt_onto_prop, diffs, routes_str=routes_str,\n figsize=(10, 5), scatter_alpha=0.8, scatter_size=8,\n scatter_png=scatter_png,\n hist_png=hist_png)\n ######################\n\n ####################\n # compute metric (prop to gt)\n # if verbose:\n print(\"Compute metric (prop snapped onto gt)\")\n t1 = time.time()\n # control_nodes = all_pairs_lengths_prop_native.keys()\n control_nodes = [z[0] for z in control_points_prop]\n if verbose:\n print(\"control_nodes:\", control_nodes)\n C_prop_onto_gt, diffs, routes, diff_dic = path_sim_metric(\n all_pairs_lengths_prop_native,\n all_pairs_lengths_gt_prime,\n control_nodes=control_nodes,\n min_path_length=min_path_length,\n diff_max=1, missing_path_len=-1, normalize=True,\n verbose=super_verbose)\n dt2 = time.time() - t1\n if verbose:\n print(\"len(diffs):\", len(diffs))\n if len(diffs) > 0:\n print(\" max(diffs):\", np.max(diffs))\n print(\" min(diffs)\", np.min(diffs))\n if len(res_dir) > 0:\n scatter_png = os.path.join(\n res_dir, 'all_pairs_paths_diffs_prop_to_gt.png')\n hist_png = os.path.join(\n res_dir, 'all_pairs_paths_diffs_hist_prop_to_gt.png')\n if len(routes) > 100:\n routes_str = []\n else:\n routes_str = [str(z[0]) + '-' + str(z[1]) for z in routes]\n apls_plots.plot_metric(\n C_prop_onto_gt, diffs, routes_str=routes_str,\n figsize=(10, 5), scatter_alpha=0.8, scatter_size=8,\n scatter_png=scatter_png,\n hist_png=hist_png)\n\n ####################\n\n ####################\n # Total\n\n print(\"C_gt_onto_prop, C_prop_onto_gt:\", C_gt_onto_prop, C_prop_onto_gt)\n if (C_gt_onto_prop <= 0) or (C_prop_onto_gt <= 0) \\\n or (np.isnan(C_gt_onto_prop)) or (np.isnan(C_prop_onto_gt)):\n C_tot = 0\n else:\n C_tot = scipy.stats.hmean([C_gt_onto_prop, C_prop_onto_gt])\n if np.isnan(C_tot):\n C_tot = 0\n print(\"Total APLS Metric = Mean(\", np.round(C_gt_onto_prop, 2), \"+\",\n np.round(C_prop_onto_gt, 2),\n \") =\", np.round(C_tot, 2))\n 
print(\"Total time to compute metric:\", str(dt1 + dt2), \"seconds\")\n\n return C_tot, C_gt_onto_prop, C_prop_onto_gt\n\n\n###############################################################################\ndef gather_files(test_method, truth_dir, prop_dir,\n im_dir='',\n im_prefix='',\n gt_wkt_file='',\n prop_wkt_file='',\n max_files=1000,\n gt_subgraph_filter_weight='length',\n gt_min_subgraph_length=5,\n prop_subgraph_filter_weight='length_pix',\n prop_min_subgraph_length=10,\n use_pix_coords=True,\n speed_key='inferred_speed_mps',\n travel_time_key='travel_time_s',\n wkt_weight_key='travel_time_s',\n default_speed=13.41,\n verbose=False, super_verbose=False):\n \"\"\"\n Build lists of ground truth and proposal graphs\n\n Arguments\n ---------\n test_method : str\n Method for identifying and extraction graphs.\n Options:\n gt_pkl_prop_pkl = ground truth pickle, proposal pickle\n gt_json_prop_pkl = ground truth json, proposal pickle\n gt_json_prop_json = ground truth json, proposal json\n gt_json_prop_wkt = ground truth json, proposal csv in wkt format\n gt_wkt_prop_wkt = ground truth wkt, proposal csv in wkt format\n truth_dir : str\n Location of ground truth graphs.\n prop_dir : str\n Location of proposal graphs.\n im_dir : str\n Location of image files. Defaults to ``''``.\n im_prefix : str\n Prefix to prepend to image files. Defaults to ``''``.\n gt_wkt_file : str\n Location of ground truth wkt file if analyzing wkt predictions.\n Defaults to ``''``.\n prop_wkt_file : str\n Location of proposal wkt file if analyzing wkt predictions.\n Defaults to ``''``.\n max_files : int\n Maximum number of files to analyze. Defaults to ``1000``.\n gt_subgraph_filter_weight : str\n Edge key for filtering ground truth edge length.\n Defaults to ``'length'``.\n gt_min_subgraph_length : float\n Minimum length of the edge. Defaults to ``5``.\n prop_subgraph_filter_weight : str\n Edge key for filtering proposal edge length.\n Defaults to ``'length_pix'``.\n prop_min_subgraph_length : float\n Minimum length of the edge. Defaults to ``10``.\n use_pix_coords : boolean\n Switch to use pixel coords for calculations. Defaults to ``True``.\n speed_key : str\n Edge key for speed. Defaults to ``'inferred_speed_mps'``.\n travel_time_key : str\n Edge key for travel time. Defaults to ``'travel_time_s'``.\n default_speed : float\n Default speed for edge in m/s. Defaults to ``13.41`` (30 mph).\n verbose : boolean\n Switch to print relevant values to screen. Defaults to ``False``.\n super_verbose : boolean\n Switch to print mucho values to screen. 
Defaults to ``False``.\n\n Returns\n -------\n gt_list, gt_raw_list, gp_list, root_list, im_loc_list : tuple\n gt_list is a list of ground truth graphs.\n gp_list is a list of proposal graphs\n root_list is a list of names\n im_loc_list is the location of the images corresponding to root_list\n \"\"\"\n\n ###################\n # Get ground truth and proposal graphs\n print(\"Gathering files...\")\n\n ###################\n gt_list, gp_list, root_list, im_loc_list = [], [], [], []\n # gt_raw_list = []\n\n ###################\n # import a ground truth geojson and a pickled propoal graph\n if test_method == 'gt_pkl_prop_pkl':\n\n name_list = np.sort(os.listdir(truth_dir))\n for i, f in enumerate(name_list):\n if i >= max_files:\n break\n\n # skip non-pkl files\n if not f.endswith(('.gpickle', '.pkl')):\n continue\n\n # define values\n outroot = f.split('.')[0]\n print((\"outroot:\", outroot))\n gt_file = os.path.join(truth_dir, f)\n print(\"gt_file:\", gt_file)\n print(os.path.exists(gt_file))\n prop_file = os.path.join(prop_dir, outroot + '.gpickle')\n if not os.path.exists(prop_file):\n prop_file = os.path.join(prop_dir, 'fold0_RGB-PanSharpen_' \\\n + outroot + '.gpickle')\n im_file = os.path.join(im_dir, outroot + '.tif')\n # im_file = os.path.join(im_dir, im_prefix + outroot + '.tif')\n # Naming convention is inconsistent\n if not os.path.exists(prop_file):\n print(\"missing prop file:\", prop_file)\n continue\n\n # ground truth\n G_gt_init = nx.read_gpickle(gt_file)\n try:\n edge_tmp = list(G_gt_init.edges())[-1]\n print(\"gt random edge props for edge:\", edge_tmp, \" = \",\n G_gt_init.edges[edge_tmp[0], edge_tmp[1], 0])\n except UnicodeEncodeError:\n # pick another random edge\n edge_tmp = list(G_gt_init.edges())[0]\n print(\"gt random edge props for edge:\", edge_tmp, \" = \",\n G_gt_init.edges[edge_tmp[0], edge_tmp[1], 0])\n # for i,(u,v,attr_dict) in enumerate(G_gt_init.edges(data=True)):\n # print \"\\n\\natrr_dictk.keys():\", attr_dict.keys()\n # print (\" attr_dict;\", attr_dict)\n # return\n\n # proposal\n G_p_init = nx.read_gpickle(prop_file)\n edge_tmp = list(G_p_init.edges())[-1]\n print(\"prop random edge props for edge:\", edge_tmp, \" = \",\n G_p_init.edges[edge_tmp[0], edge_tmp[1], 0])\n\n# #########################\n# # OPTIONAL - simplify propposal\n# G_p_init1 = osmnx_funcs.simplify_graph(G_p_init.to_directed())\n# G_p_init2 = G_p_init1.to_undirected()\n# G_p_init = osmnx_funcs.project_graph(G_p_init2)\n# G_p_init = create_edge_linestrings(\n# G_p_init, remove_redundant=True, verbose=False)\n# # for i,(u,v,attr_dict) in enumerate(G_p_init.edges(data=True)):\n# # print \"\\n\\n\", u, v, \"atrr_dictk.keys():\", attr_dict.keys()\n# # print (\" attr_dict;\", attr_dict)\n# # print (\" attr_dict[geometry];\", attr_dict['geometry'])\n# #########################\n\n if G_gt_init.graph['crs'] != G_p_init.graph['crs']:\n print(\"Difference crs:\")\n print(\" G_gt_init.graph['crs']:\", G_gt_init.graph['crs'])\n print(\" G_p_init.graph['crs']:\", G_p_init.graph['crs'])\n\n # append to lists\n gt_list.append(G_gt_init)\n # gt_raw_list.append('')\n gp_list.append(G_p_init)\n root_list.append(outroot)\n im_loc_list.append(im_file)\n\n # use ground truth spacenet geojsons, and submission pkl files\n elif test_method == 'gt_json_prop_pkl':\n\n valid_road_types = set([]) # assume no road type in geojsons\n name_list = sorted(os.listdir(truth_dir))\n for i, f in enumerate(name_list):\n # skip non-geojson files\n if not f.endswith('.geojson'):\n continue\n\n if i >= max_files:\n break\n\n 
# define values\n if f.startswith('spacenetroads'):\n outroot = f.split('spacenetroads_')[-1].split('.')[0]\n prop_file = os.path.join(\n prop_dir, im_prefix + outroot + '.gpickle')\n im_file = os.path.join(im_dir, im_prefix + outroot + '.tif')\n elif f.startswith('osmroads'):\n outroot = f.split('osmroads_')[-1].split('.')[0]\n prop_file = os.path.join(\n prop_dir, im_prefix + outroot + '.gpickle')\n im_file = os.path.join(im_dir, im_prefix + outroot + '.tif')\n elif f.startswith('SN'):\n outroot = f.split('.')[0].replace('geojson_roads_speed_', '')\n prop_file = os.path.join(prop_dir, f.replace('geojson_roads_speed', 'PS-RGB')).replace('.geojson', '.gpickle')\n im_file = os.path.join(im_dir,\n f.split('.')[0].replace('geojson_roads_speed', 'PS-RGB') + '.tif')\n else:\n print(\"Naming convention for geojsons and pkls unknown\")\n return\n \n # if verbose:\n print(\"\\n\", i, \"outroot:\", outroot)\n gt_file = os.path.join(truth_dir, f)\n # im_file = os.path.join(im_dir, im_prefix + outroot + '.tif')\n #prop_file = os.path.join(\n # prop_dir, im_prefix + outroot + '.gpickle')\n if not os.path.exists(prop_file):\n print(\"prop file DNE, skipping:\", prop_file)\n continue\n\n #########\n # ground truth\n osmidx, osmNodeidx = 10000, 10000\n # G_gt_init, G_gt_cp, control_points, gt_graph_coords, midpoints_gt = \\\n G_gt_init, G_gt_raw = \\\n _create_gt_graph(gt_file, im_file, network_type='all_private',\n valid_road_types=valid_road_types,\n subgraph_filter_weight=gt_subgraph_filter_weight,\n min_subgraph_length=gt_min_subgraph_length,\n use_pix_coords=use_pix_coords,\n osmidx=osmidx, osmNodeidx=osmNodeidx,\n speed_key=speed_key,\n travel_time_key=travel_time_key,\n verbose=verbose)\n # skip empty ground truth graphs\n if len(G_gt_init.nodes()) == 0:\n continue\n if verbose:\n # print a node\n node = list(G_gt_init.nodes())[-1]\n print(node, \"gt random node props:\", G_gt_init.nodes[node])\n # print an edge\n edge_tmp = list(G_gt_init.edges())[-1]\n # G.edge[edge_tmp[0]][edge_tmp[1]])\n try:\n props = G_gt_init.edges[edge_tmp[0], edge_tmp[1], 0]\n except:\n props = G_gt_init.edges[edge_tmp[0], edge_tmp[1], \"0\"]\n print(\"gt random edge props for edge:\", edge_tmp, \" = \",\n props)\n\n # # # optional: save to pickle\n # outpickle = 'tmp.gpickle'\n # nx.write_gpickle(G_gt_init, outpickle)\n\n #########\n # proposal\n # print (\"load proposal...\")\n G_p_init = nx.read_gpickle(prop_file)\n # print a few values\n if verbose:\n # print a node\n try:\n node = list(G_p_init.nodes())[-1]\n print(node, \"prop random node props:\",\n G_p_init.nodes[node])\n # print an edge\n edge_tmp = list(G_p_init.edges())[-1]\n # G.edge[edge_tmp[0]][edge_tmp[1]])\n print(\"prop random edge props for edge:\", edge_tmp,\n \" = \", G_p_init.edges[edge_tmp[0], edge_tmp[1], 0])\n # print (edge_tmp, \"random edge props:\", G_p_init.edges([edge_tmp[0], edge_tmp[1]])) #G.edge[edge_tmp[0]][edge_tmp[1]])\n except:\n print(\"Empty proposal graph\")\n\n # append to lists\n gt_list.append(G_gt_init)\n # gt_raw_list.append(G_gt_raw)\n gp_list.append(G_p_init)\n root_list.append(outroot)\n im_loc_list.append(im_file)\n\n ###################\n # ingest multiple ground truth and propoal geojsons in a folder\n if test_method == 'gt_json_prop_json':\n\n name_list = os.listdir(truth_dir)\n for f in name_list:\n # skip non-geojson files\n if not f.endswith('.geojson'):\n continue\n\n # define values\n outroot = f.split('.')[0]\n print(\"\\n\\noutroot:\", outroot)\n gt_file = os.path.join(truth_dir, f)\n prop_file = 
os.path.join(prop_dir, outroot + '.geojson')\n # Naming convention is inconsistent\n if not os.path.exists(prop_file):\n prop_file = os.path.join(prop_dir, outroot + 'prop.geojson')\n im_file = ''\n valid_road_types = set([]) # assume no road type in geojsons\n\n # ground truth\n osmidx, osmNodeidx = 0, 0\n G_gt_init, G_gt_raw = \\\n _create_gt_graph(gt_file, im_file, network_type='all_private',\n valid_road_types=valid_road_types,\n use_pix_coords=use_pix_coords,\n osmidx=osmidx,\n osmNodeidx=osmNodeidx,\n speed_key=speed_key,\n travel_time_key=travel_time_key,\n verbose=verbose)\n\n # skip empty ground truth graphs\n if len(G_gt_init.nodes()) == 0:\n continue\n\n # proposal\n osmidx, osmNodeidx = 500, 500\n G_p_init, G_p_raw = \\\n _create_gt_graph(prop_file, im_file,\n network_type='all_private',\n # linestring_delta=args.linestring_delta,\n # is_curved_eps=args.is_curved_eps,\n valid_road_types=valid_road_types,\n use_pix_coords=use_pix_coords,\n osmidx=osmidx,\n osmNodeidx=osmNodeidx,\n verbose=verbose)\n # append to lists\n gt_list.append(G_gt_init)\n # gt_raw_list.append(G_gt_raw)\n gp_list.append(G_p_init)\n root_list.append(outroot)\n \n ###################\n # use ground truth spacenet geojsons, and submission wkt files\n elif test_method == 'gt_json_prop_wkt':\n\n name_list = os.listdir(truth_dir)\n for i, f in enumerate(sorted(name_list)):\n # skip non-geojson files\n if not f.endswith('.geojson'):\n continue\n\n if i >= max_files:\n break\n\n # define values\n outroot = f.split('spacenetroads_')[-1].split('.')[0]\n if verbose:\n print(\"\\n\", i, \"outroot:\", outroot)\n gt_file = os.path.join(truth_dir, f)\n\n im_file = os.path.join(im_dir,\n f.split('.')[0].replace('geojson_roads_speed', 'PS-RGB') + '.tif')\n\n # im_file = os.path.join(im_dir, im_prefix + outroot + '.tif')\n\n # print(\"im_dir:\", im_dir)\n # print(\"im_prefix:\", im_prefix)\n # print(\"outroot:\", outroot)\n valid_road_types = set([]) # assume no road type in geojsons\n\n #########\n # ground truth\n osmidx, osmNodeidx = 0, 0\n G_gt_init, G_gt_raw = \\\n _create_gt_graph(gt_file, im_file, network_type='all_private',\n # linestring_delta=args.linestring_delta,\n # is_curved_eps=args.is_curved_eps,\n valid_road_types=valid_road_types,\n subgraph_filter_weight=gt_subgraph_filter_weight,\n min_subgraph_length=gt_min_subgraph_length,\n use_pix_coords=use_pix_coords,\n osmidx=osmidx,\n osmNodeidx=osmNodeidx,\n speed_key=speed_key,\n travel_time_key=travel_time_key,\n verbose=verbose)\n # skip empty ground truth graphs\n if len(G_gt_init.nodes()) == 0:\n continue\n if verbose:\n # print a node\n node = list(G_gt_init.nodes())[-1]\n print(node, \"random node props:\", G_gt_init.nodes[node])\n # print an edge\n edge_tmp = list(G_gt_init.edges())[-1]\n # G.edge[edge_tmp[0]][edge_tmp[1]])\n print(\"random edge props for edge:\", edge_tmp, \" = \",\n G_gt_init.edges[edge_tmp[0], edge_tmp[1], 0])\n \n #########\n print(\"Retrieve proposal graph...\")\n # proposal\n # adapted from wkt_to_G.main()\n # read in wkt list\n df_wkt = pd.read_csv(prop_wkt_file)\n # columns=['ImageId', 'WKT_Pix'])\n \n # original version\n # image_id = outroot\n \n # SN5 version\n AOI_root = 'AOI' + f.split('AOI')[-1]\n image_id = AOI_root.split('.')[0].replace('geojson_roads_speed_', '')\n print(\"image_id\", image_id)\n\n # filter\n df_filt = df_wkt['WKT_Pix'][df_wkt['ImageId'] == image_id]\n wkt_list = df_filt.values\n weight_list = df_wkt[wkt_weight_key][df_wkt['ImageId'] == image_id].values\n\n # print a few values\n if verbose:\n print (i, 
\"/\", len(name_list), \"num linestrings:\", len(wkt_list))\n print(\"image_file:\", im_file, \"wkt_list[:2]\", wkt_list[:2])\n print(\"weight_list[:2]\", weight_list[:2])\n\n if (len(wkt_list) == 0) or (wkt_list[0] == 'LINESTRING EMPTY'):\n continue\n\n t1 = time.time()\n node_iter, edge_iter = 1000, 1000\n G_p_init = wkt_to_G.wkt_to_G(\n wkt_list, weight_list=weight_list, im_file=im_file,\n prop_subgraph_filter_weight=prop_subgraph_filter_weight,\n min_subgraph_length=prop_min_subgraph_length,\n node_iter=node_iter,\n edge_iter=edge_iter,\n verbose=super_verbose)\n t2 = time.time()\n # add travel time\n if 'time' in wkt_weight_key:\n for i, (u, v, data) in enumerate(G_p_init.edges(data=True)):\n data[travel_time_key] = data['weight']\n # if we're not weighting based on the wkt file, use generic\n else:\n G_p_init = add_travel_time(G_p_init, speed_key=speed_key,\n travel_time_key=travel_time_key,\n default_speed=default_speed)\n if verbose:\n print(\"Time to create graph:\", t2-t1, \"seconds\")\n # print a node\n node = list(G_p_init.nodes())[-1]\n print(node, \"random node props:\", G_p_init.nodes[node])\n # print an edge\n edge_tmp = list(G_p_init.edges())[-1]\n # G.edge[edge_tmp[0]][edge_tmp[1]])\n print(\"random edge props for edge:\", edge_tmp, \" = \",\n G_p_init.edges[edge_tmp[0], edge_tmp[1], 0])\n\n # append to lists\n gt_list.append(G_gt_init)\n # gt_raw_list.append(G_gt_raw)\n gp_list.append(G_p_init)\n root_list.append(outroot)\n im_loc_list.append(im_file)\n\n # use ground truth spacenet wkts, and submission wkt files\n elif test_method == 'gt_wkt_prop_wkt':\n\n im_list = os.listdir(im_dir)\n for i, f in enumerate(sorted(im_list)):\n # skip non-tif files\n if not f.endswith('.tif'):\n continue\n\n if i >= max_files:\n break\n\n im_file = os.path.join(im_dir, f)\n outroot = 'AOI' + f.split('.')[0].split('AOI')[-1].replace('PS-RGB_', '')\n \n # gt file\n df_wkt_gt = pd.read_csv(gt_wkt_file)\n image_id = outroot\n# # SN5 version\n# AOI_root = 'AOI' + f.split('AOI')[-1]\n# image_id = AOI_root.split('.')[0].replace('geojson_roads_speed_', '')\n# print(\"image_id\", image_id)\n\n # filter\n df_filt = df_wkt_gt['WKT_Pix'][df_wkt_gt['ImageId'] == image_id]\n wkt_list = df_filt.values\n weight_list = df_wkt_gt[wkt_weight_key][df_wkt_gt['ImageId'] == image_id].values\n\n # print a few values\n if verbose:\n print(\"\\n\", i, \"/\", len(im_list), \"num linestrings:\", len(wkt_list))\n print(\"image_file:\", im_file, \"wkt_list[:2]\", wkt_list[:2])\n print(\"weight_list[:3]\", weight_list[:3])\n print(\"image_id:\", image_id)\n\n\n if (len(wkt_list) == 0) or (wkt_list[0] == 'LINESTRING EMPTY'):\n continue\n\n t1 = time.time()\n node_iter, edge_iter = 10000, 10000\n G_gt_init0 = wkt_to_G.wkt_to_G(\n wkt_list, weight_list=weight_list, im_file=im_file,\n prop_subgraph_filter_weight=prop_subgraph_filter_weight,\n min_subgraph_length=prop_min_subgraph_length,\n node_iter=node_iter,\n edge_iter=edge_iter,\n verbose=super_verbose)\n # refine G_gt?\n G_gt_init = _refine_gt_graph(G_gt_init0, im_file, \n subgraph_filter_weight=gt_subgraph_filter_weight,\n min_subgraph_length=gt_min_subgraph_length,\n travel_time_key=travel_time_key,\n speed_key=speed_key,\n use_pix_coords=use_pix_coords,\n verbose=verbose,\n super_verbose=super_verbose)\n\n # add travel time\n if 'time' in wkt_weight_key:\n for i, (u, v, data) in enumerate(G_gt_init.edges(data=True)):\n data[travel_time_key] = data['weight']\n # if we're not weighting based on the wkt file, use generic\n else:\n G_gt_init = 
add_travel_time(G_gt_init, speed_key=speed_key,\n travel_time_key=travel_time_key,\n default_speed=default_speed)\n\n t2 = time.time()\n# # add travel time\n# G_gt_init = add_travel_time(G_gt_init0, speed_key=speed_key,\n# travel_time_key=travel_time_key,\n# default_speed=default_speed)\n\n # skip empty ground truth graphs\n if len(G_gt_init.nodes()) == 0:\n continue\n if verbose:\n # print a node\n node = list(G_gt_init.nodes())[-1]\n print(node, \"random node props:\", G_gt_init.nodes[node])\n # print an edge\n edge_tmp = list(G_gt_init.edges())[-1]\n # G.edge[edge_tmp[0]][edge_tmp[1]])\n print(\"random edge props for edge:\", edge_tmp, \" = \",\n G_gt_init.edges[edge_tmp[0], edge_tmp[1], 0])\n\n\n #########\n print(\"Retrieve proposal graph...\")\n # proposal\n # adapted from wkt_to_G.main()\n # read in wkt list\n df_wkt = pd.read_csv(prop_wkt_file)\n # columns=['ImageId', 'WKT_Pix'])\n\n # original version\n # image_id = outroot\n\n # # SN5 version\n # AOI_root = 'AOI' + f.split('AOI')[-1]\n # image_id = AOI_root.split('.')[0].replace('geojson_roads_speed_', '')\n # print(\"image_id\", image_id)\n\n # filter\n df_filt = df_wkt['WKT_Pix'][df_wkt['ImageId'] == image_id]\n wkt_list = df_filt.values\n weight_list = df_wkt[wkt_weight_key][df_wkt['ImageId'] == image_id].values\n\n # print a few values\n if verbose:\n print(i, \"/\", len(im_list), \"num linestrings:\", len(wkt_list))\n print(\"image_file:\", im_file, \"wkt_list[:2]\", wkt_list[:2])\n\n if (len(wkt_list) == 0) or (wkt_list[0] == 'LINESTRING EMPTY'):\n continue\n\n t1 = time.time()\n node_iter, edge_iter = 1000, 1000\n G_p_init = wkt_to_G.wkt_to_G(\n wkt_list, weight_list=weight_list, im_file=im_file,\n prop_subgraph_filter_weight=prop_subgraph_filter_weight,\n min_subgraph_length=prop_min_subgraph_length,\n node_iter=node_iter,\n edge_iter=edge_iter,\n verbose=super_verbose)\n\n t2 = time.time()\n # add travel time\n if 'time' in wkt_weight_key:\n for i, (u, v, data) in enumerate(G_p_init.edges(data=True)):\n data[travel_time_key] = data['weight']\n # if we're not weighting based on the wkt file, use generic\n else:\n G_p_init = add_travel_time(G_p_init, speed_key=speed_key,\n travel_time_key=travel_time_key,\n default_speed=default_speed)\n if verbose:\n print(\"Time to create graph:\", t2-t1, \"seconds\")\n # print a node\n node = list(G_p_init.nodes())[-1]\n print(node, \"random node props:\", G_p_init.nodes[node])\n # print an edge\n edge_tmp = list(G_p_init.edges())[-1]\n # G.edge[edge_tmp[0]][edge_tmp[1]])\n print(\"random edge props for edge:\", edge_tmp, \" = \",\n G_p_init.edges[edge_tmp[0], edge_tmp[1], 0])\n\n # append to lists\n gt_list.append(G_gt_init)\n # gt_raw_list.append(G_gt_raw)\n gp_list.append(G_p_init)\n root_list.append(outroot)\n im_loc_list.append(im_file)\n\n return gt_list, gp_list, root_list, im_loc_list\n\n\n###############################################################################\ndef execute(output_name, gt_list, gp_list, root_list, im_loc_list=[],\n weight='length',\n speed_key='inferred_speed_mps',\n travel_time_key='travel_time_s',\n test_method='gt_json_prop_json',\n max_files=1000,\n linestring_delta=50,\n is_curved_eps=10**3,\n max_snap_dist=4,\n max_nodes=500,\n n_plots=10,\n min_path_length=10,\n topo_hole_size=4,\n topo_subgraph_radius=150,\n topo_interval=30,\n sp_length_buffer=0.05,\n use_pix_coords=False,\n allow_renaming=True,\n verbose=True,\n super_verbose=False):\n \"\"\"\n Compute APLS for the input data in gt_list, gp_list\n\n Arguments\n ---------\n output_name : str\n 
Output name in apls/outputs\n weight : str\n Edge key determining path length weights. Defaults to ``'length'``.\n speed_key : str\n Edge key for speed. Defaults to ``'inferred_speed_mps'``.\n travel_time_key : str\n Edge key for travel time. Defaults to ``'travel_time_s'``.\n max_files : int\n Maximum number of files to analyze. Defaults to ``1000``.\n linestring_delta : float\n Distance in meters between linestring midpoints. Defaults to ``50``.\n is_curved_eps : float\n Minumum curvature for injecting nodes (if curvature is less than this\n value, no midpoints will be injected). If < 0, always inject points\n on line, regardless of curvature. Defaults to ``0.3``.\n max_snap_dist : float\n Maximum distance a node can be snapped onto a graph.\n Defaults to ``4``.\n max_nodes : int\n Maximum number of gt nodes to inject midpoints. If there are more\n gt nodes than this, skip midpoints and use this number of points\n to comput APLS.\n n_plots : int\n Number of graphs to create plots for. Defaults to ``10``.\n min_path_length : float\n Mimumum path length to consider for APLS. Defaults to ``10``.\n topo_hole_size : float\n Hole size for TOPO in meters. Defaults to ``4``.\n topo_subgraph_radius : float\n Radius to search for TOPO, in meters. Defaults to ``150.``\n topo_interval : float\n Spacing of points in meters to inject for TOPO. Defaults to ``30``.\n sp_length_buffer : float\n Fractional difference in lengths for SP metric. Defaults to ``0.05``.\n use_pix_coords : boolean\n Switch to use pixel coords for colculating lengths.\n Defaults to ``False``.\n allow_renaming : boolean\n Switch to rename nodes when injecting nodes into graphs.\n Defaulst to ``True``.\n verbose : boolean\n Switch to print relevant values to screen. Defaults to ``False``.\n super_verbose : boolean\n Switch to print mucho values to screen. 
Defaults to ``False``.\n\n Returns\n -------\n None\n \"\"\"\n\n # now compute results\n print(\"\\n\\n\\nCompute Results...\")\n C_arr = [[\"outroot\", \"APLS\", \"APLS_gt_onto_prop\", \"APLS_prop_onto_gt\",\n \"topo_tp_tot\", \"topo_fp_tot\", \"topo_fn_tot\", \"topo_precision\",\n \"topo_recall\", \"topo_f1\",\n \"sp_metric\", \"tot_meters_gt\", \"tot_meters_p\"]]\n\n # plot settings\n title_fontsize = 4\n dpi = 200\n show_plots = False\n show_node_ids = True\n fig_height, fig_width = 6, 6\n # path settings\n route_linewidth = 4\n source_color = 'red'\n target_color = 'green'\n # if using create_gt_graph, use the following road types\n # (empty set uses all road types)\n # https://wiki.openstreetmap.org/wiki/Key:highway\n valid_road_types = set([])\n # valid_road_types = set(['motorway', 'trunk', 'primary', 'secondary',\n # 'tertiary',\n # 'motorway_link', 'trunk_link', 'primary_link',\n # 'secondary_link', 'tertiary_link',\n # 'unclassified', 'residential', 'service'])\n ###################\n\n ##################\n # make dirs\n outdir_base = os.path.join(path_apls, 'outputs')\n print (\"Outdir base:\", outdir_base)\n outdir_base2 = os.path.join(outdir_base, str(output_name),\n 'weight=' + str(weight),\n test_method)\n print (\"Outdir with weight:\", outdir_base2)\n d_list = [outdir_base, outdir_base2]\n for p in d_list:\n if not os.path.exists(p):\n os.makedirs(p)\n\n ##################\n t0 = time.time()\n for i, [outroot, G_gt_init, G_p_init] in enumerate(zip(root_list, gt_list, gp_list)):\n\n if i >= max_files:\n break\n\n # copy image file to output dir, if desired\n if len(im_loc_list) > 0:\n # print (\"im_loc_list:\", im_loc_list)\n im_loc = im_loc_list[i]\n else:\n im_loc = ''\n\n print(\"\\n\\n\\n\", i+1, \"/\", len(root_list), \"Computing:\", outroot)\n t1 = time.time()\n\n # print a few properties\n print(\"len(G_gt_init.nodes():)\", len(G_gt_init.nodes()))\n print(\"len(G_gt_init.edges():)\", len(G_gt_init.edges()))\n print(\"len(G_p_init.nodes():)\", len(G_p_init.nodes()))\n print(\"len(G_p_init.edges():)\", len(G_p_init.edges()))\n\n# ##################\n# # make dirs\n# outdir_base = os.path.join(path_apls, 'outputs')\n# outdir_base2 = os.path.join(\n# outdir_base, output_name, 'weight=' + weight)\n# outdir = os.path.join(outdir_base2, outroot)\n# print(\"output dir:\", outdir)\n# os.makedirs(outdir, exist_ok=True)\n# # d_list = [outdir_base, outdir_base2, outdir]\n# # for p in d_list:\n# # #if not os.path.exists(p) and make_plots:\n# # if not os.path.exists(p):\n# # os.makedirs(p)\n# ##################\n\n # get graphs with midpoints and geometry (if small graph)\n print(\"\\nMake gt, prop graphs...\")\n if len(G_gt_init.nodes()) < 500: # 2000:\n G_gt_cp, G_p_cp, G_gt_cp_prime, G_p_cp_prime, \\\n control_points_gt, control_points_prop, \\\n all_pairs_lengths_gt_native, all_pairs_lengths_prop_native, \\\n all_pairs_lengths_gt_prime, all_pairs_lengths_prop_prime \\\n = make_graphs(G_gt_init, G_p_init,\n weight=weight,\n speed_key=speed_key,\n travel_time_key=travel_time_key,\n linestring_delta=linestring_delta,\n is_curved_eps=is_curved_eps,\n max_snap_dist=max_snap_dist,\n allow_renaming=allow_renaming,\n verbose=verbose)\n\n # get large graphs and paths\n else:\n G_gt_cp, G_p_cp, G_gt_cp_prime, G_p_cp_prime, \\\n control_points_gt, control_points_prop, \\\n all_pairs_lengths_gt_native, all_pairs_lengths_prop_native, \\\n all_pairs_lengths_gt_prime, all_pairs_lengths_prop_prime \\\n = make_graphs_yuge(G_gt_init, G_p_init,\n weight=weight,\n speed_key=speed_key,\n 
travel_time_key=travel_time_key,\n max_nodes=max_nodes,\n max_snap_dist=max_snap_dist,\n allow_renaming=allow_renaming,\n verbose=verbose,\n super_verbose=super_verbose)\n\n if verbose:\n print(\"\\nlen control_points_gt:\", len(control_points_gt))\n # print \"control_points_gt:\", control_points_gt\n if len(G_gt_init.nodes()) < 200:\n print(\"G_gt_init.nodes():\", G_gt_init.nodes())\n print(\"len G_gt_init.edges():\", len(G_gt_init.edges()))\n # for e in G_gt_init.edges():\n # print \" G_gt_init edge:\", e, G_gt_init.edges[e[0]][e[1]][0]['length']\n if len(G_gt_cp.nodes()) < 200:\n print(\"G_gt_cp.nodes():\", G_gt_cp.nodes())\n print(\"len G_gt_cp.nodes():\", len(G_gt_cp.nodes()))\n print(\"len G_gt_cp.edges():\", len(G_gt_cp.edges()))\n # for e in G_gt_cp.edges():\n # print \" G_gt_cp edge:\", e, G_gt_cp.edges[e[0]][e[1]][0]['length']\n print(\"len G_gt_cp_prime.nodes():\", len(G_gt_cp_prime.nodes()))\n print(\"len G_gt_cp_prime.edges():\", len(G_gt_cp_prime.edges()))\n\n print(\"\\nlen control_points_prop:\", len(control_points_prop))\n # print \"control_points_prop:\", control_points_prop\n if len(G_p_init.nodes()) < 200:\n print(\"G_p_init.nodes():\", G_p_init.nodes())\n print(\"len G_p_init.edges():\", len(G_p_init.edges()))\n if len(G_p_cp.nodes()) < 200:\n print(\"G_p_cp.nodes():\", G_p_cp.nodes())\n print(\"len G_p_cp.nodes():\", len(G_p_cp.nodes()))\n print(\"len G_p_cp.edges():\", len(G_p_cp.edges()))\n\n print(\"len G_p_cp_prime.nodes():\", len(G_p_cp_prime.nodes()))\n if len(G_p_cp_prime.nodes()) < 200:\n print(\"G_p_cp_prime.nodes():\", G_p_cp_prime.nodes())\n print(\"len G_p_cp_prime.edges():\", len(G_p_cp_prime.edges()))\n # print(\"G_p_cp_prime.edges():\", G_p_cp_prime.edges())\n\n print(\"len all_pairs_lengths_gt_native:\",\n len(dict(all_pairs_lengths_gt_native)))\n # for ktmp,vtmp in all_pairs_lengths_gt_native.iteritems():\n # print (\" key:\", ktmp, \"len(all_pairs_lengths_gt_native[key]):\", len(vtmp))\n print(\"len all_pairs_lengths_gt_prime:\",\n len(dict(all_pairs_lengths_gt_prime)))\n # for ktmp,vtmp in all_pairs_lengths_gt_prime.iteritems():\n # print (\" key:\", ktmp, \"len(all_pairs_lengths_gt_prime[key]):\", len(vtmp))\n print(\"len all_pairs_lengths_prop_native\",\n len(dict(all_pairs_lengths_prop_native)))\n # for ktmp,vtmp in all_pairs_lengths_prop_native.iteritems():\n # print (\" key:\", ktmp, \"len(all_pairs_lengths_prop_native[key]):\", len(vtmp))\n print(\"len all_pairs_lengths_prop_prime\",\n len(dict(all_pairs_lengths_prop_prime)))\n # for ktmp,vtmp in all_pairs_lengths_prop_prime.iteritems():\n # print (\" key:\", ktmp, \"len(all_pairs_lengths_prop_prime[key]):\", len(vtmp))\n\n #########################\n # Metric\n if i < n_plots:\n res_dir = outdir\n else:\n res_dir = ''\n C, C_gt_onto_prop, C_prop_onto_gt = compute_apls_metric(\n all_pairs_lengths_gt_native, all_pairs_lengths_prop_native,\n all_pairs_lengths_gt_prime, all_pairs_lengths_prop_prime,\n control_points_gt, control_points_prop,\n min_path_length=min_path_length,\n verbose=verbose, res_dir=res_dir)\n print(\"APLS Metric = \", C)\n\n print(\"\\nComputing TOPO Metric...\")\n n_measurement_nodes = max_nodes\n topo_vals = topo_metric.compute_topo(\n G_gt_init, G_p_init,\n subgraph_radius=topo_subgraph_radius,\n interval=topo_interval,\n hole_size=topo_hole_size,\n n_measurement_nodes=n_measurement_nodes,\n x_coord='x', y_coord='y',\n allow_multi_hole=False,\n make_plots=False, verbose=False)\n topo_tp_tot, topo_fp_tot, topo_fn_tot, topo_precision, topo_recall, topo_f1 = topo_vals\n 
print(\"TOPO Metric subgraph_radius, interval:\",\n topo_subgraph_radius, topo_interval)\n print(\"TOPO Metric =\", topo_vals, \"for\", n_measurement_nodes,\n \"nodes, subgraph_radius =\", topo_subgraph_radius)\n\n print(\"\\nComputing sp Metric...\")\n sp_n_routes = max_nodes\n _, sp = sp_metric.compute_sp(\n G_gt_init, G_p_init,\n x_coord='x', y_coord='y',\n weight=weight, query_radius=max_snap_dist,\n length_buffer=sp_length_buffer, n_routes=sp_n_routes,\n verbose=False, make_plots=False)\n print(\"sp_length_buffer:\", sp_length_buffer)\n print(\"sp Metric =\", sp, \"for\", sp_n_routes,\n \"routes, length buffer =\", sp_length_buffer)\n\n # get total length of edges\n # ground truth\n tot_meters_gt = 0\n for itmp, (u, v, attr_dict) in enumerate(G_gt_init.edges(data=True)):\n tot_meters_gt += attr_dict['length']\n print(\"Ground truth total length of edges (km):\", tot_meters_gt/1000)\n G_gt_init.graph['Tot_edge_km'] = tot_meters_gt/1000\n tot_meters_p = 0\n for itmp, (u, v, attr_dict) in enumerate(G_p_init.edges(data=True)):\n tot_meters_p += attr_dict['length']\n print(\"Proposal total length of edges (km):\", tot_meters_p/1000)\n G_p_init.graph['Tot_edge_km'] = tot_meters_p/1000\n\n # save scores\n f = open(os.path.join(outdir_base2, str(output_name) + '_'\n + 'weight=' + str(weight) + '_'\n + test_method\n + 'output__max_snap='\n + str(np.round(max_snap_dist, 2)) + 'm'\n + '_hole='\n + str(np.round(topo_hole_size, 2)) + 'm'\n + '.txt'), 'w')\n f.write(\"Ground Truth Nodes Snapped Onto Proposal Score: \" +\n str(C_gt_onto_prop) + \"\\n\")\n f.write(\"Proposal Nodes Snapped Onto Ground Truth Score: \" +\n str(C_prop_onto_gt) + \"\\n\")\n f.write(\"Total APLS Score: \" + str(C) + \"\\n\")\n f.write(\"TOPO vals - topo_tp_tot, topo_fp_tot, topo_fn_tot, topo_precision, topo_recall, topo_f1: \" + str(topo_vals) + \"\\n\")\n f.write(\"SP: \" + str(sp))\n f.close()\n\n t2 = time.time()\n print(\"Total time to create graphs and compute metric:\",\n t2-t1, \"seconds\")\n C_arr.append([outroot, C, C_gt_onto_prop, C_prop_onto_gt,\n # 0, 0, 0, 0, 0, 0,\n topo_tp_tot, topo_fp_tot, topo_fn_tot, topo_precision, topo_recall, topo_f1,\n # 0,\n sp,\n tot_meters_gt, tot_meters_p])\n\n ##################\n # PLOTS\n if i < n_plots:\n\n # # plot init0\n # if test_method == 'osmnx':\n # fig, ax = osmnx_funcs.plot_graph(G0, show=show_plots, close=False,\n # fig_height=fig_height,\n # fig_width=fig_width)\n # ax.set_title('Raw Ground Truth Graph', fontsize=title_fontsize)\n # plt.savefig(os.path.join(outdir, 'gt_graph_raw.png'), dpi=dpi)\n # plt.close('all')\n\n # skip plots if no nodes\n if (len(G_gt_cp.nodes()) == 0) or (len(G_p_cp.nodes()) == 0):\n continue\n\n # set graph size\n max_extent = max(fig_height, fig_width)\n xmin, xmax, ymin, ymax, dx, dy = apls_utils._get_graph_extent(\n G_gt_cp)\n if dx <= dy:\n fig_height = max_extent\n fig_width = max(1, 1. * max_extent * dx / dy)\n else:\n fig_width = max_extent\n fig_height = max(1, 1. 
* max_extent * dy / dx)\n if verbose:\n print(\"fig_width, fig_height:\", fig_width, fig_height)\n\n # plot ground truth\n fig, ax = osmnx_funcs.plot_graph(\n G_gt_init, show=show_plots, close=False,\n fig_height=fig_height, fig_width=fig_width)\n if show_node_ids:\n ax = apls_plots.plot_node_ids(\n G_gt_init, ax, fontsize=4) # node ids\n ax.set_title('Ground Truth Graph', fontsize=title_fontsize)\n # plt.show()\n plt.savefig(os.path.join(outdir, 'gt_graph.png'), dpi=dpi)\n # plt.clf()\n # plt.cla()\n plt.close('all')\n\n # gt midpoints\n fig0, ax0 = osmnx_funcs.plot_graph(\n G_gt_cp, show=show_plots, close=False,\n fig_height=fig_height, fig_width=fig_width)\n if show_node_ids:\n ax0 = apls_plots.plot_node_ids(\n G_gt_cp, ax0, fontsize=4) # node ids\n ax0.set_title('Ground Truth With Midpionts',\n fontsize=title_fontsize)\n # plt.show()\n plt.savefig(os.path.join(\n outdir, 'gt_graph_midpoints.png'), dpi=dpi)\n plt.close('all')\n\n # plot ground truth nodes from prop\n fig, ax = osmnx_funcs.plot_graph(\n G_gt_cp_prime, show=show_plots, close=False,\n fig_height=fig_height, fig_width=fig_width)\n if show_node_ids:\n ax = apls_plots.plot_node_ids(\n G_gt_cp_prime, ax, fontsize=4) # node ids\n ax.set_title(\n 'Ground Truth Graph with Proposal Control Nodes',\n fontsize=title_fontsize)\n # plt.show()\n plt.savefig(os.path.join(\n outdir, 'gt_graph_prop_control_points.png'), dpi=dpi)\n # plt.clf()\n # plt.cla()\n plt.close('all')\n\n # remove geometry to test whether we correctly added midpoints and edges\n Gtmp = G_gt_cp.copy() # G_gt_cp_prime.copy()\n for itmp, (u, v, key, data) in enumerate(Gtmp.edges(keys=True, data=True)):\n try:\n #line = data['geometry']\n data.pop('geometry', None)\n except:\n data[0].pop('geometry', None)\n fig, ax = osmnx_funcs.plot_graph(Gtmp, show=show_plots, close=False,\n fig_height=fig_height, fig_width=fig_width)\n ax.set_title(\n 'Ground Truth Graph (cp) without any geometry', size='x-small')\n # plt.tight_layout()\n plt.savefig(os.path.join(outdir, 'gt_without_geom.png'), dpi=dpi)\n plt.close('all')\n\n # plot proposal\n fig, ax = osmnx_funcs.plot_graph(\n G_p_init, show=show_plots, close=False,\n fig_height=fig_height, fig_width=fig_width)\n if show_node_ids:\n ax = apls_plots.plot_node_ids(\n G_p_init, ax, fontsize=4) # node ids\n ax.set_title('Proposal Graph', fontsize=title_fontsize)\n # plt.show()\n plt.savefig(os.path.join(outdir, 'prop_graph.png'), dpi=dpi)\n plt.close('all')\n\n # proposal midpoints\n fig0, ax0 = osmnx_funcs.plot_graph(\n G_p_cp, show=show_plots, close=False,\n fig_height=fig_height, fig_width=fig_width)\n if show_node_ids:\n ax = apls_plots.plot_node_ids(\n G_p_cp, ax0, fontsize=4) # node ids\n ax0.set_title('Proposal With Midpionts', fontsize=title_fontsize)\n # plt.show()\n plt.savefig(os.path.join(\n outdir, 'prop_graph_midpoints.png'), dpi=dpi)\n plt.close('all')\n\n # proposal midpoints\n fig0, ax0 = osmnx_funcs.plot_graph(\n G_p_cp_prime, show=show_plots, close=False,\n fig_height=fig_height, fig_width=fig_width)\n if show_node_ids:\n ax = apls_plots.plot_node_ids(\n G_p_cp_prime, ax0, fontsize=4) # node ids\n ax0.set_title('Proposal With Midpionts from GT',\n fontsize=title_fontsize)\n # plt.show()\n plt.savefig(os.path.join(\n outdir, 'prop_graph_midpoints_gt_control_points.png'), dpi=dpi)\n plt.close('all')\n\n # plot ground truth buffer and proposal graph\n # make sure geometry is in G_p_init\n G_tmp = G_p_init.copy()\n # for i,(u,v,attr_dict) in enumerate(G_tmp.edges(data=True)):\n # if 'geometry_wkt' in 
attr_dict.keys():\n # attr_dict['geometry'] = attr_dict['geometry_wkt']\n # print \"attr_dict:\", attr_dict\n fig, ax3 = osmnx_funcs.plot_graph(\n G_tmp, show=show_plots, close=False,\n fig_height=fig_height, fig_width=fig_width)\n try:\n apls_plots._plot_buff(G_gt_init, ax3, buff=max_snap_dist,\n color='yellow', alpha=0.3,\n title='',\n title_fontsize=title_fontsize, outfile='',\n verbose=False)\n except:\n print(\"Cannot make buffer plot...\")\n ax3.set_title('Propoal Graph with Ground Truth Buffer',\n fontsize=title_fontsize)\n # plt.show()\n plt.savefig(os.path.join(\n outdir, 'prop_graph_plus_gt_buff.png'), dpi=dpi)\n # plt.clf()\n # plt.cla()\n plt.close('all')\n\n # plot proposal buffer and ground truth graph\n fig, ax4 = osmnx_funcs.plot_graph(\n G_gt_init, show=show_plots, close=False,\n fig_height=fig_height, fig_width=fig_width)\n try:\n apls_plots._plot_buff(G_p_init, ax4, buff=max_snap_dist,\n color='yellow', alpha=0.3,\n title='',\n title_fontsize=title_fontsize, outfile='',\n verbose=False)\n except:\n print(\"Cannot make buffer plot...\")\n ax4.set_title('Ground Graph with Proposal Buffer',\n fontsize=title_fontsize)\n # plt.show()\n plt.savefig(os.path.join(\n outdir, 'gt_graph_plus_prop_buff.png'), dpi=dpi)\n # plt.clf()\n # plt.cla()\n plt.close('all')\n\n # remove geometry to test whether we correctly added midpoints and edges\n Gtmp = G_p_cp.copy() # G_gt_cp_prime.copy()\n for itmp, (u, v, key, data) in enumerate(Gtmp.edges(keys=True, data=True)):\n try:\n #line = data['geometry']\n data.pop('geometry', None)\n except:\n data[0].pop('geometry', None)\n fig, ax = osmnx_funcs.plot_graph(\n Gtmp, show=show_plots, close=False,\n fig_height=fig_height, fig_width=fig_width)\n ax.set_title(\n 'Proposal Graph (cp) without any geometry', size='x-small')\n # plt.tight_layout()\n plt.savefig(os.path.join(\n outdir, 'prop_cp_without_geom.png'), dpi=dpi)\n plt.close('all')\n\n # remove geometry to test whether we correctly added midpoints and edges\n Gtmp = G_p_init.copy() # G_gt_cp_prime.copy()\n for itmp, (u, v, key, data) in enumerate(Gtmp.edges(keys=True, data=True)):\n try:\n #line = data['geometry']\n data.pop('geometry', None)\n except:\n data[0].pop('geometry', None)\n fig, ax = osmnx_funcs.plot_graph(\n Gtmp, show=show_plots, close=False,\n fig_height=fig_height, fig_width=fig_width)\n ax.set_title('Proposal Graph without any geometry', size='x-small')\n # plt.tight_layout()\n plt.savefig(os.path.join(outdir, 'prop_without_geom.png'), dpi=dpi)\n plt.close('all')\n\n ###################################\n # plot some paths...\n # get source and target nodes\n\n # use idxs?\n #source_idx = np.random.randint(0,len(G_gt_cp.nodes()))\n #target_idx = np.random.randint(0,len(G_gt_cp.nodes()))\n # specify source and target node, if desired\n # if len(gt_file) == 0:\n ## source_idx = 27\n ## target_idx = 36\n # print \"source_idx:\", source_idx\n # print \"target_idx:\", target_idx\n #source = G_gt_cp.nodes()[source_idx]\n #target = G_gt_cp.nodes()[target_idx]\n\n # get a random source and target that are in both ground truth and prop\n if len(G_gt_cp.nodes()) < 200:\n print(\"G_gt_cp.nodes():\", G_gt_cp.nodes())\n if len(G_gt_cp_prime.nodes()) < 200:\n print(\"G_p_cp_prime.nodes():\", G_gt_cp_prime.nodes())\n possible_sources = set(G_gt_cp.nodes()).intersection(\n set(G_p_cp_prime.nodes()))\n if len(possible_sources) == 0:\n continue\n source = random.choice(list(possible_sources))\n possible_targets = set(G_gt_cp.nodes()).intersection(\n set(G_p_cp_prime.nodes())) - 
set([source])\n if len(possible_targets) == 0:\n continue\n target = random.choice(list(possible_targets))\n print(\"source, target:\", source, target)\n\n # compute paths to node of interest, and plot\n t0 = time.time()\n lengths, paths = nx.single_source_dijkstra(\n G_gt_cp, source=source, weight=weight)\n print(\"Time to calculate:\", len(lengths),\n \"paths:\", time.time() - t0, \"seconds\")\n\n # plot a single route\n try:\n fig, ax = osmnx_funcs.plot_graph_route(G_gt_cp, paths[target],\n route_color='yellow',\n route_alpha=0.8,\n orig_dest_node_alpha=0.3,\n orig_dest_node_size=120,\n route_linewidth=route_linewidth,\n orig_dest_node_color=target_color,\n show=show_plots,\n close=False,\n fig_height=fig_height,\n fig_width=fig_width)\n plen = np.round(lengths[target], 2)\n except:\n print(\"Proposal route not possible\")\n fig, ax = plt.subplots()\n plen = -1\n title = \"Ground Truth Graph, L = \" + str(plen)\n source_x = G_gt_cp.nodes[source]['x']\n source_y = G_gt_cp.nodes[source]['y']\n ax.scatter(source_x, source_y, color=source_color, s=75)\n t_x = G_gt_cp.nodes[target]['x']\n t_y = G_gt_cp.nodes[target]['y']\n ax.scatter(t_x, t_y, color=target_color, s=75)\n ax.set_title(title, fontsize=title_fontsize)\n # plt.show()\n plt.savefig(os.path.join(\n outdir, 'single_source_route_ground_truth.png'), dpi=dpi)\n plt.close('all')\n\n # get all paths from source for proposal graph\n lengths_prop, paths_prop = nx.single_source_dijkstra(\n G_p_cp_prime, source=source, weight=weight)\n gt_set = set(lengths.keys())\n prop_set = set(lengths_prop.keys())\n missing_nodes = gt_set - prop_set\n print(\"Proposal route missing nodes:\", missing_nodes)\n\n ##############\n # compute path to node of interest\n t0 = time.time()\n lengths_ptmp, paths_ptmp = nx.single_source_dijkstra(\n G_p_cp_prime, source=source, weight=weight)\n print(\"Time to calculate:\", len(lengths),\n \"paths:\", time.time() - t0, \"seconds\")\n\n # plot a single route\n try:\n fig, ax = osmnx_funcs.plot_graph_route(\n G_p_cp_prime, paths_ptmp[target],\n route_color='yellow',\n route_alpha=0.8,\n orig_dest_node_alpha=0.3,\n orig_dest_node_size=120,\n route_linewidth=route_linewidth,\n orig_dest_node_color=target_color,\n show=show_plots,\n close=False,\n fig_height=fig_height,\n fig_width=fig_width)\n #title= \"Source-\" + source_color + \" \" + str(source) + \" Target: \" + str(target)\n plen = np.round(lengths_ptmp[target], 2)\n except:\n print(\"Proposal route not possible\")\n fig, ax = plt.subplots()\n plen = -1\n title = \"Proposal Graph, L = \" + str(plen)\n source_x = G_p_cp_prime.nodes[source]['x']\n source_y = G_p_cp_prime.nodes[source]['y']\n ax.scatter(source_x, source_y, color=source_color, s=75)\n t_x = G_p_cp_prime.nodes[target]['x']\n t_y = G_p_cp_prime.nodes[target]['y']\n ax.scatter(t_x, t_y, color=target_color, s=75)\n ax.set_title(title, fontsize=title_fontsize)\n # plt.show()\n plt.savefig(os.path.join(\n outdir, 'single_source_route_prop.png'), dpi=dpi)\n plt.close('all')\n\n # overlay plot on image\n if len(im_loc_list) > 0:\n # print (\"im_loc_list:\", im_loc_list)\n image_path = im_loc_list[i]\n if os.path.exists(image_path):\n\n # copy image file to output dir, if desired\n # shutil.copy(image_path, outdir)\n\n # plot graphs overlaid on image\n # width_key, width_mult = 'speed_mph', 0.3\n gt_color, prop_color = 'cyan', 'lime'\n image_name = outroot\n figname = os.path.join(outdir, 'overlaid.png')\n _ = apls_plots._plot_gt_prop_graphs(\n G_gt_init, G_p_init, image_path,\n figsize=(16, 8), 
show_endnodes=True,\n width_key=2, # width_key,\n width_mult=1,\n gt_color=gt_color, prop_color=prop_color,\n default_node_size=20,\n title=image_name, adjust=False,\n figname=figname, verbose=super_verbose)\n\n #############\n t2 = time.time()\n print(\"Total time to create graphs, compute metric, and plot:\",\n t2-t0, \"seconds\")\n\n # print and save total cost\n print((\"C_arr:\", C_arr))\n tf = time.time()\n print((\"Time to compute metric:\", tf - t0, \"seconds\"))\n print((\"N input images:\", len(root_list)))\n #df = pd.DataFrame(C_arr[1:], columns=C_arr[0])\n #df.to_csv(os.path.join(outdir_base2, 'scores.csv'))\n #print((\"len df:\", len(df)))\n\n # Compute mean of C\n #print (\"np.array(C_arr):\", np.array(C_arr))\n means = np.mean(np.array(C_arr)[1:, 1:].astype(float), axis=0)\n C_arr.append(['means'] + list(means))\n stds = np.std(np.array(C_arr)[1:, 1:].astype(float), axis=0)\n C_arr.append(['stds'] + list(stds))\n\n # save to csv\n path_csv = os.path.join(outdir_base2,\n 'scores__max_snap='\n + str(np.round(max_snap_dist, 2)) + 'm'\n + '_hole='\n + str(np.round(topo_hole_size, 2)) + 'm'\n + '.csv')\n print(\"Save to csv:\", path_csv)\n df = pd.DataFrame(C_arr[1:], columns=C_arr[0])\n print(\"len df:\", len(df))\n df.to_csv(path_csv)\n\n print((\"Tot APLS = np.mean(APLS_arr:\", np.mean(df['APLS'].values)))\n\n return\n\n\n###############################################################################\ndef main():\n '''Explore'''\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--output_name', default='apls_test0', type=str,\n help='Output folder name in apls/outputs')\n parser.add_argument('--test_method', default='gt_pkl_prop_pkl', type=str,\n help=\"Method for creating ground truth and proposal \"\n + \"graphs. Options:\"\n + \" gt_pkl_prop_pkl = ground truth pickle, proposal pickle\"\n + \" gt_json_prop_pkl = ground truth json, proposal pickle\"\n + \"gt_json_prop_json = ground truth json, proposal json\"\n + \"gt_json_prop_wkt = ground truth json, proposal csv in wkt format\"\n + \"gt_wkt_prop_wkt = ground truth wkt, proposal wkt\"\n )\n parser.add_argument('--truth_dir', default='', type=str,\n help='Location of ground truth graphs')\n parser.add_argument('--prop_dir', default='', type=str,\n help='Location of proposal graphs')\n parser.add_argument('--im_dir', default='', type=str,\n help='Location of images (optional)')\n parser.add_argument('--im_prefix', default='RGB-PanSharpen_', type=str,\n help='Prefix of image files')\n parser.add_argument('--gt_wkt_file', default='', type=str,\n help='Location of ground truth wkt file')\n parser.add_argument('--prop_wkt_file', default='', type=str,\n help='Location of prediction wkt file')\n parser.add_argument('--max_snap_dist', default=4, type=int,\n help='Buffer distance (meters) around graph')\n parser.add_argument('--topo_hole_size', default=4, type=float,\n help='hole size in meters for TOPO metric')\n parser.add_argument('--topo_subgraph_radius', default=150, type=float,\n help='Subgraph radius for TOPO')\n parser.add_argument('--topo_interval', default=30, type=float,\n help='Hole spacing for TOPO')\n parser.add_argument('--sp_length_buffer', default=0.05, type=float,\n help='Fractional length differnence for SP metric')\n parser.add_argument('--linestring_delta', default=50, type=int,\n help='Distance between midpoints on edges')\n parser.add_argument('--is_curved_eps', default=-1, type=float,\n help='Line curvature above which midpoints will be'\n ' injected, (< 0 to inject midpoints on straight'\n ' 
lines). 0.12 is a good value if not all lines are '\n ' to be used')\n parser.add_argument('--min_path_length', default=0.001, type=float,\n help='Minimum path length to consider for metric')\n parser.add_argument('--max_nodes', default=1000, type=int,\n help='Maximum number of nodes to compare for APLS'\n ' metric')\n parser.add_argument('--max_files', default=100, type=int,\n help='Maximum number of graphs to analyze')\n parser.add_argument('--weight', default='length', type=str,\n help='Weight for APLS metric [length, travel_time_s')\n parser.add_argument('--speed_key', default='inferred_speed_mps', type=str,\n help='Key in edge properties for speed')\n parser.add_argument('--travel_time_key', default='travel_time_s', type=str,\n help='Key in edge properties for travel_time')\n parser.add_argument('--wkt_weight_key', default='travel_time_s', type=str,\n help='Key in wkt files for edge weights')\n parser.add_argument('--default_speed', default=13.41, type=float,\n help='Default speed of edge in m/s'\n ' (13.41 m/s = 30 mph)')\n parser.add_argument('--n_plots', default=12, type=int,\n help='Number of plots to make when computing APLS')\n parser.add_argument('--use_pix_coords', default=1, type=int,\n help='Switch to use pix coords, defaults to 1 (True)')\n parser.add_argument('--allow_renaming', default=1, type=int,\n help='Switch to rename nodes. Defaults to 1 (True)')\n\n args = parser.parse_args()\n\n # Filtering parameters (shouldn't need changed)\n args.gt_subgraph_filter_weight = 'length'\n args.gt_min_subgraph_length = 5\n args.prop_subgraph_filter_weight = 'length_pix'\n args.prop_min_subgraph_length = 10 # GSD = 0.3\n\n # general settings\n verbose = True\n super_verbose = False\n\n # Gather files\n gt_list, gp_list, root_list, im_loc_list = gather_files(\n args.test_method,\n args.truth_dir,\n args.prop_dir,\n im_dir=args.im_dir,\n im_prefix=args.im_prefix,\n gt_wkt_file=args.gt_wkt_file,\n prop_wkt_file=args.prop_wkt_file,\n max_files=args.max_files,\n gt_subgraph_filter_weight=args.gt_subgraph_filter_weight,\n gt_min_subgraph_length=args.gt_min_subgraph_length,\n prop_subgraph_filter_weight=args.prop_subgraph_filter_weight,\n prop_min_subgraph_length=args.prop_min_subgraph_length,\n use_pix_coords=bool(args.use_pix_coords),\n speed_key=args.speed_key,\n travel_time_key=args.travel_time_key,\n wkt_weight_key=args.wkt_weight_key,\n default_speed=args.default_speed,\n verbose=verbose,\n super_verbose=super_verbose)\n\n # Compute\n execute(\n args.output_name, gt_list, gp_list, root_list,\n im_loc_list=im_loc_list,\n test_method=args.test_method,\n weight=args.weight,\n speed_key=args.speed_key,\n travel_time_key=args.travel_time_key,\n max_files=args.max_files,\n linestring_delta=args.linestring_delta,\n is_curved_eps=args.is_curved_eps,\n max_snap_dist=args.max_snap_dist,\n max_nodes=args.max_nodes,\n n_plots=args.n_plots,\n min_path_length=args.min_path_length,\n topo_hole_size=args.topo_hole_size,\n topo_subgraph_radius=args.topo_subgraph_radius,\n topo_interval=args.topo_interval,\n sp_length_buffer=args.sp_length_buffer,\n use_pix_coords=bool(args.use_pix_coords),\n allow_renaming=bool(args.allow_renaming),\n verbose=verbose,\n super_verbose=super_verbose)\n\n\n###############################################################################\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"pandas.read_csv",
"numpy.abs",
"numpy.linspace",
"numpy.min",
"numpy.isnan",
"numpy.arange",
"matplotlib.pyplot.subplots",
"pandas.DataFrame",
"numpy.round",
"numpy.max",
"numpy.argmin",
"numpy.mean",
"matplotlib.pyplot.close",
"matplotlib.pyplot.axis",
"numpy.array",
"numpy.sum"
]
] |
sbhaktha/ruletaker-1
|
[
"61ab221c3bd409d4aa46def30cb788042cc56969"
] |
[
"theory_generator.py"
] |
[
"import argparse\nimport common\nfrom common import (\n Example,\n Fact,\n Rule,\n Theory,\n TheoryAssertionInstance,\n TheoryAssertionRepresentationWithLabel,\n)\nimport json\n\nimport nltk\nfrom nltk import Nonterminal, PCFG\nfrom numpy.random import choice\nimport random\n\nimport problog\nfrom problog.program import PrologString\nfrom problog.core import ProbLog\nfrom problog import get_evaluatable\nfrom problog.engine import NonGroundProbabilisticClause, UnknownClause\nfrom problog.engine_stack import NegativeCycle\nfrom problog.formula import LogicFormula, LogicDAG\nfrom problog.sdd_formula import SDD\n\nfrom tqdm.auto import tqdm\n\nimport utils\nfrom utils import parse_fact, parse_rule\n\n\nclass TheoremProverConfig:\n \"\"\"Config for the theory generation, read from a json input config file.\n Sample config:\n {\n \"theory\": {\n \"num_examples\": 200,\n \"statement_types_per_example\": [ {\n \"start_symbol\": \"Fact\",\n \"num_statements_range\": [1, 16]\n },\n {\n \"start_symbol\": \"Rule\",\n \"num_statements_range\": [1, 8]\n }\n ],\n \"theorem_prover\": {\n \"fact_nonterminals\": [\"Fact\"],\n \"rule_nonterminals\": [\"Rule\"],\n \"predicate_nonterminals\": [\"Attribute\", \"Relation\"],\n \"variable_nonterminals\": [\"Variable\"],\n \"constant_nonterminals\": [\"Entity\"]\n }\n },\n \"assertion\": {\n \"start_symbol\": \"Fact\"\n }\n }\n \"\"\"\n\n def __init__(self, grammar, **config_args):\n def expand_nonterminal(nonterminal, grammar):\n \"\"\"Generate sentences for a given grammar production, identified by the LHS Nonterminal.\n Return a collection of strings.\"\"\"\n productions = [\n item\n for item in grammar.productions()\n if item.lhs().symbol() == nonterminal\n ]\n generated_sentences_for_noterminal = []\n for production in productions:\n rhs = production.rhs()\n sentences = []\n for item in rhs:\n if isinstance(item, Nonterminal):\n curr_sentences = expand_nonterminal(item.symbol(), grammar)\n else:\n curr_sentences = [item]\n if len(sentences) == 0:\n sentences += curr_sentences\n else:\n new_sentences = []\n for sentence in sentences:\n for curr_sentence in curr_sentences:\n new_sentences.append(f\"{sentence} {curr_sentence}\")\n sentences = new_sentences\n generated_sentences_for_noterminal.extend(sentences)\n return generated_sentences_for_noterminal\n\n def initialize_terms(grammar):\n \"\"\"Enumerate all the terms- predicates, variables and constants in the given grammar.\"\"\"\n for nt in self.predicate_nonterminals:\n predicate_terms = expand_nonterminal(nt, grammar)\n predicate_terms = [utils.predicatize(term) for term in predicate_terms]\n self.predicates = predicate_terms\n for nt in self.variable_nonterminals:\n variable_terms = expand_nonterminal(nt, grammar)\n variable_terms = [utils.variablize(term) for term in variable_terms]\n self.variables = variable_terms\n for nt in self.constant_nonterminals:\n constant_terms = expand_nonterminal(nt, grammar)\n constant_terms = [utils.constantize(term) for term in constant_terms]\n self.constants = constant_terms\n\n self.grammar = grammar\n self.predicates = []\n self.variables = []\n self.constants = []\n self.fact_nonterminals = []\n self.rule_nonterminals = []\n self.predicate_nonterminals = []\n self.variable_nonterminals = []\n self.constant_nonterminals = []\n for key, value in config_args.items():\n setattr(self, key, value)\n initialize_terms(grammar)\n\n\ndef choose_production(grammar, nonterminal):\n \"\"\"Choose a production with specified nonterminal as LHS based on the probability distibution\n 
of the grammar.\"\"\"\n productions = [\n item for item in grammar.productions() if item.lhs().symbol() == nonterminal\n ]\n if len(productions) == 0:\n raise ValueError(f\"Nonterminal {nonterminal} not found in the grammar!\")\n probabilities = [production.prob() for production in productions]\n chosen_production = choice(productions, p=probabilities)\n return chosen_production\n\n\ndef generate_random_statement(grammar, nonterminal, theorem_prover_config):\n \"\"\"Generate a random statement from the given nonterminal LHS in the grammar.\"\"\"\n chosen_production = choose_production(grammar, nonterminal)\n rhs = chosen_production.rhs()\n sentence = \"\"\n for item in rhs:\n if isinstance(item, Nonterminal):\n item_generated_statement = generate_random_statement(\n grammar, item.symbol(), theorem_prover_config\n )\n else:\n if nonterminal in theorem_prover_config.predicate_nonterminals:\n item = utils.predicatize(item)\n elif nonterminal in theorem_prover_config.variable_nonterminals:\n item = utils.variablize(item)\n elif nonterminal in theorem_prover_config.constant_nonterminals:\n item = utils.constantize(item)\n item_generated_statement = item\n if len(sentence) > 0:\n sentence += \" \"\n sentence += item_generated_statement\n return sentence\n\n\ndef run_theory_in_problog(theory, assertion):\n \"\"\"Run the given theory and assertion through ProbLog engine to obtain a True/False label.\n If an exception is encountered, return None so that this example will not be part of output.\"\"\"\n theorem_prover = \"problog\"\n try:\n program = theory.program(theorem_prover, assertion)\n lf = LogicFormula.create_from(program) # ground the program\n dag = LogicDAG.create_from(lf) # break cycles in the ground program\n sdd = SDD.create_from(dag)\n result = sdd.evaluate()\n result_tuples = [(k, v) for k, v in result.items()]\n if len(result_tuples) == 0:\n return False\n return result_tuples[0][1] != 0.0\n except (NegativeCycle, NonGroundProbabilisticClause, UnknownClause) as e:\n return None\n return None\n\n\ndef get_truth_label(theory, assertion, theorem_prover_config, theorem_prover):\n \"\"\"Get a truth label for a given theory and assertion by running it through\n specified theorem prover.\"\"\"\n label = None\n if theorem_prover.lower() == \"problog\":\n label = run_theory_in_problog(theory, assertion)\n return label\n\n\ndef generate_random_example(\n grammar,\n theorem_prover_config,\n statement_types,\n assertion_start_symbol,\n theorem_prover,\n):\n example = None\n\n generated_facts = set()\n generated_rules = set()\n generated_statements = set()\n\n predicates_in_rule_consequents = set()\n arguments_in_generated_statements = set()\n\n # Generate examples for every required type of statement (Start Symbol type)\n for statement_type in statement_types:\n start_symbol = statement_type[\"start_symbol\"]\n num_statements_range = statement_type[\"num_statements_range\"]\n req_num_statements = random.randint(\n num_statements_range[0], num_statements_range[1]\n )\n num_generated_statements = 0\n max_generation_attempts = 20\n num_generation_attempts = 0\n while num_generated_statements < req_num_statements:\n generated_statement = generate_random_statement(\n grammar, start_symbol, theorem_prover_config\n )\n if generated_statement in generated_statements:\n if num_generation_attempts == max_generation_attempts:\n break\n num_generation_attempts += 1\n else:\n if start_symbol in theorem_prover_config.rule_nonterminals:\n # If the current start symbol for generation is supposed to be a 
rule,\n # parse the generated statement as a rule.\n generated_rule = parse_rule(generated_statement)\n\n # Constrain rule such that:\n # All non-first entities appear earlier in the rule.\n # This means that if the randomly generated rule does NOT conform to\n # this requirement, then retry. If not, it is a valid rule, so update\n # the set of generated statements by adding the rule.\n rule_constraint_statisfied = True\n first_entity = generated_rule.lhs[0].arguments[0]\n first_fact_remaining_arguments = generated_rule.lhs[0].arguments[1:]\n remaining_arguments = []\n for fact in generated_rule.lhs[1:]:\n remaining_arguments.extend(fact.arguments)\n remaining_arguments.extend(generated_rule.rhs.arguments)\n used_entities = set()\n used_entities.add(first_entity)\n for entity in remaining_arguments:\n if entity not in used_entities:\n rule_constraint_statisfied = False\n break\n else:\n used_entities.add(entity)\n\n if rule_constraint_statisfied:\n generated_rules.add(generated_rule)\n for f in generated_rule.lhs:\n arguments_in_generated_statements.update(f.arguments)\n predicates_in_rule_consequents.add(generated_rule.rhs.predicate)\n arguments_in_generated_statements.update(\n generated_rule.rhs.arguments\n )\n generated_statements.add(generated_statement)\n num_generated_statements += 1\n num_generation_attempts = 0\n\n elif start_symbol in theorem_prover_config.fact_nonterminals:\n # If the current start symbol for generation is supposed to be a fact,\n # parse the generated statement as a fact.\n generated_fact = parse_fact(generated_statement)\n generated_facts.add(generated_fact)\n arguments_in_generated_statements.update(generated_fact.arguments)\n generated_statements.add(generated_statement)\n num_generated_statements += 1\n num_generation_attempts = 0\n\n theory = Theory(\n list(generated_facts), list(generated_rules), list(generated_statements)\n )\n\n # Constrain the generation of the assertion so that:\n # 1. A statement in the theory is not just repeated as an assertion as is.\n # 2. 
The assertion is valid only if it contains arguments that appear somewhere in the theory and\n # a predicate that appears on the RHS of some rule.\n assertion = None\n max_valid_assertion_generation_attempts = 20\n num_attempts = 0\n generated_valid_assertion = False\n while not generated_valid_assertion and (\n num_attempts < max_valid_assertion_generation_attempts\n ):\n assertion_statement = generate_random_statement(\n grammar, assertion_start_symbol, theorem_prover_config\n )\n assertion = parse_fact(assertion_statement)\n if (\n assertion_statement not in generated_statements\n and assertion.predicate in predicates_in_rule_consequents\n and len(\n set(assertion.arguments).intersection(arguments_in_generated_statements)\n )\n > 0\n ):\n generated_valid_assertion = True\n num_attempts += 1\n\n if assertion is not None:\n # Plug the generated statements into theorem prover\n label = get_truth_label(\n theory, assertion, theorem_prover_config, theorem_prover\n )\n\n if label is not None:\n # Construct example with label\n example = Example(TheoryAssertionInstance(theory, assertion, label))\n\n return example\n\n\ndef generate_theory(\n grammar,\n config,\n theory_op_file,\n theorem_prover,\n):\n \"\"\"Generate a theory with specified properties per config file specifications, using the\n specified grammar.\n Arguments:\n theory_op_file: Output jsonl file containing the generated examples.\n \"\"\"\n\n # Get Theorem Prover Config and initialize Theorem Prover\n theorem_prover_config = TheoremProverConfig(\n grammar, **config[\"theory\"][\"theorem_prover\"]\n )\n\n num_examples = config[\"theory\"][\"num_examples\"]\n statement_types = config[\"theory\"][\"statement_types_per_example\"]\n assertion_start_symbol = config[\"assertion\"][\"start_symbol\"]\n\n # Generate examples for every required type of statement (Start Symbol type)\n num_true_labels = 0\n num_false_labels = 0\n curr_num_examples = 0\n progress_tracker = tqdm(total=num_examples)\n progress_tracker.set_description(desc=\"Generating Examples...\")\n while curr_num_examples < num_examples:\n example = generate_random_example(\n grammar,\n theorem_prover_config,\n statement_types,\n assertion_start_symbol,\n theorem_prover,\n )\n if example is not None:\n if example.theory_assertion_instance.label:\n num_true_labels += 1\n else:\n num_false_labels += 1\n json.dump(example.to_json(), theory_op_file)\n theory_op_file.write(\"\\n\")\n\n theory_assertion_instance_representation_labeled = (\n TheoryAssertionRepresentationWithLabel(\n example.logical_forms.theory_statements,\n example.logical_forms.assertion_statement,\n example.theory_assertion_instance.label,\n )\n )\n\n curr_num_examples += 1\n progress_tracker.update()\n progress_tracker.close()\n print(f\"Generated {curr_num_examples} examples.\")\n print(f\" No. with True label: {num_true_labels}\")\n print(f\" No. with False label: {num_false_labels}\")\n\n\ndef preprocess_pcfg(grammar_file):\n \"\"\"Preprocesses given PCFG grammar file to return a collection of strings representing\n all the productions in the grammar. Expected grammar file format: NLTK PCFG format,\n for e.g.:\n Statement -> Fact\n Fact -> Polarity '(' Attribute Entity ')'\n Entity -> 'cat' | 'dog' | 'bald eagle' | 'rabbit' | 'mouse'\n Attribute -> 'red' | 'blue' | 'green' | 'kind' | 'nice' | 'big'\n Polarity -> '+' [0.8] | '-' [0.2]\n \"\"\"\n # Iterate through the lines and collect productions in a dictionary, keyed by\n # the nonterminals. 
So if there are two lines, one with S -> NP VP | VP and another\n # with S -> NP VP PP on two different lines, the dictionary will contain a key 'S'\n # with value 'NP VP | VP | NP VP PP'.\n productions = []\n nonterminal_dict = {}\n for line in grammar_file.readlines():\n production_parts = line.strip().split(\"->\", 1)\n if len(production_parts) == 2:\n lhs = production_parts[0].strip()\n rhs = production_parts[1]\n if lhs not in nonterminal_dict:\n nonterminal_dict[lhs] = rhs\n else:\n nonterminal_dict[lhs] += \" | \" + rhs\n\n # Iterate through the productions and check if each possible RHS has a probability\n # associated with it, expected to be specified like [0.5].\n productions = []\n for nonterminal in nonterminal_dict:\n rhs = nonterminal_dict[nonterminal]\n rhs_parts = [rhs_part.strip() for rhs_part in rhs.split(\"|\")]\n num_parts = len(rhs_parts)\n found_probs = True\n for rhs_part in rhs_parts:\n rhs_part_items = rhs_part.split(\" \")\n rhs_part_last_item = rhs_part_items[-1]\n if not (\n rhs_part_last_item.startswith(\"[\") and rhs_part_last_item.endswith(\"]\")\n ):\n found_probs = False\n break\n # If any of the RHS part items did not have an associated probability, assign all of them equal\n # probability.\n if not found_probs:\n prob = 1.0 / num_parts\n rhs_parts_with_probs = []\n for rhs_part in rhs_parts:\n rhs_part_mod = rhs_part + \" \" + \"[\" + str(prob) + \"]\"\n rhs_parts_with_probs.append(rhs_part_mod)\n rhs_parts = rhs_parts_with_probs\n final_rhs = \" | \".join(rhs_parts)\n production = f\"{nonterminal} -> {final_rhs}\"\n productions.append(production)\n return productions\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"Theory Generator.\")\n parser.add_argument(\"--grammar\", required=True, help=\"Grammar (CFG) for theory\")\n parser.add_argument(\n \"--config-json\",\n required=True,\n help=\"Json format config file with parameters to generate theory\",\n )\n parser.add_argument(\n \"--op-theory-jsonl\",\n help=\"Output Jsonl file containing an example json object per line. Json object has the format of the TheoryAssertionInstance class\",\n )\n parser.add_argument(\n \"--theorem-prover\",\n choices=common.supported_theorem_provers,\n default=common.default_theorem_prover,\n help=\"Thorem proving engine to use. Only supported one right now is problog.\",\n )\n args = parser.parse_args()\n\n with open(args.grammar, \"r\") as grammar_file, open(\n args.config_json, \"r\"\n ) as config_json_file:\n theory_op_file = open(args.op_theory_jsonl, \"w\")\n config = json.load(config_json_file)\n production_strs = preprocess_pcfg(grammar_file)\n grammar_str = \"\\n\".join(production_strs)\n grammar = PCFG.fromstring(grammar_str)\n generate_theory(\n grammar,\n config,\n theory_op_file,\n args.theorem_prover,\n )\n\n\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"numpy.random.choice"
]
] |
jgolebiowski/kitkopt
|
[
"0b46d38004b75799dd1e8603a445b1d711c03735",
"0b46d38004b75799dd1e8603a445b1d711c03735"
] |
[
"examples/random-minimize/control.py",
"tst/test_bayesian_optimizer.py"
] |
[
"import numpy as np\n\nfrom kitkopt.random_optimizer import minimize_function\nfrom kitkopt.hyper_parameter import HyperParameter\nfrom kitkopt.kernels import rbf\n\n\ndef funct(x):\n return np.sum(np.square(x))\n\n\ndef main():\n # ------ Define hyperparameters with bounds and stepsize\n hyperparam_config = [\n HyperParameter(-5, 5, 1),\n HyperParameter(-5, 5, 1)\n ]\n\n # ------ Find the minimum and the value at the minumum\n best_point, best_value = minimize_function(funct, hyperparam_config,\n extra_function_args=(),\n tolerance=1e-2,\n max_iterations=100,\n seed=123)\n print(\"Best point {point} with the value of {value}\".format(point=best_point, value=best_value))\n\n\nif (__name__ == '__main__'):\n main()\n",
"import numpy as np\nimport unittest\n\nfrom kitkopt.gaussian_process import GaussianProcessRegression\nfrom kitkopt.hyper_parameter import HyperParameter\nfrom kitkopt.kernels import rbf\nfrom kitkopt.hypergrid import not_in_array, get_hypergrid\nfrom kitkopt.bayesian_optimizer import propose_points, minimize_function\nfrom kitkopt.acquisition import _get_single_point_Thompson\nfrom kitkopt.utilities import debugtool, OptimizerError\n\nclass BayesianOptimizerTestUCB(unittest.TestCase):\n def test_minimize_UCB(self):\n def funct(x):\n return np.sum(np.square(x))\n\n hyperparam_config = [\n HyperParameter(-5, 5, 1),\n HyperParameter(-5, 5, 1)\n ]\n\n best_point, best_value = minimize_function(funct, hyperparam_config,\n extra_function_args=(),\n tolerance=1e-2,\n max_iterations=100,\n acquisition_function=\"UCB\",\n seed=123)\n np.testing.assert_allclose(best_point, np.array([0, 0]), atol=1e-5)\n np.testing.assert_allclose(best_value, np.array([0]), atol=1e-5)\n\n def test_optimizer_UCB(self):\n hyperparam_config = [\n HyperParameter(0, 4, 1),\n HyperParameter(0, 4, 1)\n ]\n tested_points = np.array([\n [0, 0],\n [0, 4],\n [4, 0],\n [4, 4],\n [1, 2],\n [2, 1]\n ], dtype=float)\n values = np.array([\n 2,\n 2,\n 2,\n 2,\n 0,\n 0\n ], dtype=float)\n gp_settings = dict(\n kernel=rbf,\n kernel_params=(0.1, 0.2),\n noise=1e-6\n )\n\n draw = propose_points(tested_points, values, hyperparam_config, 1, acquisition_function=\"UCB\",\n gp_settings=gp_settings)\n\n np.testing.assert_allclose(draw.squeeze(), np.array([2, 2]), atol=2e-1)\n\n\nclass BayesianOptimizerTestThompson(unittest.TestCase):\n def test_minimize_Thompson(self):\n def funct(x):\n return np.sum(np.square(x))\n\n hyperparam_config = [\n HyperParameter(-5, 5, 1),\n HyperParameter(-5, 5, 1)\n ]\n\n best_point, best_value = minimize_function(funct, hyperparam_config,\n extra_function_args=(),\n tolerance=1e-2,\n max_iterations=100,\n acquisition_function=\"Thompson\",\n seed=123)\n np.testing.assert_allclose(best_point, np.array([0, 0]), atol=1e-5)\n np.testing.assert_allclose(best_value, np.array([0]), atol=1e-5)\n\n def test_optimizer_Thompson(self):\n hyperparam_config = [\n HyperParameter(0, 5, 1),\n HyperParameter(0, 5, 1)\n ]\n tested_points = np.array([\n [0, 0],\n [0, 4],\n [2, 2],\n [4, 4],\n [4, 0],\n [1, 1]\n ], dtype=float)\n values = np.array([\n 2,\n 2,\n 2,\n 2,\n 2,\n 0\n ], dtype=float)\n gp_settings = dict(\n kernel=rbf,\n kernel_params=(0.1, 0.2),\n noise=1e-6\n )\n\n N = 200\n draws = np.empty((N, 2))\n for idx in range(N):\n draws[idx, :] = propose_points(tested_points, values, hyperparam_config, 1, acquisition_function=\"Thompson\",\n gp_settings=gp_settings)\n\n empiricammean = np.mean(draws, axis=0)\n np.testing.assert_allclose(empiricammean, np.array([1, 1]), atol=2e-1)\n\n def test_propose_points_Thompson(self):\n hyperparam_config = [\n HyperParameter(0, 3, 1),\n HyperParameter(0, 5, 2)\n ]\n tested_points = np.array([\n [0, 2],\n [2, 0],\n [1, 4],\n ], dtype=float)\n target = np.array([[2., 4.],\n [0., 0.],\n [3., 0.],\n [1., 2.],\n [3., 2.],\n [1., 0.],\n [0., 4.],\n [3., 4.],\n [2., 2.]])\n values = np.array([1, 2, 3], dtype=float)\n result = propose_points(tested_points, values, hyperparam_config, 9, acquisition_function=\"Thompson\", seed=123)\n # print(repr(result))\n np.testing.assert_almost_equal(result, target, decimal=5)\n\n target = np.array([[2., 4.],\n [0., 0.],\n [3., 0.],\n [1., 2.]])\n values = np.array([1, 2, 3], dtype=float)\n result = propose_points(tested_points, values, hyperparam_config, 4, 
seed=123)\n # print(repr(result))\n\n np.testing.assert_almost_equal(result, target, decimal=5)\n\n # Check error\n with self.assertRaises(OptimizerError):\n propose_points(tested_points, values, hyperparam_config, 20, seed=123)\n\n"
] |
[
[
"numpy.square"
],
[
"numpy.square",
"numpy.testing.assert_almost_equal",
"numpy.mean",
"numpy.array",
"numpy.empty"
]
] |
zxChouSean/crispr_bedict_reproduce
|
[
"8590ddaeaefedd370a60c1f61043333371526766",
"8590ddaeaefedd370a60c1f61043333371526766"
] |
[
"criscas/model.py",
"tools/statistic.py"
] |
[
"import numpy as np\nimport torch\nfrom torch import nn\n\nclass SH_SelfAttention(nn.Module):\n \"\"\" single head self-attention module\n \"\"\"\n def __init__(self, input_size):\n \n super().__init__()\n # define query, key and value transformation matrices\n # usually input_size is equal to embed_size\n self.embed_size = input_size\n self.Wq = nn.Linear(input_size, self.embed_size, bias=False)\n self.Wk = nn.Linear(input_size, self.embed_size, bias=False)\n self.Wv = nn.Linear(input_size, self.embed_size, bias=False)\n self.softmax = nn.Softmax(dim=2) # normalized across feature dimension\n \n def forward(self, X):\n \"\"\"\n Args:\n X: tensor, (batch, sequence length, input_size)\n \"\"\"\n X_q = self.Wq(X) # queries\n X_k = self.Wk(X) # keys\n X_v = self.Wv(X) # values\n \n # scaled queries and keys by forth root \n X_q_scaled = X_q / (self.embed_size ** (1/4))\n X_k_scaled = X_k / (self.embed_size ** (1/4))\n \n attn_w = torch.bmm(X_q_scaled, X_k_scaled.transpose(1,2))\n # (batch, sequence length, sequence length)\n attn_w_normalized = self.softmax(attn_w)\n # print('attn_w_normalized.shape', attn_w_normalized.shape)\n \n # reweighted value vectors\n z = torch.bmm(attn_w_normalized, X_v)\n \n return z, attn_w_normalized\n \n\nclass MH_SelfAttention(nn.Module):\n \"\"\" multi head self-attention module\n \"\"\"\n def __init__(self, input_size, num_attn_heads):\n \n super().__init__()\n \n layers = [SH_SelfAttention(input_size) for i in range(num_attn_heads)]\n \n self.multihead_pipeline = nn.ModuleList(layers)\n embed_size = input_size\n self.Wz = nn.Linear(num_attn_heads*embed_size, embed_size)\n \n \n \n def forward(self, X):\n \"\"\"\n Args:\n X: tensor, (batch, sequence length, input_size)\n \"\"\"\n \n out = []\n bsize, num_positions, inp_dim = X.shape\n attn_tensor = X.new_zeros((bsize, num_positions, num_positions))\n for SH_layer in self.multihead_pipeline:\n z, attn_w = SH_layer(X)\n out.append(z)\n attn_tensor += attn_w\n # concat on the feature dimension\n out = torch.cat(out, -1) \n attn_tensor = attn_tensor/len(self.multihead_pipeline)\n\n # return a unified vector mapping of the different self-attention blocks\n return self.Wz(out), attn_tensor\n \n\nclass TransformerUnit(nn.Module):\n \n def __init__(self, input_size, num_attn_heads, mlp_embed_factor, nonlin_func, pdropout):\n \n super().__init__()\n \n embed_size = input_size\n self.multihead_attn = MH_SelfAttention(input_size, num_attn_heads)\n \n self.layernorm_1 = nn.LayerNorm(embed_size)\n\n # also known as position wise feed forward neural network\n self.MLP = nn.Sequential(\n nn.Linear(embed_size, embed_size*mlp_embed_factor),\n nonlin_func,\n nn.Linear(embed_size*mlp_embed_factor, embed_size)\n )\n \n self.layernorm_2 = nn.LayerNorm(embed_size)\n \n self.dropout = nn.Dropout(p=pdropout)\n \n \n def forward(self, X):\n \"\"\"\n Args:\n X: tensor, (batch, sequence length, input_size)\n \"\"\"\n # z is tensor of size (batch, sequence length, input_size)\n z, attn_mhead_tensor = self.multihead_attn(X)\n # layer norm with residual connection\n z = self.layernorm_1(z + X)\n z = self.dropout(z)\n z_ff= self.MLP(z)\n z = self.layernorm_2(z_ff + z)\n z = self.dropout(z)\n \n return z, attn_mhead_tensor\n\n\"\"\"\n implement position encoder based on cosine and sine approach proposed \n by original Transformers paper ('Attention is all what you need')\n\"\"\"\nclass NucleoPosEncoding(nn.Module):\n def __init__(self, num_nucleotides, seq_len, embed_dim, pdropout=0.1):\n super().__init__()\n self.nucleo_emb = 
nn.Embedding(num_nucleotides, embed_dim)\n self.dropout = nn.Dropout(p=pdropout)\n # positional encoding matrix\n base_pow = 10000\n PE_matrix = torch.zeros((1, seq_len, embed_dim))\n i_num = torch.arange(0., seq_len).reshape(-1, 1) # i iterates over sequence length (i.e. sequence items)\n j_denom = torch.pow(base_pow, torch.arange(0., embed_dim, 2.) / embed_dim) # j iterates over embedding dimension\n PE_matrix[:, :, 0::2] = torch.sin(i_num/j_denom)\n PE_matrix[:, :, 1::2] = torch.cos(i_num/j_denom)\n self.register_buffer('PE_matrix', PE_matrix)\n \n \n def forward(self, X):\n \"\"\"\n Args:\n X: tensor, int64, (batch, sequence length)\n \"\"\"\n X_emb = self.nucleo_emb(X)\n # (batch, sequence length, embedding dim)\n X_embpos = X_emb + self.PE_matrix\n return self.dropout(X_embpos)\n\nclass NucleoPosEmbedder(nn.Module):\n def __init__(self, num_nucleotides, seq_length, embedding_dim):\n super().__init__()\n self.nucleo_emb = nn.Embedding(num_nucleotides, embedding_dim)\n self.pos_emb = nn.Embedding(seq_length, embedding_dim)\n\n def forward(self, X):\n \"\"\"\n Args:\n X: tensor, int64, (batch, sequence length)\n \"\"\"\n X_emb = self.nucleo_emb(X)\n bsize, seqlen, featdim = X_emb.size()\n device = X_emb.device\n positions = torch.arange(seqlen).to(device)\n positions_emb = self.pos_emb(positions)[None, :, :].expand(bsize, seqlen, featdim)\n # (batch, sequence length, embedding dim)\n X_embpos = X_emb + positions_emb\n return X_embpos\n\nclass PerBaseFeatureEmbAttention(nn.Module):\n \"\"\" Per base feature attention module\n \"\"\"\n def __init__(self, input_dim, seq_len):\n \n super().__init__()\n # define query, key and value transformation matrices\n # usually input_size is equal to embed_size\n self.embed_size = input_dim\n self.Q = nn.Parameter(torch.randn((seq_len, input_dim), dtype=torch.float32), requires_grad=True)\n self.softmax = nn.Softmax(dim=-1) # normalized across feature dimension\n \n def forward(self, X):\n \"\"\"\n Args:\n X: tensor, (batch, sequence length, input_size)\n \"\"\"\n bsize, seqlen, featdim = X.shape\n X_q = self.Q[None, :, :].expand(bsize, seqlen, featdim) # queries\n X_k = X\n X_v = X\n # scaled queries and keys by forth root \n X_q_scaled = X_q / (self.embed_size ** (1/4))\n X_k_scaled = X_k / (self.embed_size ** (1/4))\n # print(X_q_scaled.shape)\n # print(X_k_scaled.shape)\n \n attn_w = torch.bmm(X_q_scaled, X_k_scaled.transpose(1,2))\n # attn_w = X_q_scaled.matmul(X_k_scaled.transpose(1,0))\n # (batch, sequence length, sequence length)\n attn_w_normalized = self.softmax(attn_w)\n # print('attn_w_normalized.shape', attn_w_normalized.shape)\n \n # reweighted value vectors\n z = torch.bmm(attn_w_normalized, X_v)\n # print('z.shape', z.shape)\n \n return z, attn_w_normalized\n\nclass FeatureEmbAttention(nn.Module):\n def __init__(self, input_dim):\n '''\n Args:\n input_dim: int, size of the input vector (i.e. 
feature vector)\n '''\n\n super().__init__()\n self.input_dim = input_dim\n # use this as query vector against the transformer outputs\n self.queryv = nn.Parameter(torch.randn(input_dim, dtype=torch.float32), requires_grad=True)\n self.softmax = nn.Softmax(dim=1) # normalized across seqlen\n\n def forward(self, X):\n '''Performs forward computation\n Args:\n X: torch.Tensor, (bsize, seqlen, feature_dim), dtype=torch.float32\n '''\n\n X_scaled = X / (self.input_dim ** (1/4))\n queryv_scaled = self.queryv / (self.input_dim ** (1/4))\n # using matmul to compute tensor vector multiplication\n \n # (bsize, seqlen)\n attn_weights = X_scaled.matmul(queryv_scaled)\n\n # softmax\n attn_weights_norm = self.softmax(attn_weights)\n\n # reweighted value vectors (in this case reweighting the original input X)\n # unsqueeze attn_weights_norm to get (bsize, 1, seqlen)\n # perform batch multiplication with X that has shape (bsize, seqlen, feat_dim)\n # result will be (bsize, 1, feat_dim)\n # squeeze the result to obtain (bsize, feat_dim)\n z = attn_weights_norm.unsqueeze(1).bmm(X).squeeze(1)\n \n # returns (bsize, feat_dim), (bsize, seqlen)\n return z, attn_weights_norm\n\n\nclass Categ_CrisCasTransformer(nn.Module):\n\n def __init__(self, input_size=64, num_nucleotides=4, \n seq_length=20, num_attn_heads=8, \n mlp_embed_factor=2, nonlin_func=nn.ReLU(), \n pdropout=0.3, num_transformer_units=12, \n pooling_mode='attn', num_classes=2, per_base=False):\n\n \n super().__init__()\n \n embed_size = input_size\n\n self.nucleopos_embedder = NucleoPosEmbedder(num_nucleotides, seq_length, embed_size)\n # self.nucleopos_embedder = NucleoPosEncoding(num_nucleotides, seq_length, embed_size)\n \n trfunit_layers = [TransformerUnit(input_size, num_attn_heads, mlp_embed_factor, nonlin_func, pdropout) \n for i in range(num_transformer_units)]\n # self.trfunit_layers = trfunit_layers\n self.trfunit_pipeline = nn.ModuleList(trfunit_layers)\n # self.trfunit_pipeline = nn.Sequential(*trfunit_layers)\n self.per_base = per_base\n \n if not per_base:\n self.pooling_mode = pooling_mode\n if pooling_mode == 'attn':\n self.pooling = FeatureEmbAttention(input_size)\n elif pooling_mode == 'mean':\n self.pooling = torch.mean\n self.Wy = nn.Linear(embed_size, num_classes, bias=True)\n\n else:\n self.pooling_mode = pooling_mode\n if pooling_mode == 'attn':\n self.pooling = PerBaseFeatureEmbAttention(input_size, seq_length)\n self.bias = nn.Parameter(torch.randn((seq_length, num_classes), dtype=torch.float32), requires_grad=True)\n self.Wy = nn.Linear(embed_size, num_classes, bias=False)\n\n # perform log softmax on the feature dimension\n self.log_softmax = nn.LogSoftmax(dim=-1)\n self._init_params_()\n \n def _init_params_(self):\n for p_name, p in self.named_parameters():\n param_dim = p.dim()\n if param_dim > 1: # weight matrices\n nn.init.xavier_uniform_(p)\n elif param_dim == 1: # bias parameters\n if p_name.endswith('bias'):\n nn.init.uniform_(p, a=-1.0, b=1.0)\n # nn.init.xavier_uniform_(p)\n\n def forward(self, X):\n \"\"\"\n Args:\n X: tensor, int64, (batch, sequence length)\n \"\"\"\n # (batch, seqlen, embedding dim)\n X_embpos = self.nucleopos_embedder(X)\n # z is tensor of size (batch, seqlen, embedding dim)\n bsize, num_positions, inp_dim = X_embpos.shape\n attn_tensor = X_embpos.new_zeros((bsize, num_positions, num_positions))\n xinput = X_embpos\n for trfunit in self.trfunit_pipeline:\n z, attn_mhead_tensor = trfunit(xinput)\n xinput = z\n attn_tensor += attn_mhead_tensor\n attn_tensor = 
attn_tensor/len(self.trfunit_pipeline)\n\n # pool across seqlen vectors\n if not self.per_base:\n if self.pooling_mode == 'attn':\n z, fattn_w_norm = self.pooling(z)\n # Note: z.mean(dim=1) or self.pooling(z, dim=1) will change shape of z to become (batch, embedding dim)\n # we can keep dimension by running z.mean(dim=1, keepdim=True) to have (batch, 1, embedding dim)\n elif self.pooling_mode == 'mean':\n z = self.pooling(z, dim=1)\n fattn_w_norm = None\n y = self.Wy(z)\n else:\n if self.pooling_mode == 'attn':\n z, fattn_w_norm = self.pooling(z)\n y = self.Wy(z) + self.bias\n \n return self.log_softmax(y), fattn_w_norm, attn_tensor",
"'''\nAuthor: Xin Zhou\nDate: 17 Sep, 2021\n'''\n\nimport csv\nimport matplotlib.pyplot as plt\n\n#่ฎก็ฎๅนณๅๆฐ\ndef averagenum(num):\n nsum = 0\n for i in range(len(num)):\n nsum += num[i]\n return nsum / len(num)\n\n#่ฎก็ฎไธญไฝๆฐ\ndef mediannum(num):\n listnum = [num[i] for i in range(len(num))]\n listnum.sort()\n lnum = len(num)\n if lnum % 2 == 1:\n i = int((lnum + 1) / 2)-1\n return listnum[i]\n else:\n i = int(lnum / 2)-1\n return (listnum[i] + listnum[i + 1]) / 2\n\n#่ฎก็ฎไผๆฐ\ndef publicnum(num, d = 0):\n dictnum = {}\n for i in range(len(num)):\n if num[i] in dictnum.keys():\n dictnum[num[i]] += 1\n else:\n dictnum.setdefault(num[i], 1)\n maxnum = 0\n maxkey = 0\n for k, v in dictnum.items():\n if v > maxnum:\n maxnum = v\n maxkey = k\n return maxkey\n\ndef sum(list):\n s = 0\n for a in list:\n s+=a\n return s\n\neditor='Target-AID'\nline = 0\nnums = []\nnum0 = 0\nplot = [0 for a in range(101)]\nplot_nozero = [0 for a in range(101)]\nwith open('/home/data/bedict_reproduce/data/test_data/'+editor+'/perbase.csv', 'r') as f:\n reader = csv.reader(f)\n for row in reader:\n line += 1\n if line == 1:\n continue\n nums += [float(a) for a in row[4:23]]\n for num in nums:\n if num == 0.0:\n num0 += 1\n else:\n # print(int(num))\n plot[int(num)] += 1\n if int(num) != 0:\n plot_nozero[int(num)] += 1\n plt.xlabel('ground truth')\n plt.ylabel('count')\n for a in range(100):\n plt.scatter(a,plot[a])\n plt.legend()\n # plt.title((\"Total:%d|%d. Median:%d. Public:%d. Zero:%d.\".format(len(nums)-num0, len(nums), mediannum(nums), publicnum(nums), num0)))\n # plt.title(\"Total As:\"+str(len(nums)-num0)+\"|\"+str(len(nums)) +\". Zero:\"+str(num0) +\". Probs>1:\"+str(sum(plot_nozero))+\". Median(no0):\"+str(mediannum(plot_nozero))+\". Public(no0):\"+str(publicnum(plot_nozero)))\n plt.title(str(len(nums)-num0)+\"|\"+str(len(nums)) +\". \"+str(num0) +\". \"+str(sum(plot_nozero)) +\". \"+str(mediannum(plot_nozero))+\". \"+str(publicnum(plot_nozero)))\n plt.savefig('/home/data/bedict_reproduce/data/test_data/'+editor+'count.png')\n plt.show()\n"
] |
[
[
"torch.nn.Softmax",
"torch.nn.Dropout",
"torch.nn.LogSoftmax",
"torch.nn.init.uniform_",
"torch.cat",
"torch.zeros",
"torch.sin",
"torch.nn.ModuleList",
"torch.randn",
"torch.nn.Embedding",
"torch.nn.LayerNorm",
"torch.nn.Linear",
"torch.nn.init.xavier_uniform_",
"torch.bmm",
"torch.arange",
"torch.nn.ReLU",
"torch.cos"
],
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] |
goru97/horovod
|
[
"56afed8666e0ec8b54d84ac202890393c0c1a8a7"
] |
[
"setup.py"
] |
[
"# Copyright 2017 Uber Technologies, Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nfrom __future__ import print_function\n\nimport os\nfrom setuptools import setup, Extension, find_packages\nfrom setuptools.command.build_ext import build_ext\nfrom distutils.errors import CompileError, DistutilsError, DistutilsPlatformError, LinkError\nfrom distutils.version import LooseVersion\nimport shlex\nimport subprocess\nimport sys\nimport textwrap\nimport traceback\nimport re\n\nfrom horovod import __version__\n\n\ntensorflow_mpi_lib = Extension('horovod.tensorflow.mpi_lib', [])\ntorch_mpi_lib = Extension('horovod.torch.mpi_lib', [])\ntorch_mpi_lib_impl = Extension('horovod.torch.mpi_lib_impl', [])\ntorch_mpi_lib_v2 = Extension('horovod.torch.mpi_lib_v2', [])\n\n\ndef is_build_action():\n if len(sys.argv) <= 1:\n return False\n\n if sys.argv[1].startswith('build'):\n return True\n\n if sys.argv[1].startswith('bdist'):\n return True\n\n if sys.argv[1].startswith('install'):\n return True\n\n\ndef check_tf_version():\n try:\n import tensorflow as tf\n if LooseVersion(tf.__version__) < LooseVersion('1.1.0'):\n raise DistutilsPlatformError(\n 'Your TensorFlow version %s is outdated. '\n 'Horovod requires tensorflow>=1.1.0' % tf.__version__)\n except ImportError:\n raise DistutilsPlatformError(\n 'import tensorflow failed, is it installed?\\n\\n%s' % traceback.format_exc())\n except AttributeError:\n # This means that tf.__version__ was not exposed, which makes it *REALLY* old.\n raise DistutilsPlatformError(\n 'Your TensorFlow version is outdated. Horovod requires tensorflow>=1.1.0')\n\n\ndef get_cpp_flags(build_ext):\n last_err = None\n default_flags = ['-std=c++11', '-fPIC', '-O2']\n if sys.platform == 'darwin':\n # Darwin most likely will have Clang, which has libc++.\n flags_to_try = [default_flags + ['-stdlib=libc++'], default_flags]\n else:\n flags_to_try = [default_flags, default_flags + ['-stdlib=libc++']]\n for cpp_flags in flags_to_try:\n try:\n test_compile(build_ext, 'test_cpp_flags', extra_compile_preargs=cpp_flags,\n code=textwrap.dedent('''\\\n #include <unordered_map>\n void test() {\n }\n '''))\n\n return cpp_flags\n except (CompileError, LinkError):\n last_err = 'Unable to determine C++ compilation flags (see error above).'\n except Exception:\n last_err = 'Unable to determine C++ compilation flags. 
' \\\n 'Last error:\\n\\n%s' % traceback.format_exc()\n\n raise DistutilsPlatformError(last_err)\n\n\ndef get_link_flags(build_ext):\n last_err = None\n libtool_flags = ['-Wl,-exported_symbols_list,horovod.exp']\n ld_flags = ['-Wl,--version-script=horovod.lds']\n if sys.platform == 'darwin':\n flags_to_try = [libtool_flags, ld_flags]\n else:\n flags_to_try = [ld_flags, libtool_flags]\n for link_flags in flags_to_try:\n try:\n test_compile(build_ext, 'test_link_flags', extra_link_preargs=link_flags,\n code=textwrap.dedent('''\\\n void test() {\n }\n '''))\n\n return link_flags\n except (CompileError, LinkError):\n last_err = 'Unable to determine C++ link flags (see error above).'\n except Exception:\n last_err = 'Unable to determine C++ link flags. ' \\\n 'Last error:\\n\\n%s' % traceback.format_exc()\n\n raise DistutilsPlatformError(last_err)\n\n\ndef get_tf_include_dirs():\n import tensorflow as tf\n tf_inc = tf.sysconfig.get_include()\n return [tf_inc, '%s/external/nsync/public' % tf_inc]\n\n\ndef get_tf_lib_dirs():\n import tensorflow as tf\n tf_lib = tf.sysconfig.get_lib()\n return [tf_lib]\n\n\ndef get_tf_libs(build_ext, lib_dirs, cpp_flags):\n last_err = None\n for tf_libs in [['tensorflow_framework'], []]:\n try:\n lib_file = test_compile(build_ext, 'test_tensorflow_libs',\n library_dirs=lib_dirs, libraries=tf_libs,\n extra_compile_preargs=cpp_flags,\n code=textwrap.dedent('''\\\n void test() {\n }\n '''))\n\n from tensorflow.python.framework import load_library\n load_library.load_op_library(lib_file)\n\n return tf_libs\n except (CompileError, LinkError):\n last_err = 'Unable to determine -l link flags to use with TensorFlow (see error above).'\n except Exception:\n last_err = 'Unable to determine -l link flags to use with TensorFlow. ' \\\n 'Last error:\\n\\n%s' % traceback.format_exc()\n\n raise DistutilsPlatformError(last_err)\n\n\ndef get_tf_abi(build_ext, include_dirs, lib_dirs, libs, cpp_flags):\n last_err = None\n cxx11_abi_macro = '_GLIBCXX_USE_CXX11_ABI'\n for cxx11_abi in ['0', '1']:\n try:\n lib_file = test_compile(build_ext, 'test_tensorflow_abi',\n macros=[(cxx11_abi_macro, cxx11_abi)],\n include_dirs=include_dirs, library_dirs=lib_dirs,\n libraries=libs, extra_compile_preargs=cpp_flags,\n code=textwrap.dedent('''\\\n #include <string>\n #include \"tensorflow/core/framework/op.h\"\n #include \"tensorflow/core/framework/op_kernel.h\"\n #include \"tensorflow/core/framework/shape_inference.h\"\n void test() {\n auto ignore = tensorflow::strings::StrCat(\"a\", \"b\");\n }\n '''))\n\n from tensorflow.python.framework import load_library\n load_library.load_op_library(lib_file)\n\n return cxx11_abi_macro, cxx11_abi\n except (CompileError, LinkError):\n last_err = 'Unable to determine CXX11 ABI to use with TensorFlow (see error above).'\n except Exception:\n last_err = 'Unable to determine CXX11 ABI to use with TensorFlow. 
' \\\n 'Last error:\\n\\n%s' % traceback.format_exc()\n\n raise DistutilsPlatformError(last_err)\n\n\ndef get_tf_flags(build_ext, cpp_flags):\n import tensorflow as tf\n try:\n return tf.sysconfig.get_compile_flags(), tf.sysconfig.get_link_flags()\n except AttributeError:\n # fallback to the previous logic\n tf_include_dirs = get_tf_include_dirs()\n tf_lib_dirs = get_tf_lib_dirs()\n tf_libs = get_tf_libs(build_ext, tf_lib_dirs, cpp_flags)\n tf_abi = get_tf_abi(build_ext, tf_include_dirs,\n tf_lib_dirs, tf_libs, cpp_flags)\n\n compile_flags = []\n for include_dir in tf_include_dirs:\n compile_flags.append('-I%s' % include_dir)\n if tf_abi:\n compile_flags.append('-D%s=%s' % tf_abi)\n\n link_flags = []\n for lib_dir in tf_lib_dirs:\n link_flags.append('-L%s' % lib_dir)\n for lib in tf_libs:\n link_flags.append('-l%s' % lib)\n\n return compile_flags, link_flags\n\n\ndef get_mpi_flags():\n show_command = os.environ.get('HOROVOD_MPICXX_SHOW', 'mpicxx -show')\n try:\n mpi_show_output = subprocess.check_output(\n shlex.split(show_command), universal_newlines=True).strip()\n mpi_show_args = shlex.split(mpi_show_output)\n if not mpi_show_args[0].startswith('-'):\n # Open MPI and MPICH print compiler name as a first word, skip it\n mpi_show_args = mpi_show_args[1:]\n # strip off compiler call portion and always escape each arg\n return ' '.join(['\"' + arg.replace('\"', '\"\\'\"\\'\"') + '\"'\n for arg in mpi_show_args])\n except Exception:\n raise DistutilsPlatformError(\n '%s failed (see error below), is MPI in $PATH?\\n'\n 'Note: If your version of MPI has a custom command to show compilation flags, '\n 'please specify it with the HOROVOD_MPICXX_SHOW environment variable.\\n\\n'\n '%s' % (show_command, traceback.format_exc()))\n\n\ndef test_compile(build_ext, name, code, libraries=None, include_dirs=None, library_dirs=None,\n macros=None, extra_compile_preargs=None, extra_link_preargs=None):\n test_compile_dir = os.path.join(build_ext.build_temp, 'test_compile')\n if not os.path.exists(test_compile_dir):\n os.makedirs(test_compile_dir)\n\n source_file = os.path.join(test_compile_dir, '%s.cc' % name)\n with open(source_file, 'w') as f:\n f.write(code)\n\n compiler = build_ext.compiler\n [object_file] = compiler.object_filenames([source_file])\n shared_object_file = compiler.shared_object_filename(\n name, output_dir=test_compile_dir)\n\n compiler.compile([source_file], extra_preargs=extra_compile_preargs,\n include_dirs=include_dirs, macros=macros)\n compiler.link_shared_object(\n [object_file], shared_object_file, libraries=libraries, library_dirs=library_dirs,\n extra_preargs=extra_link_preargs)\n\n return shared_object_file\n\n\ndef get_cuda_dirs(build_ext, cpp_flags):\n cuda_include_dirs = []\n cuda_lib_dirs = []\n\n cuda_home = os.environ.get('HOROVOD_CUDA_HOME')\n if cuda_home:\n cuda_include_dirs += ['%s/include' % cuda_home]\n cuda_lib_dirs += ['%s/lib' % cuda_home, '%s/lib64' % cuda_home]\n\n cuda_include = os.environ.get('HOROVOD_CUDA_INCLUDE')\n if cuda_include:\n cuda_include_dirs += [cuda_include]\n\n cuda_lib = os.environ.get('HOROVOD_CUDA_LIB')\n if cuda_lib:\n cuda_lib_dirs += [cuda_lib]\n\n if not cuda_include_dirs and not cuda_lib_dirs:\n # default to /usr/local/cuda\n cuda_include_dirs += ['/usr/local/cuda/include']\n cuda_lib_dirs += ['/usr/local/cuda/lib', '/usr/local/cuda/lib64']\n\n try:\n test_compile(build_ext, 'test_cuda', libraries=['cudart'], include_dirs=cuda_include_dirs,\n library_dirs=cuda_lib_dirs, extra_compile_preargs=cpp_flags,\n code=textwrap.dedent('''\\\n 
#include <cuda_runtime.h>\n void test() {\n cudaSetDevice(0);\n }\n '''))\n except (CompileError, LinkError):\n raise DistutilsPlatformError(\n 'CUDA library was not found (see error above).\\n'\n 'Please specify correct CUDA location with the HOROVOD_CUDA_HOME '\n 'environment variable or combination of HOROVOD_CUDA_INCLUDE and '\n 'HOROVOD_CUDA_LIB environment variables.\\n\\n'\n 'HOROVOD_CUDA_HOME - path where CUDA include and lib directories can be found\\n'\n 'HOROVOD_CUDA_INCLUDE - path to CUDA include directory\\n'\n 'HOROVOD_CUDA_LIB - path to CUDA lib directory')\n\n return cuda_include_dirs, cuda_lib_dirs\n\n\ndef get_nccl_vals(build_ext, cuda_include_dirs, cuda_lib_dirs, cpp_flags):\n nccl_include_dirs = []\n nccl_lib_dirs = []\n nccl_libs = []\n\n nccl_home = os.environ.get('HOROVOD_NCCL_HOME')\n if nccl_home:\n nccl_include_dirs += ['%s/include' % nccl_home]\n nccl_lib_dirs += ['%s/lib' % nccl_home, '%s/lib64' % nccl_home]\n\n nccl_include_dir = os.environ.get('HOROVOD_NCCL_INCLUDE')\n if nccl_include_dir:\n nccl_include_dirs += [nccl_include_dir]\n\n nccl_lib_dir = os.environ.get('HOROVOD_NCCL_LIB')\n if nccl_lib_dir:\n nccl_lib_dirs += [nccl_lib_dir]\n\n nccl_link_mode = os.environ.get('HOROVOD_NCCL_LINK', 'STATIC')\n if nccl_link_mode.upper() == 'SHARED':\n nccl_libs += ['nccl']\n else:\n nccl_libs += ['nccl_static']\n\n try:\n test_compile(build_ext, 'test_nccl', libraries=nccl_libs, include_dirs=nccl_include_dirs + cuda_include_dirs,\n library_dirs=nccl_lib_dirs + cuda_lib_dirs, extra_compile_preargs=cpp_flags,\n code=textwrap.dedent('''\\\n #include <nccl.h>\n #if NCCL_MAJOR < 2\n #error Horovod requires NCCL 2.0 or later version, please upgrade.\n #endif\n void test() {\n ncclUniqueId nccl_id;\n ncclGetUniqueId(&nccl_id);\n }\n '''))\n except (CompileError, LinkError):\n raise DistutilsPlatformError(\n 'NCCL 2.0 library or its later version was not found (see error above).\\n'\n 'Please specify correct NCCL location with the HOROVOD_NCCL_HOME '\n 'environment variable or combination of HOROVOD_NCCL_INCLUDE and '\n 'HOROVOD_NCCL_LIB environment variables.\\n\\n'\n 'HOROVOD_NCCL_HOME - path where NCCL include and lib directories can be found\\n'\n 'HOROVOD_NCCL_INCLUDE - path to NCCL include directory\\n'\n 'HOROVOD_NCCL_LIB - path to NCCL lib directory')\n\n return nccl_include_dirs, nccl_lib_dirs, nccl_libs\n\n\ndef get_ddl_dirs():\n # Default DDL home\n ddl_home = '/opt/DL/ddl'\n ddl_include_dir = '%s/include' % ddl_home\n ddl_lib_dir = '%s/lib' % ddl_home\n\n if not os.path.exists(ddl_lib_dir):\n raise DistutilsPlatformError(\n 'DDL lib was not found. Please, make sure \\'ddl\\' package is installed.')\n if not os.path.exists(ddl_include_dir):\n raise DistutilsPlatformError(\n 'DDL include was not found. Please, make sure \\'ddl-dev\\' package is installed.')\n\n return [ddl_include_dir], [ddl_lib_dir]\n\n\ndef get_common_options(build_ext):\n cpp_flags = get_cpp_flags(build_ext)\n link_flags = get_link_flags(build_ext)\n mpi_flags = get_mpi_flags()\n\n gpu_allreduce = os.environ.get('HOROVOD_GPU_ALLREDUCE')\n if gpu_allreduce and gpu_allreduce != 'MPI' and gpu_allreduce != 'NCCL' and \\\n gpu_allreduce != 'DDL':\n raise DistutilsError('HOROVOD_GPU_ALLREDUCE=%s is invalid, supported '\n 'values are \"\", \"MPI\", \"NCCL\", \"DDL\".' % gpu_allreduce)\n\n gpu_allgather = os.environ.get('HOROVOD_GPU_ALLGATHER')\n if gpu_allgather and gpu_allgather != 'MPI':\n raise DistutilsError('HOROVOD_GPU_ALLGATHER=%s is invalid, supported '\n 'values are \"\", \"MPI\".' 
% gpu_allgather)\n\n gpu_broadcast = os.environ.get('HOROVOD_GPU_BROADCAST')\n if gpu_broadcast and gpu_broadcast != 'MPI':\n raise DistutilsError('HOROVOD_GPU_BROADCAST=%s is invalid, supported '\n 'values are \"\", \"MPI\".' % gpu_broadcast)\n\n if gpu_allreduce or gpu_allgather or gpu_broadcast:\n have_cuda = True\n cuda_include_dirs, cuda_lib_dirs = get_cuda_dirs(build_ext, cpp_flags)\n else:\n have_cuda = False\n cuda_include_dirs = cuda_lib_dirs = []\n\n if gpu_allreduce == 'NCCL':\n have_nccl = True\n nccl_include_dirs, nccl_lib_dirs, nccl_libs = get_nccl_vals(\n build_ext, cuda_include_dirs, cuda_lib_dirs, cpp_flags)\n else:\n have_nccl = False\n nccl_include_dirs = nccl_lib_dirs = nccl_libs = []\n\n if gpu_allreduce == 'DDL':\n have_ddl = True\n ddl_include_dirs, ddl_lib_dirs = get_ddl_dirs()\n else:\n have_ddl = False\n ddl_include_dirs = ddl_lib_dirs = []\n\n if (gpu_allreduce == 'NCCL' and (gpu_allgather == 'MPI' or gpu_broadcast == 'MPI')\n and not os.environ.get('HOROVOD_ALLOW_MIXED_GPU_IMPL')):\n raise DistutilsError('You should not mix NCCL and MPI GPU due to a possible deadlock.\\n'\n 'If you\\'re sure you want to mix them, set the '\n 'HOROVOD_ALLOW_MIXED_GPU_IMPL environment variable to \\'1\\'.')\n\n MACROS = []\n INCLUDES = []\n SOURCES = ['horovod/common/common.cc',\n 'horovod/common/mpi_message.cc',\n 'horovod/common/operations.cc',\n 'horovod/common/timeline.cc']\n COMPILE_FLAGS = cpp_flags + shlex.split(mpi_flags)\n LINK_FLAGS = link_flags + shlex.split(mpi_flags)\n LIBRARY_DIRS = []\n LIBRARIES = []\n\n if have_cuda:\n MACROS += [('HAVE_CUDA', '1')]\n INCLUDES += cuda_include_dirs\n LIBRARY_DIRS += cuda_lib_dirs\n LIBRARIES += ['cudart']\n\n if have_nccl:\n MACROS += [('HAVE_NCCL', '1')]\n INCLUDES += nccl_include_dirs\n LIBRARY_DIRS += nccl_lib_dirs\n LIBRARIES += nccl_libs\n\n if have_ddl:\n MACROS += [('HAVE_DDL', '1')]\n INCLUDES += ddl_include_dirs\n LIBRARY_DIRS += ddl_lib_dirs\n LIBRARIES += ['ddl', 'ddl_pack']\n\n if gpu_allreduce:\n MACROS += [('HOROVOD_GPU_ALLREDUCE', \"'%s'\" % gpu_allreduce[0])]\n\n if gpu_allgather:\n MACROS += [('HOROVOD_GPU_ALLGATHER', \"'%s'\" % gpu_allgather[0])]\n\n if gpu_broadcast:\n MACROS += [('HOROVOD_GPU_BROADCAST', \"'%s'\" % gpu_broadcast[0])]\n\n return dict(MACROS=MACROS,\n INCLUDES=INCLUDES,\n SOURCES=SOURCES,\n COMPILE_FLAGS=COMPILE_FLAGS,\n LINK_FLAGS=LINK_FLAGS,\n LIBRARY_DIRS=LIBRARY_DIRS,\n LIBRARIES=LIBRARIES)\n\n\ndef build_tf_extension(build_ext, options):\n check_tf_version()\n tf_compile_flags, tf_link_flags = get_tf_flags(\n build_ext, options['COMPILE_FLAGS'])\n\n tensorflow_mpi_lib.define_macros = options['MACROS']\n tensorflow_mpi_lib.include_dirs = options['INCLUDES']\n tensorflow_mpi_lib.sources = options['SOURCES'] + \\\n ['horovod/tensorflow/mpi_ops.cc']\n tensorflow_mpi_lib.extra_compile_args = options['COMPILE_FLAGS'] + \\\n tf_compile_flags\n tensorflow_mpi_lib.extra_link_args = options['LINK_FLAGS'] + tf_link_flags\n tensorflow_mpi_lib.library_dirs = options['LIBRARY_DIRS']\n tensorflow_mpi_lib.libraries = options['LIBRARIES']\n\n build_ext.build_extension(tensorflow_mpi_lib)\n\n\ndef parse_version(version_str):\n m = re.match('^(\\d+)(?:\\.(\\d+))?(?:\\.(\\d+))?(?:\\.(\\d+))?', version_str)\n if m is None:\n return None\n\n # turn version string to long integer\n version = int(m.group(1)) * 10 ** 9\n if m.group(2) is not None:\n version += int(m.group(2)) * 10 ** 6\n if m.group(3) is not None:\n version += int(m.group(3)) * 10 ** 3\n if m.group(4) is not None:\n version += int(m.group(4))\n 
return version\n\n\ndef dummy_import_torch():\n try:\n import torch\n except:\n pass\n\n\ndef check_torch_version():\n try:\n import torch\n if LooseVersion(torch.__version__) < LooseVersion('0.4.0'):\n raise DistutilsPlatformError(\n 'Your PyTorch version %s is outdated. '\n 'Horovod requires torch>=0.4.0' % torch.__version__)\n except ImportError:\n raise DistutilsPlatformError(\n 'import torch failed, is it installed?\\n\\n%s' % traceback.format_exc())\n\n # parse version\n version = parse_version(torch.__version__)\n if version is None:\n raise DistutilsPlatformError(\n 'Unable to determine PyTorch version from the version string \\'%s\\'' % torch.__version__)\n return version\n\n\ndef is_torch_cuda():\n try:\n from torch.utils.ffi import create_extension\n cuda_test_ext = create_extension(\n name='horovod.torch.test_cuda',\n headers=['horovod/torch/dummy.h'],\n sources=[],\n with_cuda=True,\n extra_compile_args=['-std=c11', '-fPIC', '-O2']\n )\n cuda_test_ext.build()\n return True\n except:\n print('INFO: Above error indicates that this PyTorch installation does not support CUDA.')\n return False\n\n\ndef is_torch_cuda_v2(build_ext, include_dirs, extra_compile_args):\n try:\n from torch.utils.cpp_extension import include_paths\n test_compile(build_ext, 'test_torch_cuda', include_dirs=include_dirs + include_paths(cuda=True),\n extra_compile_preargs=extra_compile_args, code=textwrap.dedent('''\\\n #include <THC/THC.h>\n void test() {\n }\n '''))\n return True\n except (CompileError, LinkError, EnvironmentError):\n print('INFO: Above error indicates that this PyTorch installation does not support CUDA.')\n return False\n\n\ndef check_macro(macros, key):\n return any(k == key and v for k, v in macros)\n\n\ndef set_macro(macros, key, new_value):\n if any(k == key for k, _ in macros):\n return [(k, new_value if k == key else v) for k, v in macros]\n else:\n return macros + [(key, new_value)]\n\n\nclass protect_files(object):\n def __init__(self, *files):\n self.files = files\n\n def __enter__(self):\n for file in self.files:\n os.rename(file, file + '.protected')\n\n def __exit__(self, type, value, traceback):\n for file in self.files:\n os.rename(file + '.protected', file)\n\n\ndef build_torch_extension(build_ext, options, torch_version):\n have_cuda = is_torch_cuda()\n if not have_cuda and check_macro(options['MACROS'], 'HAVE_CUDA'):\n raise DistutilsPlatformError(\n 'Horovod build with GPU support was requested, but this PyTorch '\n 'installation does not support CUDA.')\n\n # Update HAVE_CUDA to mean that PyTorch supports CUDA. Internally, we will be checking\n # HOROVOD_GPU_(ALLREDUCE|ALLGATHER|BROADCAST) to decide whether we should use GPU\n # version or transfer tensors to CPU memory for those operations.\n updated_macros = set_macro(\n options['MACROS'], 'HAVE_CUDA', str(int(have_cuda)))\n\n # Export TORCH_VERSION equal to our representation of torch.__version__. 
Internally it's\n # used for backwards compatibility checks.\n updated_macros = set_macro(updated_macros, 'TORCH_VERSION', str(torch_version))\n\n # Create_extension overwrites these files which are customized, we need to protect them.\n with protect_files('horovod/torch/mpi_lib/__init__.py',\n 'horovod/torch/mpi_lib_impl/__init__.py'):\n from torch.utils.ffi import create_extension\n ffi_iface = create_extension(\n name='horovod.torch.mpi_lib',\n headers=['horovod/torch/interface.h'] +\n (['horovod/torch/interface_cuda.h'] if have_cuda else []),\n with_cuda=have_cuda,\n language='c',\n package=True,\n sources=[],\n extra_compile_args=['-std=c11', '-fPIC', '-O2']\n )\n ffi_impl = create_extension(\n name='horovod.torch.mpi_lib_impl',\n headers=[],\n with_cuda=have_cuda,\n language='c++',\n package=True,\n source_extension='.cc',\n define_macros=updated_macros,\n include_dirs=options['INCLUDES'],\n sources=options['SOURCES'] + ['horovod/torch/mpi_ops.cc',\n 'horovod/torch/handle_manager.cc',\n 'horovod/torch/ready_event.cc',\n 'horovod/torch/tensor_util.cc',\n 'horovod/torch/cuda_util.cc',\n 'horovod/torch/adapter.cc'],\n extra_compile_args=options['COMPILE_FLAGS'],\n extra_link_args=options['LINK_FLAGS'],\n library_dirs=options['LIBRARY_DIRS'],\n libraries=options['LIBRARIES']\n )\n\n for ffi, setuptools_ext in [(ffi_iface, torch_mpi_lib),\n (ffi_impl, torch_mpi_lib_impl)]:\n ffi_ext = ffi.distutils_extension()\n # ffi_ext is distutils Extension, not setuptools Extension\n for k, v in ffi_ext.__dict__.items():\n setuptools_ext.__dict__[k] = v\n build_ext.build_extension(setuptools_ext)\n\n\ndef build_torch_extension_v2(build_ext, options, torch_version):\n have_cuda = is_torch_cuda_v2(build_ext, include_dirs=options['INCLUDES'],\n extra_compile_args=options['COMPILE_FLAGS'])\n if not have_cuda and check_macro(options['MACROS'], 'HAVE_CUDA'):\n raise DistutilsPlatformError(\n 'Horovod build with GPU support was requested, but this PyTorch '\n 'installation does not support CUDA.')\n\n # Update HAVE_CUDA to mean that PyTorch supports CUDA. Internally, we will be checking\n # HOROVOD_GPU_(ALLREDUCE|ALLGATHER|BROADCAST) to decide whether we should use GPU\n # version or transfer tensors to CPU memory for those operations.\n updated_macros = set_macro(\n options['MACROS'], 'HAVE_CUDA', str(int(have_cuda)))\n\n # Export TORCH_VERSION equal to our representation of torch.__version__. 
Internally it's\n # used for backwards compatibility checks.\n updated_macros = set_macro(updated_macros, 'TORCH_VERSION', str(torch_version))\n\n # Always set _GLIBCXX_USE_CXX11_ABI, since PyTorch can only detect whether it was set to 1.\n import torch\n updated_macros = set_macro(updated_macros, '_GLIBCXX_USE_CXX11_ABI',\n str(int(torch.compiled_with_cxx11_abi())))\n\n # PyTorch requires -DTORCH_API_INCLUDE_EXTENSION_H\n updated_macros = set_macro(updated_macros, 'TORCH_API_INCLUDE_EXTENSION_H', '1')\n\n if have_cuda:\n from torch.utils.cpp_extension import CUDAExtension as TorchExtension\n else:\n # CUDAExtension fails with `ld: library not found for -lcudart` if CUDA is not present\n from torch.utils.cpp_extension import CppExtension as TorchExtension\n ext = TorchExtension(torch_mpi_lib_v2.name,\n define_macros=updated_macros,\n include_dirs=options['INCLUDES'],\n sources=options['SOURCES'] + ['horovod/torch/mpi_ops_v2.cc',\n 'horovod/torch/handle_manager.cc',\n 'horovod/torch/ready_event.cc',\n 'horovod/torch/cuda_util.cc',\n 'horovod/torch/adapter_v2.cc'],\n extra_compile_args=options['COMPILE_FLAGS'],\n extra_link_args=options['LINK_FLAGS'],\n library_dirs=options['LIBRARY_DIRS'],\n libraries=options['LIBRARIES'])\n\n # Patch an existing torch_mpi_lib_v2 extension object.\n for k, v in ext.__dict__.items():\n torch_mpi_lib_v2.__dict__[k] = v\n build_ext.build_extension(torch_mpi_lib_v2)\n\n\n# run the customize_compiler\nclass custom_build_ext(build_ext):\n def build_extensions(self):\n options = get_common_options(self)\n built_plugins = []\n # If PyTorch is installed, it must be imported before TensorFlow, otherwise\n # we may get an error: dlopen: cannot load any more object with static TLS\n if not os.environ.get('HOROVOD_WITHOUT_PYTORCH'):\n dummy_import_torch()\n if not os.environ.get('HOROVOD_WITHOUT_TENSORFLOW'):\n try:\n build_tf_extension(self, options)\n built_plugins.append(True)\n except:\n if not os.environ.get('HOROVOD_WITH_TENSORFLOW'):\n print('INFO: Unable to build TensorFlow plugin, will skip it.\\n\\n'\n '%s' % traceback.format_exc(), file=sys.stderr)\n built_plugins.append(False)\n else:\n raise\n if not os.environ.get('HOROVOD_WITHOUT_PYTORCH'):\n try:\n torch_version = check_torch_version()\n if torch_version >= 1000000000:\n build_torch_extension_v2(self, options, torch_version)\n else:\n build_torch_extension(self, options, torch_version)\n built_plugins.append(True)\n except:\n if not os.environ.get('HOROVOD_WITH_PYTORCH'):\n print('INFO: Unable to build PyTorch plugin, will skip it.\\n\\n'\n '%s' % traceback.format_exc(), file=sys.stderr)\n built_plugins.append(False)\n else:\n raise\n if not built_plugins:\n raise DistutilsError(\n 'Both TensorFlow and PyTorch plugins were excluded from build. Aborting.')\n if not any(built_plugins):\n raise DistutilsError(\n 'Neither TensorFlow nor PyTorch plugins were built. 
See errors above.')\n\n\nsetup(name='horovod',\n version=__version__,\n packages=find_packages(),\n description='Distributed training framework for TensorFlow, Keras, and PyTorch.',\n author='Uber Technologies, Inc.',\n long_description=textwrap.dedent('''\\\n Horovod is a distributed training framework for TensorFlow, Keras, and PyTorch.\n The goal of Horovod is to make distributed Deep Learning fast and easy to use.'''),\n url='https://github.com/uber/horovod',\n classifiers=[\n 'License :: OSI Approved :: Apache Software License'\n ],\n ext_modules=[tensorflow_mpi_lib, torch_mpi_lib, torch_mpi_lib_impl, torch_mpi_lib_v2],\n cmdclass={'build_ext': custom_build_ext},\n # cffi is required for PyTorch\n # If cffi is specified in setup_requires, it will need libffi to be installed on the machine,\n # which is undesirable. Luckily, `install` action will install cffi before executing build,\n # so it's only necessary for `build*` or `bdist*` actions.\n setup_requires=['cffi>=1.4.0'] if is_build_action() else [],\n install_requires=['cffi>=1.4.0'],\n zip_safe=False)\n"
] |
[
[
"tensorflow.python.framework.load_library.load_op_library",
"torch.compiled_with_cxx11_abi",
"torch.utils.cpp_extension.include_paths",
"tensorflow.sysconfig.get_link_flags",
"torch.utils.ffi.create_extension",
"torch.utils.cpp_extension.CppExtension",
"tensorflow.sysconfig.get_lib",
"tensorflow.sysconfig.get_include",
"tensorflow.sysconfig.get_compile_flags"
]
] |
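One small, self-contained piece of the setup.py above is the parse_version() helper, which packs up to four numeric version fields into a single integer so that custom_build_ext can pick between the legacy FFI build and the v2 cpp_extension build with a plain comparison (torch_version >= 1000000000, i.e. torch >= 1.0.0). Restated here as a standalone sketch with example values:

```python
import re

def parse_version(version_str):
    m = re.match(r'^(\d+)(?:\.(\d+))?(?:\.(\d+))?(?:\.(\d+))?', version_str)
    if m is None:
        return None
    # turn the version string into one comparable integer
    version = int(m.group(1)) * 10 ** 9
    if m.group(2) is not None:
        version += int(m.group(2)) * 10 ** 6
    if m.group(3) is not None:
        version += int(m.group(3)) * 10 ** 3
    if m.group(4) is not None:
        version += int(m.group(4))
    return version

print(parse_version('1.0.1'))  # 1000001000  -> triggers build_torch_extension_v2
print(parse_version('0.4.0'))  # 4000000     -> legacy FFI build path
```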
jay-johnson/donkeycar
|
[
"6968ee50ae5a43b28ce21d660245c008337885b8"
] |
[
"donkeycar/management/base.py"
] |
[
"\nimport sys\nimport os\nimport socket\nimport shutil\nimport argparse\nimport json\nimport time\n\nimport donkeycar as dk\nfrom donkeycar.parts.datastore import Tub\nfrom donkeycar.utils import *\nfrom donkeycar.management.tub import TubManager\nfrom donkeycar.management.joystick_creator import CreateJoystick\nimport numpy as np\n\nPACKAGE_PATH = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\nTEMPLATES_PATH = os.path.join(PACKAGE_PATH, 'templates')\n\ndef make_dir(path):\n real_path = os.path.expanduser(path)\n print('making dir ', real_path)\n if not os.path.exists(real_path):\n os.makedirs(real_path)\n return real_path\n\n\ndef load_config(config_path):\n\n '''\n load a config from the given path\n '''\n conf = os.path.expanduser(config_path)\n\n if not os.path.exists(conf):\n print(\"No config file at location: %s. Add --config to specify\\\n location or run from dir containing config.py.\" % conf)\n return None\n\n try:\n cfg = dk.load_config(conf)\n except:\n print(\"Exception while loading config from\", conf)\n return None\n\n return cfg\n\n\nclass BaseCommand(object):\n pass\n\n\nclass CreateCar(BaseCommand):\n \n def parse_args(self, args):\n parser = argparse.ArgumentParser(prog='createcar', usage='%(prog)s [options]')\n parser.add_argument('--path', default=None, help='path where to create car folder')\n parser.add_argument('--template', default=None, help='name of car template to use')\n parser.add_argument('--overwrite', action='store_true', help='should replace existing files')\n \n parsed_args = parser.parse_args(args)\n return parsed_args\n \n def run(self, args):\n args = self.parse_args(args)\n self.create_car(path=args.path, template=args.template, overwrite=args.overwrite)\n \n def create_car(self, path, template='complete', overwrite=False):\n \"\"\"\n This script sets up the folder structure for donkey to work.\n It must run without donkey installed so that people installing with\n docker can build the folder structure for docker to mount to.\n \"\"\"\n\n #these are neeeded incase None is passed as path\n path = path or '~/mycar'\n template = template or 'complete'\n\n\n print(\"Creating car folder: {}\".format(path))\n path = make_dir(path)\n \n print(\"Creating data & model folders.\")\n folders = ['models', 'data', 'logs']\n folder_paths = [os.path.join(path, f) for f in folders] \n for fp in folder_paths:\n make_dir(fp)\n \n #add car application and config files if they don't exist\n app_template_path = os.path.join(TEMPLATES_PATH, template+'.py')\n config_template_path = os.path.join(TEMPLATES_PATH, 'cfg_' + template + '.py')\n myconfig_template_path = os.path.join(TEMPLATES_PATH, 'myconfig.py')\n train_template_path = os.path.join(TEMPLATES_PATH, 'train.py')\n car_app_path = os.path.join(path, 'manage.py')\n car_config_path = os.path.join(path, 'config.py')\n mycar_config_path = os.path.join(path, 'myconfig.py')\n train_app_path = os.path.join(path, 'train.py')\n \n if os.path.exists(car_app_path) and not overwrite:\n print('Car app already exists. Delete it and rerun createcar to replace.')\n else:\n print(\"Copying car application template: {}\".format(template))\n shutil.copyfile(app_template_path, car_app_path)\n \n if os.path.exists(car_config_path) and not overwrite:\n print('Car config already exists. Delete it and rerun createcar to replace.')\n else:\n print(\"Copying car config defaults. 
Adjust these before starting your car.\")\n shutil.copyfile(config_template_path, car_config_path)\n \n if os.path.exists(train_app_path) and not overwrite:\n print('Train already exists. Delete it and rerun createcar to replace.')\n else:\n print(\"Copying train script. Adjust these before starting your car.\")\n shutil.copyfile(train_template_path, train_app_path)\n\n if not os.path.exists(mycar_config_path):\n print(\"Copying my car config overrides\")\n shutil.copyfile(myconfig_template_path, mycar_config_path)\n #now copy file contents from config to myconfig, with all lines commented out.\n cfg = open(car_config_path, \"rt\")\n mcfg = open(mycar_config_path, \"at\")\n copy = False\n for line in cfg:\n if \"import os\" in line:\n copy = True\n if copy: \n mcfg.write(\"# \" + line)\n cfg.close()\n mcfg.close()\n\n \n print(\"Donkey setup complete.\")\n\n\nclass UpdateCar(BaseCommand):\n '''\n always run in the base ~/mycar dir to get latest\n '''\n\n def parse_args(self, args):\n parser = argparse.ArgumentParser(prog='update', usage='%(prog)s [options]')\n parsed_args = parser.parse_args(args)\n return parsed_args\n \n def run(self, args):\n cc = CreateCar()\n cc.create_car(path=\".\", overwrite=True)\n \n\nclass FindCar(BaseCommand):\n def parse_args(self, args):\n pass \n\n \n def run(self, args):\n print('Looking up your computer IP address...')\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect((\"8.8.8.8\",80))\n ip = s.getsockname()[0] \n print('Your IP address: %s ' %s.getsockname()[0])\n s.close()\n \n print(\"Finding your car's IP address...\")\n cmd = \"sudo nmap -sP \" + ip + \"/24 | awk '/^Nmap/{ip=$NF}/B8:27:EB/{print ip}'\"\n print(\"Your car's ip address is:\" )\n os.system(cmd)\n \n \n \nclass CalibrateCar(BaseCommand): \n \n def parse_args(self, args):\n parser = argparse.ArgumentParser(prog='calibrate', usage='%(prog)s [options]')\n parser.add_argument('--channel', help=\"The channel you'd like to calibrate [0-15]\")\n parser.add_argument('--address', default='0x40', help=\"The i2c address you'd like to calibrate [default 0x40]\")\n parser.add_argument('--bus', default=None, help=\"The i2c bus you'd like to calibrate [default autodetect]\")\n parser.add_argument('--pwmFreq', default=60, help=\"The frequency to use for the PWM\")\n parsed_args = parser.parse_args(args)\n return parsed_args\n\n def run(self, args):\n from donkeycar.parts.actuator import PCA9685\n from donkeycar.parts.sombrero import Sombrero\n\n s = Sombrero()\n\n args = self.parse_args(args)\n channel = int(args.channel)\n busnum = None\n if args.bus:\n busnum = int(args.bus)\n address = int(args.address, 16)\n print('init PCA9685 on channel %d address %s bus %s' %(channel, str(hex(address)), str(busnum)))\n freq = int(args.pwmFreq)\n print(\"Using PWM freq: {}\".format(freq))\n c = PCA9685(channel, address=address, busnum=busnum, frequency=freq)\n print()\n while True:\n try:\n val = input(\"\"\"Enter a PWM setting to test ('q' for quit) (0-1500): \"\"\")\n if val == 'q' or val == 'Q':\n break\n pmw = int(val)\n c.run(pmw)\n except KeyboardInterrupt:\n print(\"\\nKeyboardInterrupt received, exit.\")\n break\n except Exception as ex:\n print(\"Oops, {}\".format(ex))\n\n\nclass MakeMovieShell(BaseCommand):\n '''\n take the make movie args and then call make movie command\n with lazy imports\n '''\n def __init__(self):\n self.deg_to_rad = math.pi / 180.0\n\n def parse_args(self, args):\n parser = argparse.ArgumentParser(prog='makemovie')\n parser.add_argument('--tub', help='The tub to 
make movie from')\n parser.add_argument('--out', default='tub_movie.mp4', help='The movie filename to create. default: tub_movie.mp4')\n parser.add_argument('--config', default='./config.py', help='location of config file to use. default: ./config.py')\n parser.add_argument('--model', default=None, help='the model to use to show control outputs')\n parser.add_argument('--type', default=None, help='the model type to load')\n parser.add_argument('--salient', action=\"store_true\", help='should we overlay salient map showing activations')\n parser.add_argument('--start', type=int, default=1, help='first frame to process')\n parser.add_argument('--end', type=int, default=-1, help='last frame to process')\n parser.add_argument('--scale', type=int, default=2, help='make image frame output larger by X mult')\n parsed_args = parser.parse_args(args)\n return parsed_args, parser\n\n def run(self, args):\n '''\n Load the images from a tub and create a movie from them.\n Movie\n '''\n args, parser = self.parse_args(args)\n\n from donkeycar.management.makemovie import MakeMovie\n\n mm = MakeMovie()\n mm.run(args, parser)\n\n\nclass TubCheck(BaseCommand):\n def parse_args(self, args):\n parser = argparse.ArgumentParser(prog='tubcheck', usage='%(prog)s [options]')\n parser.add_argument('tubs', nargs='+', help='paths to tubs')\n parser.add_argument('--fix', action='store_true', help='remove problem records')\n parser.add_argument('--delete_empty', action='store_true', help='delete tub dir with no records')\n parsed_args = parser.parse_args(args)\n return parsed_args\n\n def check(self, tub_paths, fix=False, delete_empty=False):\n '''\n Check for any problems. Looks at tubs and find problems in any records or images that won't open.\n If fix is True, then delete images and records that cause problems.\n '''\n tubs = [Tub(path) for path in tub_paths]\n\n for tub in tubs:\n tub.check(fix=fix)\n if delete_empty and tub.get_num_records() == 0:\n import shutil\n print(\"removing empty tub\", tub.path)\n shutil.rmtree(tub.path)\n\n def run(self, args):\n args = self.parse_args(args)\n self.check(args.tubs, args.fix, args.delete_empty)\n\n\nclass ShowHistogram(BaseCommand):\n\n def parse_args(self, args):\n parser = argparse.ArgumentParser(prog='tubhist', usage='%(prog)s [options]')\n parser.add_argument('--tub', nargs='+', help='paths to tubs')\n parser.add_argument('--record', default=None, help='name of record to create histogram')\n parsed_args = parser.parse_args(args)\n return parsed_args\n\n def show_histogram(self, tub_paths, record_name):\n '''\n Produce a histogram of record type frequency in the given tub\n '''\n from matplotlib import pyplot as plt\n from donkeycar.parts.datastore import TubGroup\n\n tg = TubGroup(tub_paths=tub_paths)\n if record_name is not None:\n tg.df[record_name].hist(bins=50)\n else:\n tg.df.hist(bins=50)\n\n try:\n filename = os.path.basename(tub_paths) + '_hist_%s.png' % record_name.replace('/', '_')\n plt.savefig(filename)\n print('saving image to:', filename)\n except:\n pass\n plt.show()\n\n def run(self, args):\n args = self.parse_args(args)\n args.tub = ','.join(args.tub)\n self.show_histogram(args.tub, args.record)\n\n\nclass ConSync(BaseCommand):\n '''\n continuously rsync data\n '''\n \n def parse_args(self, args):\n parser = argparse.ArgumentParser(prog='consync', usage='%(prog)s [options]')\n parser.add_argument('--dir', default='./cont_data/', help='paths to tubs')\n parser.add_argument('--delete', default='y', help='remove files locally that were deleted remotely 
y=yes n=no')\n parsed_args = parser.parse_args(args)\n return parsed_args\n\n def run(self, args):\n args = self.parse_args(args)\n cfg = load_config('config.py')\n dest_dir = args.dir\n del_arg = \"\"\n\n if args.delete == 'y':\n reply = input('WARNING:this rsync operation will delete data in the target dir: %s. ok to proceeed? [y/N]: ' % dest_dir)\n\n if reply != 'y' and reply != \"Y\":\n return\n del_arg = \"--delete\"\n\n if not dest_dir[-1] == '/' and not dest_dir[-1] == '\\\\':\n print(\"Desination dir should end with a /\")\n return\n\n try:\n os.mkdir(dest_dir)\n except:\n pass\n\n while True:\n command = \"rsync -aW --progress %s@%s:%s/data/ %s %s\" %\\\n (cfg.PI_USERNAME, cfg.PI_HOSTNAME, cfg.PI_DONKEY_ROOT, dest_dir, del_arg)\n\n os.system(command)\n time.sleep(5)\n\nclass ConTrain(BaseCommand):\n '''\n continuously train data\n '''\n \n def parse_args(self, args):\n parser = argparse.ArgumentParser(prog='contrain', usage='%(prog)s [options]')\n parser.add_argument('--tub', default='./cont_data/*', help='paths to tubs')\n parser.add_argument('--model', default='./models/drive.h5', help='path to model')\n parser.add_argument('--transfer', default=None, help='path to transfer model')\n parser.add_argument('--type', default='categorical', help='type of model (linear|categorical|rnn|imu|behavior|3d)')\n parser.add_argument('--aug', action=\"store_true\", help='perform image augmentation') \n parsed_args = parser.parse_args(args)\n return parsed_args\n\n def run(self, args):\n args = self.parse_args(args)\n cfg = load_config('config.py')\n import sys\n sys.path.append('.')\n from train import multi_train\n continuous = True\n multi_train(cfg, args.tub, args.model, args.transfer, args.type, continuous, args.aug)\n\n\nclass ShowCnnActivations(BaseCommand):\n\n def __init__(self):\n import matplotlib.pyplot as plt\n self.plt = plt\n\n def get_activations(self, image_path, model_path, cfg):\n '''\n Extracts features from an image\n\n returns activations/features\n '''\n from tensorflow.python.keras.models import load_model, Model\n\n model_path = os.path.expanduser(model_path)\n image_path = os.path.expanduser(image_path)\n\n model = load_model(model_path)\n image = load_scaled_image_arr(image_path, cfg)[None, ...]\n\n conv_layer_names = self.get_conv_layers(model)\n input_layer = model.get_layer(name='img_in').input\n activations = [] \n for conv_layer_name in conv_layer_names:\n output_layer = model.get_layer(name=conv_layer_name).output\n\n layer_model = Model(inputs=[input_layer], outputs=[output_layer])\n activations.append(layer_model.predict(image)[0])\n return activations\n\n def create_figure(self, activations):\n import math\n cols = 6\n\n for i, layer in enumerate(activations):\n fig = self.plt.figure()\n fig.suptitle('Layer {}'.format(i+1))\n\n print('layer {} shape: {}'.format(i+1, layer.shape))\n feature_maps = layer.shape[2]\n rows = math.ceil(feature_maps / cols)\n\n for j in range(feature_maps):\n self.plt.subplot(rows, cols, j + 1)\n\n self.plt.imshow(layer[:, :, j])\n \n self.plt.show()\n\n def get_conv_layers(self, model):\n conv_layers = []\n for layer in model.layers:\n if layer.__class__.__name__ == 'Conv2D':\n conv_layers.append(layer.name)\n return conv_layers\n\n def parse_args(self, args):\n parser = argparse.ArgumentParser(prog='cnnactivations', usage='%(prog)s [options]')\n parser.add_argument('--image', help='path to image')\n parser.add_argument('--model', default=None, help='path to model')\n parser.add_argument('--config', default='./config.py', 
help='location of config file to use. default: ./config.py')\n \n parsed_args = parser.parse_args(args)\n return parsed_args\n\n def run(self, args):\n args = self.parse_args(args)\n cfg = load_config(args.config)\n activations = self.get_activations(args.image, args.model, cfg)\n self.create_figure(activations)\n\n\nclass ShowPredictionPlots(BaseCommand):\n\n def plot_predictions(self, cfg, tub_paths, model_path, limit, model_type):\n '''\n Plot model predictions for angle and throttle against data from tubs.\n\n '''\n import matplotlib.pyplot as plt\n import pandas as pd\n\n model_path = os.path.expanduser(model_path)\n model = dk.utils.get_model_by_type(model_type, cfg)\n # This just gets us the text for the plot title:\n if model_type is None:\n model_type = cfg.DEFAULT_MODEL_TYPE\n model.load(model_path)\n\n records = gather_records(cfg, tub_paths)\n user_angles = []\n user_throttles = []\n pilot_angles = []\n pilot_throttles = [] \n\n records = records[:limit]\n num_records = len(records)\n print('processing %d records:' % num_records)\n\n for record_path in records:\n with open(record_path, 'r') as fp:\n record = json.load(fp)\n img_filename = os.path.join(tub_paths, record['cam/image_array'])\n img = load_scaled_image_arr(img_filename, cfg)\n user_angle = float(record[\"user/angle\"])\n user_throttle = float(record[\"user/throttle\"])\n pilot_angle, pilot_throttle = model.run(img)\n\n user_angles.append(user_angle)\n user_throttles.append(user_throttle)\n pilot_angles.append(pilot_angle)\n pilot_throttles.append(pilot_throttle)\n\n angles_df = pd.DataFrame({'user_angle': user_angles, 'pilot_angle': pilot_angles})\n throttles_df = pd.DataFrame({'user_throttle': user_throttles, 'pilot_throttle': pilot_throttles})\n\n fig = plt.figure()\n\n title = \"Model Predictions\\nTubs: \" + tub_paths + \"\\nModel: \" + model_path + \"\\nType: \" + model_type\n fig.suptitle(title)\n\n ax1 = fig.add_subplot(211)\n ax2 = fig.add_subplot(212)\n\n angles_df.plot(ax=ax1)\n throttles_df.plot(ax=ax2)\n\n ax1.legend(loc=4)\n ax2.legend(loc=4)\n\n plt.savefig(model_path + '_pred.png')\n plt.show()\n\n def parse_args(self, args):\n parser = argparse.ArgumentParser(prog='tubplot', usage='%(prog)s [options]')\n parser.add_argument('--tub', nargs='+', help='paths to tubs')\n parser.add_argument('--model', default=None, help='name of record to create histogram')\n parser.add_argument('--limit', default=1000, help='how many records to process')\n parser.add_argument('--type', default=None, help='model type')\n parser.add_argument('--config', default='./config.py', help='location of config file to use. 
default: ./config.py')\n parsed_args = parser.parse_args(args)\n return parsed_args\n\n def run(self, args):\n args = self.parse_args(args)\n args.tub = ','.join(args.tub)\n cfg = load_config(args.config)\n self.plot_predictions(cfg, args.tub, args.model, args.limit, args.type)\n \n\ndef execute_from_command_line():\n \"\"\"\n This is the function linked to the \"donkey\" terminal command.\n \"\"\"\n commands = {\n 'createcar': CreateCar,\n 'findcar': FindCar,\n 'calibrate': CalibrateCar,\n 'tubclean': TubManager,\n 'tubhist': ShowHistogram,\n 'tubplot': ShowPredictionPlots,\n 'tubcheck': TubCheck,\n 'makemovie': MakeMovieShell, \n 'createjs': CreateJoystick,\n 'consync': ConSync,\n 'contrain': ConTrain,\n 'cnnactivations': ShowCnnActivations,\n 'update': UpdateCar,\n }\n \n args = sys.argv[:]\n\n if len(args) > 1 and args[1] in commands.keys():\n command = commands[args[1]]\n c = command()\n c.run(args[2:])\n else:\n dk.utils.eprint('Usage: The available commands are:')\n dk.utils.eprint(list(commands.keys()))\n \n \nif __name__ == \"__main__\":\n execute_from_command_line()\n \n"
] |
[
[
"matplotlib.pyplot.savefig",
"pandas.DataFrame",
"tensorflow.python.keras.models.load_model",
"tensorflow.python.keras.models.Model",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
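The matplotlib/pandas calls listed for donkeycar/management/base.py all come from ShowPredictionPlots.plot_predictions(), which plots user vs. pilot angle and throttle on two stacked axes. A cut-down, runnable version of just that plotting step, with random numbers standing in for the tub records and the model output:

```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

n = 100
angles_df = pd.DataFrame({'user_angle': np.random.uniform(-1, 1, n),
                          'pilot_angle': np.random.uniform(-1, 1, n)})
throttles_df = pd.DataFrame({'user_throttle': np.random.uniform(0, 1, n),
                             'pilot_throttle': np.random.uniform(0, 1, n)})

fig = plt.figure()
fig.suptitle('Model Predictions')
ax1 = fig.add_subplot(211)   # angles on top
ax2 = fig.add_subplot(212)   # throttles below
angles_df.plot(ax=ax1)
throttles_df.plot(ax=ax2)
ax1.legend(loc=4)
ax2.legend(loc=4)
plt.savefig('model_pred.png')
plt.show()
```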
Sun-Joong/aifh
|
[
"1b6363d26f54b77348020ce88ced0670568ed736"
] |
[
"vol3/vol3-python-examples/examples/example_iris.py"
] |
[
"#!/usr/bin/env python\n\"\"\"\n Artificial Intelligence for Humans\n Volume 3: Deep Learning and Neural Networks\n Python Version\n http://www.aifh.org\n http://www.jeffheaton.com\n Code repository:\n https://github.com/jeffheaton/aifh\n Copyright 2015 by Jeff Heaton\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n http://www.apache.org/licenses/LICENSE-2.0\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n For more information on Heaton Research copyrights, licenses\n and trademarks visit:\n http://www.heatonresearch.com/copyright\n\"\"\"\n\nimport os\nimport sys\n\n# Find the AIFH core files\naifh_dir = os.path.dirname(os.path.abspath(__file__))\ndata_dir = aifh_dir\naifh_dir = os.path.abspath(aifh_dir + os.sep + \"..\" + os.sep + \"lib\" + os.sep + \"aifh\")\ndata_dir = os.path.abspath(data_dir + os.sep + \"..\" + os.sep + \"datasets\")\nsys.path.append(aifh_dir)\n\nfrom util import *\nimport pandas as pd\nimport types\nfrom sklearn import svm, datasets\nfrom sklearn.model_selection import train_test_split\nimport sklearn\nimport numpy as np\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense, Activation\nfrom keras.utils import np_utils\nfrom sklearn.preprocessing import OneHotEncoder\nfrom normalize import *\n\nfilename = os.path.join(data_dir, \"iris.csv\")\ndf = pd.read_csv(filename, na_values=['NA', '?'])\n\n# Encode feature vector\nencode_numeric_zscore(df, 'petal_w')\nencode_numeric_zscore(df, 'petal_l')\nencode_numeric_zscore(df, 'sepal_w')\nencode_numeric_zscore(df, 'sepal_l')\nspecies = encode_text_index(df, \"species\")\nnum_classes = len(species)\n\n# Create x & y for training\n\n# Create the x-side (feature vectors) of the training\nx, y = to_xy(df, 'species')\n\n# Split into train/test\nx_train, x_test, y_train, y_test = train_test_split(\n x, y, test_size=0.25, random_state=45)\n\nnet = Sequential()\nnet.add(Dense(20, input_shape=(x.shape[1],)))\nnet.add(Activation('relu'))\nnet.add(Dense(y.shape[1]))\nnet.add(Activation('softmax'))\nnet.compile(loss='categorical_crossentropy', optimizer='adam')\n\n# Train the network\nnet.fit(x_train,y_train,epochs=300)\n\n# Predict the validation set\npred_y = net.predict(x_test)\npred_y = np.argmax(pred_y,axis=1)\ny_test2 = np.argmax(y_test,axis=1)\nprint(pred_y)\nprint(y_test2)\n\n\n# Display predictions and count the number of incorrect predictions.\n\ncount = 0\ncorrect = 0\nfor element in zip(x_test,y_test2,pred_y):\n print(element)\n print(\"Input: sepal length: {}, sepal width: {}, petal length: {}, petal width: {}; Expected: {}; Actual: {}\".format(\n element[0][0],element[0][1],element[0][2],element[0][3],\n species[element[1]],\n species[element[2]]))\n if element[1] == element[2]:\n correct += 1\n count += 1\n\nprint(\"Correct: {}/{} ({}%)\".format(correct,count,(correct/count)*100))\n\n"
] |
[
[
"pandas.read_csv",
"numpy.argmax",
"sklearn.model_selection.train_test_split"
]
] |
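The iris example above scores itself by argmax-ing both the one-hot test labels and the softmax outputs and counting matches. The same bookkeeping in isolation, with made-up prediction scores in place of the trained Keras network:

```python
import numpy as np

y_test = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 1, 0]])   # one-hot ground truth
pred_y = np.array([[0.8, 0.1, 0.1], [0.2, 0.7, 0.1],
                   [0.1, 0.2, 0.7], [0.6, 0.3, 0.1]])              # softmax-style scores

pred_labels = np.argmax(pred_y, axis=1)   # [0, 1, 2, 0]
true_labels = np.argmax(y_test, axis=1)   # [0, 1, 2, 1]
accuracy = np.mean(pred_labels == true_labels)
print("Correct: {}/{} ({}%)".format(int(accuracy * len(true_labels)),
                                    len(true_labels), accuracy * 100))
# Correct: 3/4 (75.0%)
```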
neelabh17/MAVI-Face
|
[
"5dbf105b51a8b90203cd144f2fe671770d38eb81"
] |
[
"vimak.py"
] |
[
"\n # 'newOneToOneOhem_lr-beg=1.3e-03_lr-sch=1e-03_shuffle=False',\n # 'newOneToOneOhem_lr-beg=1.3e-03_lr-sch=None_shuffle=False',\nfrom torch.utils.tensorboard import SummaryWriter\nfrom toolbox.pickleOpers import loadup\nls=['5XOhemWithShuffleNoScheduler_epoch_36',\n 'new_1xOhem_shuffle_true_scheduler_e2_epoch_22',\n 'new_5xOhem_shuffle_true_scheduler_e2_epoch_10',\n # 'newOneToOneOhem_lr-beg=1.3e-03_lr-sch=1e-03_shuffle=False_epoch_40',\n # 'newOneToOneOhem_lr-beg=1.3e-03_lr-sch=None_shuffle=False_epoch_22',\n '1XOhemWithShuffleNoScheduler_lr-beg=1.3e-04_lr-sch=None_shuffle=True_epoch_40',\n 'newOhemTrainSingleSampling32_lr-beg=1.3e-03_lr-sch=None_shuffle=True_epoch_1',\n 'newOhemTrainSingleSampling32_lr-beg=1.3e-04_lr-sch=None_shuffle=True_epoch_4']\n # 'SingleSamplingOhemAdamLRe3_epoch_32',\n # 'Resnet50_Final']\n# ls=['5XOhemWithShuffleNoScheduler',\n# 'new_1xOhem_shuffle_true_scheduler_e2',\n# 'new_5xOhem_shuffle_true_scheduler_e2',\n# '1XOhemWithShuffleNoScheduler_lr-beg=1.3e-04_lr-sch=None_shuffle=True',\n# 'newOhemTrainSingleSampling32_lr-beg=1.3e-03_lr-sch=None_shuffle=True',\n# 'newOhemTrainSingleSampling32_lr-beg=1.3e-04_lr-sch=None_shuffle=True','Renet50_Final','SingleSamplingOhemAdamLRe3_epoch_32']\nwriter=SummaryWriter(\"prData7\")\nfor l in ls:\n a=loadup(\"evalData/{}_inferConf=0.7/prData/prCurve_val_epoch_{}.pickle\".format(l.split(\"_epoch_\")[0],l.split(\"_epoch_\")[1]))\n \n for i in range(len(a)):\n writer.add_scalars(\"curve/\",{l.split(\"_epoch_\")[0]:a[i][0]},a[i][1]*1000)\n\nls=['SingleSamplingOhemAdamLRe3_epoch_32',\n 'Resnet50_Final']\nfor l in ls:\n a=loadup(\"evalData/{}_inferConf=0.7/prData/prCurve_val.pickle\".format(l))\n \n for i in range(len(a)):\n writer.add_scalars(\"curve/\",{l:a[i][0]},a[i][1]*1000)\n\n\nwriter.close()\n\n# # # ls=[ 'newOhemTrainSingleSampling32_lr-beg=1.3e-03_lr-sch=None_shuffle=True_epoch_1', 'newOhemTrainSingleSampling32_lr-beg=1.3e-04_lr-sch=None_shuffle=True_epoch_4']\n# ls=[ 'newOneToOneOhem_lr-beg=1.3e-03_lr-sch=1e-03_shuffle=False_epoch_40', 'newOneToOneOhem_lr-beg=1.3e-03_lr-sch=None_shuffle=False_epoch_22']\n# import os\n\n# # os.system(f'python evaluate.py --trained_model \"SingleSamplingOhemAdamLRe3_epoch_32\" --confidence_threshold_infer 0.7')\n# # os.system(f'python evaluate.py --trained_model \"Resnet50_Final\" --confidence_threshold_infer 0.7')\n# for l in ls:\n# print(l)\n# os.system(f'python evaluate.py --trained_model \"{l.split(\"_epoch_\")[0]}\" --mode \"series\" --confidence_threshold_infer 0.7')\n\n\n\n# for l in ls:\n# print(l)\n# # os.system(f'python detect.py --trained_model \"{l}\" --save_name \"{l}\" --vis_thres 0.055 --mode \"images\"')\n# os.system(f'python detect.py --trained_model \"{l}\" --save_name \"{l}\" --vis_thres 0.7 --mode \"images\"')\n# # os.system(f'python detect.py --trained_model \"{l}\" --save_name \"{l}\" --vis_thres 0.43 --mode \"images\"')\n# # os.system(f'python detect.py --trained_model \"{l}\" --save_name \"{l}\" --vis_thres 0.055 --mode \"videos\" --fps 1')\n# # os.system(f'python detect.py --trained_model \"{l}\" --save_name \"{l}\" --vis_thres 0.055 --mode \"videos\" --fps 5')\n# # os.system(f'python detect.py --trained_model \"{l}\" --save_name \"{l}\" --vis_thres 0.43 --mode \"videos\" --fps 5')\n# os.system(f'python detect.py --trained_model \"{l}\" --save_name \"{l}\" --vis_thres 0.7 --mode \"videos\" --fps 5')\n# os.system(f'python detect.py --trained_model \"{l}\" --save_name \"{l}\" --vis_thres 0.7 --mode \"videos\" --fps 1')\n# # os.system(f'python detect.py 
--trained_model \"{l}\" --save_name \"{l}\" --vis_thres 0.43 --mode \"videos\" --fps 1')\n"
] |
[
[
"torch.utils.tensorboard.SummaryWriter"
]
] |
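vimak.py turns pickled precision/recall pairs into a pseudo PR curve by calling SummaryWriter.add_scalars() with recall (scaled to an integer step) as the x-axis and precision as the value. The same trick with synthetic points standing in for toolbox.pickleOpers.loadup(), which is repo-specific:

```python
from torch.utils.tensorboard import SummaryWriter

pr_points = [(0.95, 0.10), (0.90, 0.40), (0.80, 0.70), (0.60, 0.90)]  # (precision, recall)

writer = SummaryWriter("prData_demo")
for precision, recall in pr_points:
    # add_scalars(main_tag, {series_name: value}, global_step)
    writer.add_scalars("curve/", {"demo_model": precision}, int(recall * 1000))
writer.close()
```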
hasselg/pysubtracking
|
[
"e3666d89c98562361ed0521c366fb0a299f2fdae"
] |
[
"subtracking/grouse.py"
] |
[
"# Copyright 2015 Gregory Hasseler\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\n\nfrom .tracker import Tracker\n\n\nclass Grouse(Tracker):\n \"\"\"\n This class is an implementation of the Grassmannian Rank-One Update\n Subspace Estimation (GROUSE) subspace tracking algorithm, presented by\n Balzano et al. in http://arxiv.org/abs/1006.4046\n\n This class assumes the use of Numpy vectors.\n \"\"\"\n\n def __init__(self, ambient_dim, rank, step):\n \"\"\"\n Keyword arguments:\n ambient_dim -- ambient dimension of the observations\n rank -- estimate of the rank\n step -- step size\n \"\"\"\n self.ambient_dim = ambient_dim\n self.rank = rank\n self.step = step\n\n # Initialize our subspace estimate to a random orthogonal matrix\n self.U = np.linalg.qr(np.random.rand(ambient_dim, rank))[0] \n\n def consume(self, ob_vec, sample_vec, step=None):\n if step is None:\n step = self.step\n\n # Find our projection into the subspace\n projection = self._project(ob_vec, sample_vec)\n\n # Find the imputed measurement residual\n row_indices = np.nonzero(sample_vec)[0]\n imputed_measurement = self.U @ projection\n residual = np.zeros((self.ambient_dim, 1))\n residual[row_indices, :] = ob_vec[row_indices, :] - imputed_measurement[row_indices, :]\n\n sigma = np.linalg.norm(residual) * np.linalg.norm(imputed_measurement)\n\n normalized_imputed_measurement = np.nan_to_num(imputed_measurement / np.linalg.norm(imputed_measurement))\n normalized_residual = np.nan_to_num(residual / np.linalg.norm(residual))\n normalized_projection = np.nan_to_num(projection.T / np.linalg.norm(projection))\n\n lhs_inner_update = (np.cos(sigma * step) - 1) * normalized_imputed_measurement\n rhs_inner_update = np.sin(sigma * step) * normalized_residual\n rhs = (lhs_inner_update + rhs_inner_update) @ normalized_projection\n\n self.U = self.U + rhs\n"
] |
[
[
"numpy.nonzero",
"numpy.linalg.norm",
"numpy.cos",
"numpy.sin",
"numpy.random.rand",
"numpy.zeros"
]
] |
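The Grouse class above delegates its projection step to the inherited Tracker._project(), which is not shown in this row. Under the (unverified) assumption that _project() solves a least-squares problem on the observed rows, the rank-one update in consume() reduces, for fully observed vectors, to the toy loop below — a sketch of the algorithm, not the repository's API:

```python
import numpy as np

ambient_dim, rank, step, eps = 50, 3, 0.1, 1e-12
rng = np.random.default_rng(0)

U = np.linalg.qr(rng.random((ambient_dim, rank)))[0]   # random orthonormal start
true_basis = rng.random((ambient_dim, rank))           # subspace to be tracked

for _ in range(200):
    ob = true_basis @ rng.random((rank, 1))            # fully observed sample
    w = np.linalg.lstsq(U, ob, rcond=None)[0]          # assumed _project(): LS weights
    p = U @ w                                          # imputed measurement
    r = ob - p                                         # residual
    sigma = np.linalg.norm(r) * np.linalg.norm(p)
    p_hat = p / max(np.linalg.norm(p), eps)
    r_hat = r / max(np.linalg.norm(r), eps)
    w_hat = w.T / max(np.linalg.norm(w), eps)
    # Grassmannian rank-one update, same cos/sin form as Grouse.consume()
    U = U + ((np.cos(sigma * step) - 1.0) * p_hat + np.sin(sigma * step) * r_hat) @ w_hat

residual = ob - U @ np.linalg.lstsq(U, ob, rcond=None)[0]
print("norm of final residual:", np.linalg.norm(residual))
```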
volpatto/pysodes
|
[
"48add3ce16ee48e2f3af7a928935f9b22d74d908"
] |
[
"tests/test_odeint_integrate_const.py"
] |
[
"import pytest\nfrom pytest import approx\nimport numpy as np\nfrom scipy.integrate import solve_ivp\n\nfrom pysodes.odeint import integrate_const\n\n\ndef lotka_volterra(z, dzdt, t):\n x, y = z\n\n a = 1.5\n b = 1.0\n c = 3.0\n d = 1.0\n\n dzdt[0] = a*x - b*x*y\n dzdt[1] = -c*y + d*x*y\n\n return dzdt\n\n\ndef lotka_volterra_scipy(t, z):\n x, y = z\n\n a = 1.5\n b = 1.0\n c = 3.0\n d = 1.0\n\n dxdt = [\n a*x - b*x*y,\n -c*y + d*x*y\n ]\n return dxdt\n\n\ndef lorenz(x, dxdt, t):\n sigma = 10.0\n R = 28.0\n b = 8.0 / 3.0\n\n dxdt[0] = sigma * (x[1] - x[0])\n dxdt[1] = R * x[0] - x[1] - x[0] * x[2]\n dxdt[2] = -b * x[2] + x[0] * x[1]\n\n return dxdt\n\n\ndef lorenz_scipy(t, y):\n x = y\n\n sigma = 10.0\n R = 28.0\n b = 8.0 / 3.0\n\n dxdt_0 = sigma * (x[1] - x[0])\n dxdt_1 = R * x[0] - x[1] - x[0] * x[2]\n dxdt_2 = -b * x[2] + x[0] * x[1]\n\n return dxdt_0, dxdt_1, dxdt_2\n\n\n@pytest.mark.parametrize(\"odeint_method, rtol\", [\n [\"runge_kutta4\", 4e-3],\n [\"runge_kutta_cash_karp54\", 4e-3],\n [\"runge_kutta_fehlberg78\", 4e-3],\n # This relative error is expected, since midpoint is\n # a poor quality method and the problem is stiff\n [\"modified_midpoint\", 2e-2],\n])\ndef test_compare_regular_steppers_with_scipy_lv(odeint_method, rtol):\n t_span = (0., 10.)\n dt = 0.01\n num_of_steps = int((t_span[1] - t_span[0]) / dt) + 1\n t_eval_scipy = np.linspace(t_span[0], t_span[1], num_of_steps)\n y0 = np.array([10., 5.])\n\n _, solution_odeint = integrate_const(lotka_volterra, t_span, dt, y0, method=odeint_method)\n\n result = solve_ivp(lotka_volterra_scipy, t_span, y0, t_eval=t_eval_scipy, method='Radau')\n solution_scipy = result.y.T\n\n assert solution_odeint == approx(solution_scipy, rel=rtol)\n\n\n@pytest.mark.parametrize(\"odeint_method, rtol\", [\n [\"runge_kutta4\", 7e-3],\n [\"runge_kutta_cash_karp54\", 7e-3],\n [\"runge_kutta_fehlberg78\", 7e-3],\n # This relative error is expected, since midpoint is\n # a poor quality method and the problem is stiff\n [\"modified_midpoint\", 7e-2],\n])\ndef test_compare_regular_steppers_with_scipy_lorenz(odeint_method, rtol):\n t_span = (0., 10.)\n dt = 0.01\n num_of_steps = int((t_span[1] - t_span[0]) / dt) + 1\n t_eval_scipy = np.linspace(t_span[0], t_span[1], num_of_steps)\n y0 = np.array([0., 1., 0.1])\n\n _, solution_odeint = integrate_const(lorenz, t_span, dt, y0, method=odeint_method)\n\n result = solve_ivp(lorenz_scipy, t_span, y0, t_eval=t_eval_scipy, method='Radau')\n solution_scipy = result.y.T\n\n assert solution_odeint == approx(solution_scipy, rel=rtol)\n"
] |
[
[
"scipy.integrate.solve_ivp",
"numpy.array",
"numpy.linspace"
]
] |
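Just the SciPy reference half of the test above: integrate the same Lotka-Volterra right-hand side with solve_ivp on the fixed grid the test compares against (pysodes.odeint.integrate_const is not needed for this part):

```python
import numpy as np
from scipy.integrate import solve_ivp

def lotka_volterra_scipy(t, z):
    x, y = z
    a, b, c, d = 1.5, 1.0, 3.0, 1.0
    return [a * x - b * x * y, -c * y + d * x * y]

t_span = (0.0, 10.0)
dt = 0.01
t_eval = np.linspace(t_span[0], t_span[1], int((t_span[1] - t_span[0]) / dt) + 1)
y0 = np.array([10.0, 5.0])

result = solve_ivp(lotka_volterra_scipy, t_span, y0, t_eval=t_eval, method='Radau')
print(result.y.T.shape)  # (1001, 2): one row per evaluation time
```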
kw-0/MyGrad
|
[
"307f1bb5f2391e7f4df49fe43a7acf9d1e8ea141"
] |
[
"src/mygrad/indexing_routines/ops.py"
] |
[
"import numpy as np\n\nfrom mygrad.operation_base import Operation\n\n\nclass Where(Operation):\n def __call__(self, a, b, *, condition):\n self.variables = (a, b)\n self.condition = np.asarray(condition, dtype=bool)\n return np.where(condition, a.data, b.data)\n\n def backward_var(self, grad, index, **kwargs):\n condition = self.condition if index == 0 else ~self.condition\n return np.where(condition, grad, 0)\n"
] |
[
[
"numpy.asarray",
"numpy.where"
]
] |
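The gradient rule implemented by Where.backward_var() above, written out with plain NumPy arrays: the upstream gradient is routed to `a` wherever the condition is True and to `b` wherever it is False.

```python
import numpy as np

condition = np.asarray([True, False, True, False])
a = np.array([1.0, 2.0, 3.0, 4.0])
b = np.array([10.0, 20.0, 30.0, 40.0])
grad = np.array([0.1, 0.2, 0.3, 0.4])    # upstream gradient of the output

out = np.where(condition, a, b)           # forward pass: [ 1., 20.,  3., 40.]
grad_a = np.where(condition, grad, 0)     # [0.1, 0. , 0.3, 0. ]
grad_b = np.where(~condition, grad, 0)    # [0. , 0.2, 0. , 0.4]
print(out, grad_a, grad_b)
```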
tripti-agarwal/MapperInteractive
|
[
"466b6b133f279cdd67b19e4c9dee6632f0d82b0b"
] |
[
"app/views.py"
] |
[
"from flask import render_template,request, url_for, jsonify, redirect, Response, send_from_directory\nfrom app import app\nfrom app import APP_STATIC\nfrom app import APP_ROOT\nimport json\nfrom mogutda import SimplicialComplex\nimport numpy as np\nimport pandas as pd\nimport os\nimport re\n# from kmapper import KeplerMapper, Cover\nfrom .kmapper import KeplerMapper, Cover\nfrom sklearn import cluster\nimport networkx as nx\nimport sklearn\n# from sklearn.linear_model import LinearRegression\ntry:\n import statsmodels.api as sm\nexcept:\n print('No statsmodel found')\nfrom sklearn.decomposition import PCA\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.neighbors import KernelDensity\nfrom scipy.spatial import distance\nfrom sklearn.cluster import KMeans\nfrom collections import defaultdict\nimport importlib\n\n\n@app.route('/')\n@app.route('/MapperInteractive_new')\ndef index():\n return render_template('index.html')\n\n@app.route('/data_process', methods=['POST','GET'])\ndef process_text_data():\n '''\n Check for:\n 1. Missing value\n 2. Non-numerical elements in numerical cols\n 3. If cols are non-numerical, check if cols are categorical\n '''\n text_data = request.get_data().decode('utf-8').splitlines()\n cols = text_data[0].split(',')\n mat = [n.split(',') for n in text_data] # csv: if an element is empty, it will be \"\".\n newdf1 = np.array(mat)[1:]\n rows2delete = np.array([])\n cols2delete = []\n \n # ### Delete missing values ###\n for i in range(len(cols)):\n col = newdf1[:,i]\n if np.sum(col == \"\") >= 0.2*len(newdf1): # if more than 20% elements in this column are empty, delete the whole column\n cols2delete.append(i)\n else:\n rows2delete = np.concatenate((rows2delete, np.where(col==\"\")[0]))\n rows2delete = np.unique(rows2delete).astype(\"int\")\n newdf2 = np.delete(np.delete(newdf1, cols2delete, axis=1), rows2delete, axis=0)\n cols = [cols[i] for i in range(len(cols)) if i not in cols2delete]\n\n ### check if numerical cols ###\n cols_numerical_idx = []\n cols_categorical_idx = []\n cols_others_idx = []\n rows2delete = np.array([])\n r1 = re.compile(r'^-?\\d+(?:\\.\\d+)?$')\n r2 = re.compile(r'[+\\-]?[^A-Za-z]?(?:0|[1-9]\\d*)(?:\\.\\d*)?(?:[eE][+\\-]?\\d+)') # scientific notation\n vmatch = np.vectorize(lambda x:bool(r1.match(x) or r2.match(x)))\n for i in range(len(cols)):\n col = newdf2[:,i]\n col_match = vmatch(col)\n if np.sum(col_match) >= 0.8*len(newdf1): # if more than 90% elements can be converted to float, keep the col, and delete rows that cannot be convert to float:\n cols_numerical_idx.append(i)\n rows2delete = np.concatenate((rows2delete, np.where(col_match==False)[0]))\n else: \n ### check if categorical cols### \n if len(np.unique(col)) <= 200: # if less than 10 different values: categorical\n cols_categorical_idx.append(i)\n else:\n cols_others_idx.append(i)\n newdf3 = newdf2[:, cols_numerical_idx+cols_categorical_idx+cols_others_idx]\n rows2delete = rows2delete.astype(int)\n newdf3 = np.delete(newdf3, rows2delete, axis=0)\n newdf3_cols = [cols[idx] for idx in cols_numerical_idx+cols_categorical_idx+cols_others_idx]\n newdf3 = pd.DataFrame(newdf3)\n newdf3.columns = newdf3_cols\n # write the data frame\n newdf3.to_csv(APP_STATIC+\"/uploads/processed_data.csv\", index=False) \n # write the cols info\n cols_numerical = [cols[idx] for idx in cols_numerical_idx]\n cols_categorical = [cols[idx] for idx in cols_categorical_idx]\n cols_others = [cols[idx] for idx in cols_others_idx]\n cols_dict = {'cols_numerical':cols_numerical, 
'cols_categorical':cols_categorical, 'cols_others':cols_others}\n with open(APP_STATIC+\"/uploads/cols_info.json\", 'w') as f:\n f.write(json.dumps(cols_dict, indent=4))\n return jsonify(columns=cols_numerical, categorical_columns=cols_categorical, other_columns=cols_others)\n\n# @app.route('/data_process', methods=['POST','GET'])\n# def load_data():\n# filename = request.get_data().decode('utf-8').splitlines()[0]\n# print(filename)\n# df = pd.read_csv(APP_STATIC+\"/uploads/\"+filename)\n# cols = list(df.columns)\n# df_0 = df.iloc[0,:]\n# cols_numerical_idx = []\n# cols_categorical_idx = []\n# cols_others_idx = []\n# rows2delete = np.array([])\n# for i in range(len(cols)):\n# c = df_0.iloc[i]\n# try:\n# float(c)\n# cols_numerical_idx.append(i)\n# except ValueError:\n# cols_categorical_idx.append(i)\n# # if isinstance(c,int) or isinstance(c,float):\n# # cols_numerical_idx.append(i)\n# # else:\n# # cols_categorical_idx.append(i)\n# df.to_csv(APP_STATIC+\"/uploads/processed_data.csv\", index=False) \n# cols_numerical = [cols[idx] for idx in cols_numerical_idx]\n# cols_categorical = [cols[idx] for idx in cols_categorical_idx]\n# cols_others = [cols[idx] for idx in cols_others_idx]\n# cols_dict = {'cols_numerical':cols_numerical, 'cols_categorical':cols_categorical, 'cols_others':cols_others}\n# print(cols_dict)\n# with open(APP_STATIC+\"/uploads/cols_info.json\", 'w') as f:\n# f.write(json.dumps(cols_dict, indent=4))\n# return jsonify(columns=cols_numerical, categorical_columns=cols_categorical, other_columns=cols_others)\n\n@app.route('/mapper_data_process', methods=['POST','GET'])\ndef load_mapper_data():\n filename = request.get_data().decode('utf-8').splitlines()[0]\n with open(APP_STATIC+\"/uploads/\"+filename) as f:\n mapper_graph = json.load(f)\n mapper_graph[\"links\"] = mapper_graph[\"edges\"]\n del mapper_graph[\"edges\"]\n mapper_graph_new = _parse_result(mapper_graph)\n connected_components = compute_cc(mapper_graph_new)\n return jsonify(mapper=mapper_graph_new, connected_components=connected_components)\n\n@app.route('/mapper_loader', methods=['POST','GET'])\ndef get_graph():\n mapper_data = request.form.get('data')\n mapper_data = json.loads(mapper_data)\n selected_cols = mapper_data['cols']\n all_cols = mapper_data['all_cols'] # all numerical cols\n categorical_cols = mapper_data['categorical_cols']\n data = pd.read_csv(APP_STATIC+\"/uploads/processed_data.csv\")\n data_categorical = data[categorical_cols]\n data = data[all_cols]\n\n # data = data[selected_cols].astype(\"float\")\n config = mapper_data[\"config\"]\n norm_type = config[\"norm_type\"]\n clustering_alg = config[\"clustering_alg\"]\n clustering_alg_params = config[\"clustering_alg_params\"]\n # eps = config[\"eps\"]\n # min_samples = config[\"min_samples\"]\n\n #### TODO: update filter_parameters ####\n filter_parameters = config\n\n # filter functions\n filter_function = config[\"filter\"]\n if len(filter_function) == 1:\n interval = int(config[\"interval1\"])\n overlap = float(config[\"overlap1\"]) / 100\n elif len(filter_function) == 2:\n interval = [int(config[\"interval1\"]), int(config[\"interval2\"])]\n overlap = [float(config[\"overlap1\"])/100, float(config[\"overlap2\"])/100]\n #print(interval, overlap)\n # TODO: fix normalization (only point cloud column needs to be modified?)\n # normalization\n if norm_type == \"none\":\n pass\n elif norm_type == \"0-1\": # axis=0, min-max norm for each column\n scaler = MinMaxScaler()\n data = scaler.fit_transform(data)\n else:\n data = 
sklearn.preprocessing.normalize(data, norm=norm_type, axis=0, copy=False, return_norm=False)\n data = pd.DataFrame(data, columns = all_cols)\n mapper_result = run_mapper(data, selected_cols, interval, overlap, clustering_alg, clustering_alg_params, filter_function, filter_parameters)\n if len(categorical_cols) > 0:\n for node in mapper_result['nodes']:\n #print(\"node\", node['id'])\n vertices = node['vertices']\n data_categorical_i = data_categorical.iloc[vertices]\n node['categorical_cols_summary'] = {}\n for col in categorical_cols:\n node['categorical_cols_summary'][col] = data_categorical_i[col].value_counts().to_dict()\n connected_components = compute_cc(mapper_result)\n return jsonify(mapper=mapper_result, connected_components=connected_components)\n\n@app.route('/multiscale_mapper_loader', methods=['POST','GET'])\ndef get_multiscale_graph():\n mapper_data = request.form.get('data')\n mapper_data = json.loads(mapper_data)\n selected_cols = mapper_data['cols']\n all_cols = mapper_data['all_cols'] # all numerical cols\n categorical_cols = mapper_data['categorical_cols']\n data = pd.read_csv(APP_STATIC+\"/uploads/processed_data.csv\")\n data_categorical = data[categorical_cols]\n data = data[all_cols]\n\n # data = data[selected_cols].astype(\"float\")\n config = mapper_data[\"config\"]\n intervals = config['intervals']\n norm_type = config[\"norm_type\"]\n clustering_alg = config[\"clustering_alg\"]\n clustering_alg_params = config[\"clustering_alg_params\"]\n # eps = config[\"eps\"]\n # min_samples = config[\"min_samples\"]\n\n data_idx2cluster = defaultdict(list)\n res = []\n for interval in intervals:\n #### TODO: update filter_parameters ####\n filter_parameters = config\n\n # filter functions\n filter_function = config[\"filter\"]\n if len(filter_function) == 1:\n overlap = float(config[\"overlap1\"]) / 100\n elif len(filter_function) == 2:\n interval = [interval, interval]\n overlap = [float(config[\"overlap1\"])/100, float(config[\"overlap2\"])/100]\n #print('interval,overlap', interval, overlap)\n # TODO: fix normalization (only point cloud column needs to be modified?)\n # normalization\n if norm_type == \"none\":\n pass\n elif norm_type == \"0-1\": # axis=0, min-max norm for each column\n scaler = MinMaxScaler()\n data = scaler.fit_transform(data)\n else:\n data = sklearn.preprocessing.normalize(data, norm=norm_type, axis=0, copy=False, return_norm=False)\n data = pd.DataFrame(data, columns = all_cols)\n mapper_result = run_mapper(data, selected_cols, interval, overlap, clustering_alg, clustering_alg_params, filter_function, filter_parameters)\n if len(categorical_cols) > 0:\n for node in mapper_result['nodes']:\n #print(\"node\", node['id'])\n vertices = node['vertices']\n data_categorical_i = data_categorical.iloc[vertices]\n node['categorical_cols_summary'] = {}\n for col in categorical_cols:\n node['categorical_cols_summary'][col] = data_categorical_i[col].value_counts().to_dict()\n \n # establish links for multiscale mapper\n for node in mapper_result['nodes']:\n multiscale_key = f'm{interval}_{node[\"id\"]}'\n for v in node['vertices']:\n data_idx2cluster[v].append(multiscale_key)\n\n print(mapper_result[\"betti\"])\n #print('data_idx2cluster', data_idx2cluster)\n connected_components = compute_cc(mapper_result)\n\n res.append({\n 'mapper': mapper_result,\n 'connected_components': connected_components\n })\n \n # figure out links between mapper at different scales\n links = defaultdict(set)\n for related_clusters in data_idx2cluster.values():\n for c0 in 
related_clusters:\n for c1 in related_clusters:\n m0 = c0.split('_')[0]\n m1 = c1.split('_')[0]\n if m0 != m1:\n links[c0].add(c1)\n links[c1].add(c0)\n for k in links.keys():\n links[k] = list(links[k])\n\n res = {'mappers': res, 'links': links}\n #print('links', links)\n\n return jsonify(res)\n\n@app.route('/linear_regression', methods=['POST','GET'])\ndef linear_regression():\n json_data = json.loads(request.form.get('data'))\n selected_nodes = json_data['nodes']\n y_name = json_data['dep_var']\n X_names = json_data['indep_vars']\n #print(y_name, X_names)\n with open(APP_STATIC+\"/uploads/nodes_detail.json\") as f:\n nodes_detail = json.load(f)\n data = pd.read_csv(APP_STATIC+\"/uploads/processed_data.csv\")\n if len(selected_nodes) > 0:\n selected_rows = []\n for node in selected_nodes:\n selected_rows += nodes_detail[node]\n selected_rows = list(set(selected_rows))\n data = data.iloc[selected_rows, :]\n data.index = range(len(data))\n y = data.loc[:,y_name]\n X = data.loc[:,X_names]\n X2 = sm.add_constant(X)\n reg = sm.OLS(y, X2)\n #print(y,X2)\n result = reg.fit()\n conf_int = np.array(result.conf_int())\n conf_int_new = []\n for i in range(conf_int.shape[0]):\n conf_int_new.append(list(conf_int[i,:]))\n #print(result.summary())\n return jsonify(params=list(result.params), pvalues=list(result.pvalues), conf_int=conf_int_new, stderr=list(result.bse))\n\n@app.route('/pca', methods=['POST','GET'])\ndef pca():\n '''\n Dimension reduction using PCA\n n_components = 2\n '''\n selected_nodes = json.loads(request.form.get('data'))['nodes']\n #print(selected_nodes)\n data = pd.read_csv(APP_STATIC+\"/uploads/processed_data.csv\")\n with open(APP_STATIC+\"/uploads/cols_info.json\") as f:\n cols_dict = json.load(f)\n cols = cols_dict['cols_numerical']\n #print(cols)\n with open(APP_STATIC+\"/uploads/nodes_detail.json\") as f:\n nodes_detail = json.load(f)\n if len(selected_nodes) > 0:\n selected_rows = []\n for node in selected_nodes:\n selected_rows += nodes_detail[node]\n selected_rows = list(set(selected_rows))\n data = data.iloc[selected_rows, :]\n data.index = range(len(data))\n pca = PCA(n_components=2)\n data_new = pca.fit_transform(data.loc[:,cols])\n data_new = pd.DataFrame(data_new)\n data_new.columns = ['pc1', 'pc2']\n #print(data.shape)\n #print(data_new)\n # clustering\n if len(selected_nodes)>0:\n data_new['kmeans_cluster'] = KMeans(n_clusters=min(len(selected_nodes), 6), random_state=0).fit(data_new).labels_\n else:\n # data_new['kmeans_cluster'] = KMeans(n_clusters=10, random_state=0).fit(data_new).labels_\n data_new['kmeans_cluster'] = KMeans(n_clusters=6, random_state=0).fit(data_new).labels_\n data_new = data_new.to_json(orient='records')\n return jsonify(pca=data_new)\n\n@app.route('/update_cluster_details', methods=['POST','GET'])\ndef update_cluster_details():\n label_column = request.get_data().decode('utf-8')\n df = pd.read_csv(APP_STATIC+\"/uploads/processed_data.csv\") \n with open(APP_STATIC+\"/uploads/cols_info.json\") as f:\n cols_dict = json.load(f)\n labels = df[label_column]\n if label_column in cols_dict['cols_numerical']:\n labels = np.round(labels,2)\n labels = list(labels)\n return jsonify(labels=labels)\n\ndef run_mapper(data_array, col_names, interval, overlap, clustering_alg, clustering_alg_params, filter_function, filter_parameters=None):\n \"\"\"This function is called when the form is submitted. It triggers construction of Mapper. 
\n\n Each parameter of this function is defined in the configuration.\n\n To customize the Mapper construction, you can inherit from :code:`KeplerMapperConfig` and customize this function.\n\n\n Parameters\n -------------\n\n interval: int\n Number of intervals \n\n overlap: float\n Percentage of overlap. This value will be divided by 100 to produce proporition.\n \n dbscan_eps: float\n :code:`eps` parameter for the DBSCAN clustering used in Kepler Mapper construction.\n \n dbscan_min_samples: int\n :code:`min_samples` parameter for the DBSCAN clustering used in Kepler Mapper construction.\n\n filter_function: str\n Projection for constructing the lens for Kepler Mapper.\n\n \"\"\"\n # data_array = np.array(data_array)\n\n km_result = _call_kmapper(data_array, col_names, \n interval,\n overlap,\n clustering_alg,\n clustering_alg_params,\n filter_function,\n filter_parameters\n )\n return _parse_result(km_result, data_array)\n\ndef _call_kmapper(data, col_names, interval, overlap, clustering_alg, clustering_alg_params, filter_function, filter_parameters=None):\n #print(filter_parameters)\n mapper = KeplerMapper()\n if len(col_names) == 1:\n data_new = np.array(data[col_names[0]]).reshape(-1,1)\n else:\n data_new = np.array(data[col_names])\n\n if len(filter_function) == 1:\n f = filter_function[0]\n if f in data.columns:\n lens = data[f]\n else:\n lens = compute_lens(f, data_new, mapper, filter_parameters)\n \n elif len(filter_function) == 2:\n lens = []\n for f in filter_function:\n if f in data.columns:\n lens_f = np.array(data[f]).reshape(-1,1)\n else:\n lens_f = compute_lens(f, data_new, mapper, filter_parameters)\n lens.append(lens_f)\n lens = np.concatenate((lens[0], lens[1]), axis=1)\n # clusterer = sklearn.cluster.DBSCAN(eps=eps, min_samples=min_samples, metric='euclidean', n_jobs=8)\n #print(data_new.shape)\n #print(np.max(np.max(data_new)))\n #print(np.mean(np.mean(data_new)))\n if clustering_alg == \"DBSCAN\":\n graph = mapper.map_parallel(lens, data_new, clusterer=cluster.DBSCAN(eps=float(clustering_alg_params[\"eps\"]), min_samples=float(clustering_alg_params[\"min_samples\"])), cover=Cover(n_cubes=interval, perc_overlap=overlap))\n elif clustering_alg == \"Agglomerative Clustering\":\n graph = mapper.map_parallel(lens, data_new, clusterer=cluster.AgglomerativeClustering(n_clusters=None, linkage=clustering_alg_params[\"linkage\"], distance_threshold=float(clustering_alg_params[\"dist\"])), cover=Cover(n_cubes=interval, perc_overlap=overlap))\n # graph = mapper.map_parallel(lens, data_new, clusterer=cluster.AgglomerativeClustering( linkage=clustering_alg_params[\"linkage\"]), cover=Cover(n_cubes=interval, perc_overlap=overlap))\n elif clustering_alg == \"Mean Shift\":\n # graph = mapper.map_parallel(lens, data_new, clusterer=cluster.MeanShift(bandwidth=float(clustering_alg_params[\"bandwidth\"])), cover=Cover(n_cubes=interval, perc_overlap=overlap))\n graph = mapper.map_parallel(lens, data_new, clusterer=cluster.MeanShift(bandwidth=1), cover=Cover(n_cubes=interval, perc_overlap=overlap))\n \n #print(len(graph['nodes'].keys()))\n # graph = mapper.map(lens, data_new, clusterer=cluster.DBSCAN(eps=eps, min_samples=min_samples), cover=Cover(n_cubes=interval, perc_overlap=overlap))\n #print(\"Betti0=\",SimplicialComplex(simplices=graph['simplices']).betti_number(0))\n return graph\n\ndef compute_lens(f, data, mapper, filter_parameters=None):\n data_array = np.array(data)\n if f in [\"sum\", \"mean\", \"median\", \"max\", \"min\", \"std\", \"l2norm\"]:\n lens = 
mapper.fit_transform(data_array, projection=f).reshape(-1,1)\n elif f == \"Density\":\n density_kernel = filter_parameters['density_kernel']\n density_bandwidth = filter_parameters['density_bandwidth']\n #print(\"density\", density_kernel, density_bandwidth)\n kde = KernelDensity(kernel=density_kernel, bandwidth=density_bandwidth).fit(data_array)\n lens = kde.score_samples(data_array).reshape(-1,1)\n scaler = MinMaxScaler()\n lens = scaler.fit_transform(lens)\n elif f == \"Eccentricity\":\n p = filter_parameters['eccent_p']\n distance_matrix = filter_parameters['eccent_dist']\n #print(\"eccent\", p, distance_matrix)\n pdist = distance.squareform(distance.pdist(data_array, metric=distance_matrix))\n lens = np.array([(np.sum(pdist**p, axis=1)/len(data_array))**(1/p)]).reshape(-1,1)\n elif f == \"PC1\":\n pca = PCA(n_components=min(2, data_array.shape[1]))\n lens = pca.fit_transform(data_array)[:,0].reshape(-1,1)\n elif f == \"PC2\":\n if data_array.shape[1] > 1:\n pca = PCA(n_components=2)\n lens = pca.fit_transform(data_array)[:,1].reshape(-1,1)\n # else:\n # lens = np.array(data[f]).reshape(-1,1)\n return lens\n\n\ndef _parse_result(graph, data_array=[]):\n if len(data_array)>0:\n col_names = data_array.columns\n data_array = np.array(data_array)\n data = {\"nodes\": [], \"links\": [], \"betti\":[]}\n\n # nodes\n node_keys = graph['nodes'].keys()\n name2id = {}\n i = 1\n nodes_detail = {}\n for key in node_keys:\n name2id[key] = i\n cluster = graph['nodes'][key]\n nodes_detail[i] = cluster\n if len(data_array)>0:\n cluster_data = data_array[cluster]\n cluster_avg = np.mean(cluster_data, axis=0)\n cluster_avg_dict = {}\n for j in range(len(col_names)):\n cluster_avg_dict[col_names[j]] = cluster_avg[j]\n data['nodes'].append({\n \"id\": str(i),\n \"size\": len(graph['nodes'][key]),\n \"avgs\": cluster_avg_dict,\n \"vertices\": cluster\n })\n else:\n data['nodes'].append({\n \"id\": str(i),\n \"size\": len(graph['nodes'][key]),\n \"vertices\": cluster\n })\n i += 1\n \n with open(APP_STATIC+\"/uploads/nodes_detail.json\",\"w\") as f:\n json.dump(nodes_detail, f)\n\n # links\n links = set()\n for link_from in graph['links'].keys():\n for link_to in graph['links'][link_from]:\n from_id = name2id[link_from]\n to_id = name2id[link_to]\n left_id = min(from_id, to_id)\n right_id = max(from_id, to_id)\n links.add((left_id, right_id))\n for link in links:\n data['links'].append({\"source\": link[0], \"target\": link[1]})\n betti0 = int(SimplicialComplex(simplices=graph['simplices']).betti_number(0))\n betti1 = int(SimplicialComplex(simplices=graph['simplices']).betti_number(1))\n betti2 = int(SimplicialComplex(simplices=graph['simplices']).betti_number(2))\n data['betti'].append(betti0)\n data['betti'].append(betti1)\n data['betti'].append(betti2)\n return data\n\ndef compute_cc(graph): \n '''\n Compute connected components for the mapper graph\n '''\n G = nx.Graph()\n for node in graph['nodes']:\n nodeId = int(node['id'])-1\n G.add_node(nodeId)\n for edge in graph['links']:\n sourceId = int(edge['source'])-1\n targetId = int(edge['target'])-1\n G.add_edge(sourceId, targetId)\n cc = nx.connected_components(G)\n cc_list = []\n for c in cc:\n cc_list.append(list(c))\n return cc_list\n\ndef get_selected_data(selected_nodes):\n data = pd.read_csv(APP_STATIC+\"/uploads/processed_data.csv\")\n with open(APP_STATIC+\"/uploads/cols_info.json\") as f:\n cols_dict = json.load(f)\n cols = cols_dict['cols_numerical']\n #print(cols)\n with open(APP_STATIC+\"/uploads/nodes_detail.json\") as f:\n nodes_detail = 
json.load(f)\n if len(selected_nodes) > 0:\n selected_rows = []\n for node in selected_nodes:\n selected_rows += nodes_detail[node]\n selected_rows = list(set(selected_rows))\n data = data.iloc[selected_rows, :]\n data.index = range(len(data))\n return data, cols\n\n@app.route('/module_extension', methods=['POST','GET'])\ndef module_extension():\n module_info = \"\"\n with open(APP_STATIC+\"/uploads/new_modules.json\") as f:\n module_info = json.load(f)\n return module_info\n\n@app.route('/module_computing', methods=['POST','GET'])\ndef module_computing():\n json_data = json.loads(request.form.get('data'))\n selected_nodes = json_data['nodes']\n data, cols = get_selected_data(selected_nodes)\n module_info = json_data['module_info']\n data_new = call_module_function(data, cols, module_info)\n # data_new['kmeans_cluster'] = KMeans(n_clusters=4, random_state=0).fit(data_new).labels_\n # data_new = data_new.to_json(orient='records')\n # return jsonify(module_result=data_new)\n return data_new\n # # kNN graph\n # from pynndescent import NNDescent\n # df = pd.read_csv(APP_STATIC+\"/uploads/processed_data.csv\")\n # activations = df.iloc[:, 0:512]\n # k=5\n # index = NNDescent(activations, n_neighbors=15, metric='euclidean')\n # out = index.query(activations, k=k)\n # dist = out[1]\n # s_dist=np.sort(dist, axis=0)\n # s_dist = list(s_dist[:,k-1].astype(\"str\"))\n # print(s_dist)\n # return jsonify(s_dist=s_dist)\n\ndef call_module_function(data, cols, module_info):\n mod_name, func_name = module_info['function-name'].rsplit('.',1)\n mod = importlib.import_module(mod_name)\n method_to_call = getattr(mod, func_name)\n if module_info['module-type'] == \"unsupervised_learning\":\n result = method_to_call(**module_info['function-parameters'])\n data_new = result.fit_transform(data.loc[:,cols])\n data_new = pd.DataFrame(data_new)\n data_new_cols = []\n for i in range(data_new.shape[1]):\n data_new_cols.append(\"col\"+str(i+1))\n data_new.columns = data_new_cols\n data_new['kmeans_cluster'] = KMeans(n_clusters=4, random_state=0).fit(data_new).labels_\n data_new = data_new.to_json(orient='records')\n data_new = jsonify(module_result=data_new)\n elif module_info['module-type'] == \"supervised_learning\":\n y = data.loc[:,module_info['input-variables']['dependent']]\n X = data.loc[:,module_info['input-variables']['independent']]\n X2 = sm.add_constant(X)\n reg = method_to_call(np.asarray(y), np.asarray(X2))\n result = reg.fit()\n conf_int = np.array(result.conf_int())\n conf_int_new = []\n for i in range(conf_int.shape[0]):\n conf_int_new.append(list(conf_int[i,:]))\n #print(result.summary())\n data_new = jsonify(params=list(result.params), pvalues=list(result.pvalues), conf_int=conf_int_new, stderr=list(result.bse))\n return data_new\n"
] |
[
[
"pandas.read_csv",
"numpy.sum",
"sklearn.cluster.MeanShift",
"sklearn.cluster.KMeans",
"numpy.unique",
"numpy.asarray",
"pandas.DataFrame",
"numpy.round",
"numpy.concatenate",
"numpy.delete",
"sklearn.preprocessing.normalize",
"numpy.mean",
"scipy.spatial.distance.pdist",
"sklearn.neighbors.KernelDensity",
"numpy.array",
"numpy.where",
"sklearn.decomposition.PCA",
"sklearn.preprocessing.MinMaxScaler"
]
] |
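For reference, the density and eccentricity lenses built in compute_lens above boil down to a few scikit-learn/SciPy calls. A minimal standalone sketch follows; the toy point cloud, the gaussian kernel, the bandwidth of 0.5 and the exponent p=2 are illustrative assumptions, not values taken from the app.

import numpy as np
from scipy.spatial import distance
from sklearn.neighbors import KernelDensity
from sklearn.preprocessing import MinMaxScaler

rng = np.random.default_rng(0)
data = rng.random((100, 3))  # toy point cloud

# Density lens: KDE log-density rescaled to [0, 1], as in compute_lens
kde = KernelDensity(kernel="gaussian", bandwidth=0.5).fit(data)
density_lens = MinMaxScaler().fit_transform(kde.score_samples(data).reshape(-1, 1))

# Eccentricity lens: p-mean of each row of the pairwise distance matrix
p = 2
pdist = distance.squareform(distance.pdist(data, metric="euclidean"))
eccentricity_lens = ((np.sum(pdist ** p, axis=1) / len(data)) ** (1 / p)).reshape(-1, 1)

print(density_lens.shape, eccentricity_lens.shape)  # (100, 1) (100, 1)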
deepneuro/Document_Clustering
|
[
"347d6db613c1c1df9da71e87fb4221a7915dfb5a"
] |
[
"reader.py"
] |
[
"import pandas as pd\nimport pather\n\n\ndef find_txt_files(initial_path):\n \"\"\"\n For a given path, returns the paths to the .txt files\n :param initial_path: the initial directory of search\n :return: list of paths inside the initial path\n \"\"\"\n\n path_list = pather.find_paths(initial_path, \"txt\")\n\n last_elements = pather.return_last_element(path_list)\n\n first_elements = pather.return_first_elements(path_list)\n\n return path_list, last_elements, first_elements\n\n\ndef read_txt_file(path):\n \"\"\"\n For a given .txt path, reads the file and returns a string\n :param path: The path to the .txt file\n :return: string of the file\n \"\"\"\n\n file = open(path, \"r\", encoding='utf-8')\n text_string = file.read()\n\n return text_string\n\n\ndef create_dir(first_element):\n \"\"\"\n For a given path, returns the directory from the CV directory to the last\n one.\n :param first_element: str, the element to be truncated\n :return: str, the truncated directory\n \"\"\"\n first_elements_list = first_element.split('\\\\')\n for index, element in enumerate(first_elements_list):\n if element == 'CV' or element == 'cv':\n path = '\\\\'.join(first_elements_list[index:])\n return path\n\n\ndef create_filename(last_element):\n \"\"\"\n For a given file, returns its name without the extension.\n :param last_element: str, file name\n :return: str, file name without extension\n \"\"\"\n return last_element[:-4]\n\n\ndef create_data_frame(initial_path):\n \"\"\"\n For a given path, returns a dataframe with the document name of the document\n language and text.\n :param initial_path: path with the .txt files\n :return: dataframe with the mentioned fields\n \"\"\"\n\n path_list, last_elements, first_elements = find_txt_files(initial_path)\n\n lang = [element.split('\\\\')[-2] for element in first_elements]\n\n text_files = [read_txt_file(file) for file in path_list]\n\n dirs = [create_dir(first_element) for first_element in first_elements]\n\n name = [create_filename(last_element) for last_element in last_elements]\n\n dataframe_dict = {'file': last_elements, 'lang': lang, 'text': text_files,\n 'dir': dirs, 'names': name}\n\n dataframe = pd.DataFrame(dataframe_dict)\n\n return dataframe\n"
] |
[
[
"pandas.DataFrame"
]
] |
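The dataframe assembly in create_data_frame is pandas.DataFrame over parallel lists keyed by column name, and create_dir truncates a Windows-style path at the CV directory. A tiny sketch with made-up values (the sample path and file names are assumptions):

import pandas as pd

sample_path = r"D:\data\CV\english\resume_01.txt"        # illustrative Windows-style path
parts = sample_path.split("\\")
cv_dir = "\\".join(parts[parts.index("CV"):])            # 'CV\english\resume_01.txt'

df = pd.DataFrame({"file": ["resume_01.txt"],
                   "lang": ["english"],
                   "dir": [cv_dir],
                   "names": ["resume_01"]})
print(df)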
anukchat/cassava_leaf_classifier
|
[
"f5c2f9d1fceae2a504d0119b3ea078786db6f8c1"
] |
[
"app/main.py"
] |
[
"from fastapi import FastAPI, File, UploadFile, HTTPException\nfrom fastapi.middleware.cors import CORSMiddleware\n\nfrom pydantic import ValidationError\nimport uvicorn\n\nfrom PIL import Image\nimport io\nimport sys\nimport logging\nimport cv2\nimport numpy as np\n\nfrom response_dto.prediction_response_dto import PredictionResponseDto\nfrom ml.predictions.classify_image import ImageClassifier\n\napp = FastAPI()\n\norigins = [\n \"http://127.0.0.1:3000\",\n \"https://127.0.0.1:3000\",\n]\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=origins,\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n\nimage_classifier = ImageClassifier()\n\n\n@app.post(\"/predict/\", response_model=PredictionResponseDto)\nasync def predict(file: UploadFile = File(...)):\n if file.content_type.startswith('image/') is False:\n raise HTTPException(\n status_code=400, detail=f'File \\'{file.filename}\\' is not an image.')\n\n try:\n contents = await file.read()\n\n image = np.fromstring(contents, np.int8)\n image = cv2.imdecode(image, cv2.IMREAD_COLOR)\n predicted_class = image_classifier.predict(image)\n\n logging.info(f\"Predicted Class: {predicted_class}\")\n return {\n \"filename\": file.filename,\n \"contenttype\": file.content_type,\n \"likely_class\": predicted_class\n }\n\n except Exception as error:\n logging.exception(error)\n e = sys.exc_info()[1]\n raise HTTPException(status_code=500, detail=str(e))\n\n except ValidationError as e:\n print(e.json())\n"
] |
[
[
"numpy.fromstring"
]
] |
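The predict endpoint above decodes the uploaded bytes with numpy.fromstring, which is deprecated in current NumPy; np.frombuffer with dtype uint8 is the usual replacement paired with cv2.imdecode. A minimal sketch of that decode step (the synthetic black image standing in for an uploaded file is an assumption):

import cv2
import numpy as np

# Encode a synthetic 8x8 BGR image to PNG bytes to stand in for the upload
ok, encoded = cv2.imencode(".png", np.zeros((8, 8, 3), dtype=np.uint8))
contents = encoded.tobytes()

# Non-deprecated equivalent of np.fromstring(contents, np.int8)
buf = np.frombuffer(contents, dtype=np.uint8)
image = cv2.imdecode(buf, cv2.IMREAD_COLOR)
print(image.shape)  # (8, 8, 3)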
HenriARM/co-mod-gan
|
[
"6b0e8be87aafe8af3de19f796aac1b7cd4cc2d37"
] |
[
"training/misc.py"
] |
[
"# Copyright (c) 2019, NVIDIA Corporation. All rights reserved.\n#\n# This work is made available under the Nvidia Source Code License-NC.\n# To view a copy of this license, visit\n# https://nvlabs.github.io/stylegan2/license.html\n\n\"\"\"Miscellaneous utility functions.\"\"\"\n\nimport os\nimport pickle\nimport numpy as np\nimport PIL.Image\nimport PIL.ImageFont\nimport dnnlib\nfrom dnnlib import tflib\n\nimport tensorflow as tf\n\n#----------------------------------------------------------------------------\n# Convenience wrappers for pickle that are able to load data produced by\n# older versions of the code, and from external URLs.\n\ndef open_file_or_url(file_or_url):\n if dnnlib.util.is_url(file_or_url):\n return dnnlib.util.open_url(file_or_url, cache_dir='.stylegan2-cache')\n return open(file_or_url, 'rb')\n\ndef load_pkl(file_or_url):\n with open_file_or_url(file_or_url) as file:\n return pickle.load(file, encoding='latin1')\n\ndef save_pkl(obj, filename):\n with open(filename, 'wb') as file:\n pickle.dump(obj, file, protocol=pickle.HIGHEST_PROTOCOL)\n\n#----------------------------------------------------------------------------\n# Image utils.\n\ndef adjust_dynamic_range(data, drange_in, drange_out):\n if drange_in != drange_out:\n scale = (np.float32(drange_out[1]) - np.float32(drange_out[0])) / (np.float32(drange_in[1]) - np.float32(drange_in[0]))\n bias = (np.float32(drange_out[0]) - np.float32(drange_in[0]) * scale)\n data = data * scale + bias\n return data\n\ndef create_image_grid(images, grid_size=None, pix2pix=False):\n if pix2pix:\n images, _ = np.split(images, 2, axis=1)\n assert images.ndim == 3 or images.ndim == 4\n num, img_w, img_h = images.shape[0], images.shape[-1], images.shape[-2]\n\n if grid_size is not None:\n grid_w, grid_h = tuple(grid_size)\n else:\n grid_w = max(int(np.ceil(np.sqrt(num))), 1)\n grid_h = max((num - 1) // grid_w + 1, 1)\n\n grid = np.zeros(list(images.shape[1:-2]) + [grid_h * img_h, grid_w * img_w], dtype=images.dtype)\n for idx in range(num):\n x = (idx % grid_w) * img_w\n y = (idx // grid_w) * img_h\n grid[..., y : y + img_h, x : x + img_w] = images[idx]\n return grid\n\ndef convert_to_pil_image(image, drange=[0,1]):\n assert image.ndim == 2 or image.ndim == 3\n if image.ndim == 3:\n if image.shape[0] == 1:\n image = image[0] # grayscale CHW => HW\n else:\n image = image.transpose(1, 2, 0) # CHW -> HWC\n\n if drange is not None:\n image = adjust_dynamic_range(image, drange, [0,255])\n image = np.rint(image).clip(0, 255).astype(np.uint8)\n fmt = 'RGB' if image.ndim == 3 else 'L'\n return PIL.Image.fromarray(image, fmt)\n\ndef save_image_grid(images, filename, drange=[0,1], grid_size=None, pix2pix=False):\n convert_to_pil_image(create_image_grid(images, grid_size, pix2pix=pix2pix), drange).save(filename)\n\ndef apply_mirror_augment(minibatch):\n mask = np.random.rand(minibatch.shape[0]) < 0.5\n minibatch = np.array(minibatch)\n minibatch[mask] = minibatch[mask, :, :, ::-1]\n return minibatch\n\n#----------------------------------------------------------------------------\n# Loading data from previous training runs.\n\ndef parse_config_for_previous_run(run_dir):\n with open(os.path.join(run_dir, 'submit_config.pkl'), 'rb') as f:\n data = pickle.load(f)\n data = data.get('run_func_kwargs', {})\n return dict(train=data, dataset=data.get('dataset_args', {}))\n\n#----------------------------------------------------------------------------\n# Size and contents of the image snapshot grids that are exported\n# periodically during training.\n\ndef 
setup_snapshot_image_grid(training_set,\n size = '1080p', # '1080p' = to be viewed on 1080p display, '4k' = to be viewed on 4k display.\n layout = 'random'): # 'random' = grid contents are selected randomly, 'row_per_class' = each row corresponds to one class label.\n\n # Select size.\n gw = 1; gh = 1\n if size == '1080p':\n gw = np.clip(1920 // training_set.shape[2], 3, 32)\n gh = np.clip(1080 // training_set.shape[1], 2, 32)\n if size == '4k':\n gw = np.clip(3840 // training_set.shape[2], 7, 32)\n gh = np.clip(2160 // training_set.shape[1], 4, 32)\n if size == '8k':\n gw = np.clip(7680 // training_set.shape[2], 7, 32)\n gh = np.clip(4320 // training_set.shape[1], 4, 32)\n\n # Initialize data arrays.\n reals = np.zeros([gw * gh] + training_set.shape, dtype=training_set.dtype)\n labels = np.zeros([gw * gh, training_set.label_size], dtype=training_set.label_dtype)\n\n # Random layout.\n if layout == 'random':\n reals[:], labels[:] = training_set.get_minibatch_val_np(gw * gh)\n\n # Class-conditional layouts.\n class_layouts = dict(row_per_class=[gw,1], col_per_class=[1,gh], class4x4=[4,4])\n if layout in class_layouts:\n bw, bh = class_layouts[layout]\n nw = (gw - 1) // bw + 1\n nh = (gh - 1) // bh + 1\n blocks = [[] for _i in range(nw * nh)]\n for _iter in range(1000000):\n real, label = training_set.get_minibatch_val_np(1)\n idx = np.argmax(label[0])\n while idx < len(blocks) and len(blocks[idx]) >= bw * bh:\n idx += training_set.label_size\n if idx < len(blocks):\n blocks[idx].append((real, label))\n if all(len(block) >= bw * bh for block in blocks):\n break\n for i, block in enumerate(blocks):\n for j, (real, label) in enumerate(block):\n x = (i % nw) * bw + j % bw\n y = (i // nw) * bh + j // bw\n if x < gw and y < gh:\n reals[x + y * gw] = real[0]\n labels[x + y * gw] = label[0]\n \n masks = training_set.get_random_masks_np(gw * gh)\n\n return (gw, gh), reals, labels, masks\n\n#----------------------------------------------------------------------------\n"
] |
[
[
"numpy.split",
"numpy.sqrt",
"numpy.clip",
"numpy.rint",
"numpy.argmax",
"numpy.random.rand",
"numpy.float32",
"numpy.array",
"numpy.zeros"
]
] |
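adjust_dynamic_range and the default grid sizing in create_image_grid are small numpy computations; the sketch below reproduces them on a toy batch (the batch shape and the [-1, 1] input range are assumptions):

import numpy as np

images = np.random.rand(10, 1, 16, 16).astype(np.float32) * 2 - 1   # toy CHW batch in [-1, 1]

# Linear remap [-1, 1] -> [0, 255], mirroring adjust_dynamic_range
drange_in, drange_out = [-1, 1], [0, 255]
scale = (np.float32(drange_out[1]) - np.float32(drange_out[0])) / (np.float32(drange_in[1]) - np.float32(drange_in[0]))
bias = np.float32(drange_out[0]) - np.float32(drange_in[0]) * scale
as_uint8 = np.rint(images * scale + bias).clip(0, 255).astype(np.uint8)

# Default grid shape chosen by create_image_grid when grid_size is None
num = images.shape[0]
grid_w = max(int(np.ceil(np.sqrt(num))), 1)
grid_h = max((num - 1) // grid_w + 1, 1)
print(as_uint8.dtype, grid_w, grid_h)  # uint8 4 3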
JohannesSchuele/unet-keras
|
[
"6cc081dae917a97310ffba7309dae48196e88192"
] |
[
"tools/utilz_graph.py"
] |
[
"import glob\nimport skimage.io as io\nimport skimage.transform as trans\nimport numpy as np\nimport pylab as plt\nfrom natsort import natsorted\nimport glob\nimport os\nimport tensorflow as tf\ndef get_sorted_data_names_from_paths(path_to_image,path_to_mask):\n image_names = glob.glob(path_to_image + \"/*.png\")\n image_names = natsorted(image_names)\n image_names = [os.path.basename(x) for x in image_names]\n\n mask_names = glob.glob(path_to_mask + \"/*.npy\")\n mask_names = natsorted(mask_names)\n mask_names = [os.path.basename(x) for x in mask_names]\n return image_names, mask_names\n\n\n\ndef convert_to_tensor(arg):\n arg = tf.convert_to_tensor(arg, dtype=tf.float32)\n return arg\n\n#\n# y_label_positions = y_label_positions.reshape((y_label_positions.shape[0] * 2))\n# adjacency_label_indices = np.triu_indices(y_label_adjacency.shape[1], k=1)\n# y_label_adjacency = y_label_adjacency[adjacency_label_indices[0], adjacency_label_indices[1]]\n# if y_label_positions.shape[0] >= network_dim * 2:\n# print(\n# 'the number of labeld nodes/frame is too high for network dimension - decrease nodes in training data or consider to adapt the network size')\n# a[index, 0:network_dim * 2] = y_label_positions[0:network_dim * 2]\n# b[index, 0:adj_flatten_dim] = y_label_adjacency[0:adj_flatten_dim]\n# else:\n\n\ndef create_adj_matrix(adj_vector,networksize):\n adj_matrix = np.zeros((networksize, networksize))\n\n adj_matrix[np.triu_indices(networksize, k = 1)] = adj_vector[0:np.shape(np.triu_indices(networksize, k = 1))[1]]\n adj_matrix = adj_matrix+np.transpose(adj_matrix)\n\n return(adj_matrix)\n\n\ndef create_position_matrix(position_vector, cut_off_size = None):\n length = int(len(position_vector)/2)\n position_matrix = position_vector.reshape(length,2)\n if cut_off_size == None:\n cut_off_size = length\n position_matrix = position_matrix[0:cut_off_size,:]\n return position_matrix\n\ndef create_img_mask(network_dim, pos: np.array):\n node_img_mask = np.zeros((network_dim,network_dim))\n positions = pos.astype(int)\n for i in range(len(positions[:,0])):\n node_img_mask[positions[i][0],positions[i][1]] = 1\n\n return node_img_mask\n\n\n\n\ndef create_graph_mask(network_dim, pos: np.array, adjacency: np.ndarray):\n node_adj_mask = np.zeros((network_dim,network_dim))\n positions = pos.astype(int)\n for x_pixel in range(len(adjacency)):\n for y_pixel in range(len(adjacency)):\n node_adj_mask[positions[x_pixel][0],positions[y_pixel][1]] = adjacency[x_pixel,y_pixel]\n\n return node_adj_mask\n#\n\n\ndef create_graph_tensor(mask):\n \"\"\" Mask Normalization\n Function that returns normalized mask\n Each pixel is either 0 or 1\n \"\"\"\n mask = np.asarray(mask)\n y_positions_label = mask[:, 0:2, 0]\n y_adjacency_label = mask[:, 2:, 0]\n pos = y_positions_label.astype(int)\n adj_dim = int((len(pos) * len(pos) -len(pos)) / 2)\n tensor_graph = np.zeros(adj_dim)\n\n for node_idx in range(len(pos)):\n adjacency_idx_vec = np.argwhere(y_adjacency_label[node_idx, :] == 1) # find all set of nodes that are connected to the one respective node_idx (this is evaluated for all rows)\n idx_trivec = get_indices_trivec_adjacency(len(pos),node_idx,adjacency_idx_vec)\n tensor_graph[idx_trivec] = 1 # label\n tensor_graph.astype(int)\n\n return tensor_graph\n\n\ndef create_graph_vec_fixed_dim(adj, dim_nr_nodes = 128):\n \"\"\" Mask Normalization\n Function that returns normalized mask\n Each pixel is either 0 or 1\n \"\"\"\n adj_dim = int((dim_nr_nodes * dim_nr_nodes -dim_nr_nodes) / 2)\n tensor_graph = np.zeros(adj_dim)\n for 
node_idx in range(len(adj[:,0])):\n adjacency_idx_vec = np.argwhere(adj[node_idx, :] == 1) # find all set of nodes that are connected to the one respective node_idx (this is evaluated for all rows)\n #global_adj_idx_tuple = pos[adjacency_idx, :] # position of the connected node\n idx_trivec = get_indices_trivec_adjacency(dim_nr_nodes,node_idx,adjacency_idx_vec)\n #tensor_graph[node_idx[0], node_idx[1], idx_trivec] = 1 #label\n tensor_graph[idx_trivec] = 1 # label\n tensor_graph.astype(int)\n\n return tensor_graph\n\n\n\ndef get_indices_trivec_adjacency(node_dim,node_idx,adjacency_idx_vec):\n adj_tri_vec_idx = np.triu_indices(node_dim, k=1)\n tmp_matrix = np.full((node_dim, node_dim), False, dtype=bool)\n tmp_matrix[node_idx,adjacency_idx_vec] = True\n adj_tri_vec = tmp_matrix[adj_tri_vec_idx]\n idx_trivec = np.argwhere(adj_tri_vec == True)\n idx_trivec[np.lexsort(np.fliplr(idx_trivec).T)]\n\n #idx_trivec = np.sort(idx_trivec, axis=0) # ensure that idx_trivec is correclty sorted\n return np.squeeze(idx_trivec)\n\n\ndef tensor_2_adjmatrix(adj_vector,networksize, nr_nodes):\n adj_matrix = np.zeros((networksize, networksize))\n #adj_matrix[np.triu_indices(networksize, k = 1)] = adj_vector[0:np.shape(np.triu_indices(networksize, k = 1))[1]]\n adj_matrix[np.triu_indices(networksize, k=1)] = adj_vector\n adj_matrix = adj_matrix+np.transpose(adj_matrix)\n #adj_matrix = W = np.maximum( adj_matrix, adj_matrix.T)\n adj_matrix = adj_matrix[: nr_nodes, :nr_nodes]\n adj_matrix_tmp = adj_matrix.copy()\n adj_matrix_tmp[: nr_nodes, :nr_nodes] = 0\n print('Is just True iff all entries are zero ::', np.all(adj_matrix_tmp==0))\n\n return adj_matrix\n\n\ndef create_input_image_node_tensor(img, nodes, size):\n tensor = np.zeros((size[0],size[1],2))\n tensor[:, :, 0] = img\n nodes = nodes.astype(int)\n for index in range(len(nodes)):\n tensor[nodes[index,0],nodes[index,1],1] = 1\n return tensor\n\n\ndef tensor_2_image_and_pos(tensor):\n image = tensor[:, :, 0]*255\n image = np.array(image, dtype='float32')\n pos_matrix = tensor[:, :, 1].astype(int)\n pos = np.argwhere(pos_matrix > 0)\n #pos = np.sort(pos)\n #print('output pos after where',pos)\n pos = (np.asarray(pos))\n pos[np.lexsort(np.fliplr(pos).T)]\n #pos = np.sort(pos, axis=0) # new to ensure correct order, this is maybe redundantp\n print('len of pos', len(pos))\n #print('shape of postion',pos.shape)\n\n return image, pos\n\n"
] |
[
[
"tensorflow.convert_to_tensor",
"numpy.triu_indices",
"numpy.asarray",
"numpy.fliplr",
"numpy.squeeze",
"numpy.full",
"numpy.argwhere",
"numpy.all",
"numpy.transpose",
"numpy.array",
"numpy.zeros"
]
] |
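Most of the helpers above revolve around storing an adjacency matrix as its strict upper-triangle vector and rebuilding it later (create_adj_matrix, tensor_2_adjmatrix). A round-trip on a 4-node toy graph (the example matrix is an assumption):

import numpy as np

adj = np.array([[0, 1, 0, 1],
                [1, 0, 1, 0],
                [0, 1, 0, 0],
                [1, 0, 0, 0]])          # symmetric toy adjacency, 4 nodes

# Flatten to the strict upper triangle (k=1), as the graph tensors store edges
tri_vec = adj[np.triu_indices(4, k=1)]  # length 4*3/2 = 6

# Rebuild the full matrix the way create_adj_matrix / tensor_2_adjmatrix do
rebuilt = np.zeros((4, 4))
rebuilt[np.triu_indices(4, k=1)] = tri_vec
rebuilt = rebuilt + np.transpose(rebuilt)
print(np.array_equal(rebuilt, adj))     # True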
sytham/rnn-benchmark
|
[
"0e78860972d71fac1a0bbbc1d3ffc198d2c3b4d6"
] |
[
"sandbox/rocky/tf/samplers/vectorized_sampler.py"
] |
[
"import pickle\n\nimport tensorflow as tf\nfrom rllab.sampler.base import BaseSampler\nfrom sandbox.rocky.tf.envs.parallel_vec_env_executor import ParallelVecEnvExecutor\nfrom sandbox.rocky.tf.envs.vec_env_executor import VecEnvExecutor\nfrom rllab.misc import tensor_utils\nimport numpy as np\nfrom rllab.sampler.stateful_pool import ProgBarCounter\nimport rllab.misc.logger as logger\nimport itertools\n\n\nclass VectorizedSampler(BaseSampler):\n\n def __init__(self, algo, n_envs=None):\n super(VectorizedSampler, self).__init__(algo)\n self.n_envs = n_envs\n\n def start_worker(self):\n n_envs = self.n_envs\n if n_envs is None:\n n_envs = int(self.algo.batch_size / self.algo.max_path_length)\n n_envs = max(1, min(n_envs, 100))\n\n if getattr(self.algo.env, 'vectorized', False):\n self.vec_env = self.algo.env.vec_env_executor(n_envs=n_envs, max_path_length=self.algo.max_path_length)\n else:\n envs = [pickle.loads(pickle.dumps(self.algo.env)) for _ in range(n_envs)]\n print(\"VectorizedSampler using\", len(envs), \"envs\")\n self.vec_env = VecEnvExecutor(\n envs=envs,\n max_path_length=self.algo.max_path_length\n )\n self.env_spec = self.algo.env.spec\n\n def shutdown_worker(self):\n self.vec_env.terminate()\n\n def obtain_samples(self, itr):\n logger.log(\"Obtaining samples for iteration %d...\" % itr)\n paths = []\n n_samples = 0\n obses = self.vec_env.reset()\n dones = np.asarray([True] * self.vec_env.num_envs)\n running_paths = [None] * self.vec_env.num_envs\n\n pbar = ProgBarCounter(self.algo.batch_size)\n policy_time = 0\n env_time = 0\n process_time = 0\n \n policy = self.algo.policy\n import time\n self.vec_env.envs[0].render()\n while n_samples < self.algo.batch_size:\n t = time.time()\n policy.reset(dones)\n actions, agent_infos = policy.get_actions(obses)\n\n policy_time += time.time() - t\n t = time.time()\n next_obses, rewards, dones, env_infos = self.vec_env.step(actions)\n env_time += time.time() - t\n\n t = time.time()\n self.vec_env.envs[0].render()\n agent_infos = tensor_utils.split_tensor_dict_list(agent_infos)\n env_infos = tensor_utils.split_tensor_dict_list(env_infos)\n if env_infos is None:\n env_infos = [dict() for _ in range(self.vec_env.num_envs)]\n if agent_infos is None:\n agent_infos = [dict() for _ in range(self.vec_env.num_envs)]\n for idx, observation, action, reward, env_info, agent_info, done in zip(itertools.count(), obses, actions,\n rewards, env_infos, agent_infos,\n dones):\n if running_paths[idx] is None:\n running_paths[idx] = dict(\n observations=[],\n actions=[],\n rewards=[],\n env_infos=[],\n agent_infos=[],\n )\n running_paths[idx][\"observations\"].append(observation)\n running_paths[idx][\"actions\"].append(action)\n running_paths[idx][\"rewards\"].append(reward)\n running_paths[idx][\"env_infos\"].append(env_info)\n running_paths[idx][\"agent_infos\"].append(agent_info)\n if done:\n paths.append(dict(\n observations=self.env_spec.observation_space.flatten_n(running_paths[idx][\"observations\"]),\n actions=self.env_spec.action_space.flatten_n(running_paths[idx][\"actions\"]),\n rewards=tensor_utils.stack_tensor_list(running_paths[idx][\"rewards\"]),\n env_infos=tensor_utils.stack_tensor_dict_list(running_paths[idx][\"env_infos\"]),\n agent_infos=tensor_utils.stack_tensor_dict_list(running_paths[idx][\"agent_infos\"]),\n ))\n n_samples += len(running_paths[idx][\"rewards\"])\n running_paths[idx] = None\n process_time += time.time() - t\n pbar.inc(len(obses))\n obses = next_obses\n\n pbar.stop()\n\n logger.record_tabular(\"PolicyExecTime\", 
policy_time)\n logger.record_tabular(\"EnvExecTime\", env_time)\n logger.record_tabular(\"ProcessExecTime\", process_time)\n\n return paths\n"
] |
[
[
"numpy.asarray"
]
] |
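obtain_samples above keeps one running path per environment, appends a transition per env on every vectorized step, and stacks a path when its env reports done. A stripped-down sketch of that bookkeeping with fake rewards (the step values and env count are assumptions):

import numpy as np

num_envs = 2
dones = np.asarray([True] * num_envs)            # all envs start "done", forcing a reset
running_rewards = [[] for _ in range(num_envs)]

for step_rewards in ([1.0, 0.5], [0.2, 0.3]):    # two fake vectorized steps
    for idx, r in enumerate(step_rewards):
        running_rewards[idx].append(r)

paths = [np.asarray(rs) for rs in running_rewards]   # one stacked reward array per env
print(dones, [float(p.sum()) for p in paths])        # [ True  True] [1.2, 0.8]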
alimhanif/blocksparse
|
[
"89074c5ccf78e3a88b4aa2aefc9e208d4773dcbc"
] |
[
"blocksparse/matmul.py"
] |
[
"\n\"\"\"Cuda op Python library.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf\nimport scipy.sparse as sparse\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops.init_ops import Initializer\nfrom blocksparse.utils import _op_module, z_order_2d, ceil_div, scalar_constant\nimport blocksparse.ewops as ew\n\n\nblocksparse_matmul = _op_module.blocksparse_matmul\nblocksparse_matmul_dx = _op_module.blocksparse_matmul_dx\nblocksparse_matmul_dw = _op_module.blocksparse_matmul_dw\nblocksparse_matmul_dwa = _op_module.blocksparse_matmul_dwa\nblocksparse_matmul_dg = _op_module.blocksparse_matmul_dg\nblocksparse_reduced_dw = _op_module.blocksparse_reduced_dw\nl2_normalize_ck = _op_module.l2_normalize_ck\nl2_normalize_grad_ck = _op_module.l2_normalize_grad_ck\nl2_normalize_gain_ck = _op_module.l2_normalize_gain_ck\nl2_normalize_gain_grad_ck = _op_module.l2_normalize_gain_grad_ck\n\nidentity_init = _op_module.blocksparse_matmul_identity_init\n\n# save a bit of gpu memory by only creating one copy of identical constant lookup tables\ng_lookup_cache = dict()\ng_lut_idx = 0\n\ndef get_constant(lut, name):\n global g_lookup_cache\n global g_lut_idx\n\n default_graph = tf.get_default_graph()\n if name not in g_lookup_cache:\n g_lookup_cache[name] = list()\n for np_entry, tf_entry in g_lookup_cache[name]:\n if np_entry.dtype == lut.dtype and np_entry.shape == lut.shape and tf_entry.graph is default_graph:\n if np.abs(np_entry.astype(np.int64) - lut.astype(np.int64)).sum() == 0:\n # found an exact match\n return tf_entry\n\n #print(name, lut.size)\n #tf_entry = tf.constant(lut, name=name+\"_lut\")\n with tf.control_dependencies(None):\n tf_entry = tf.get_variable(f\"{name}_lut_{g_lut_idx}\", initializer=lut.view(np.int64), trainable=False)\n g_lut_idx += 1\n\n g_lookup_cache[name].append( (lut, tf_entry) )\n return tf_entry\n\nclass IdentityInit(Initializer):\n\n def __init__(self, lut, CB, KB, blocks, bsize, scale=1.0):\n self.lut = lut\n self.CB = CB\n self.KB = KB\n self.blocks = blocks\n self.bsize = bsize\n self.scale = scale\n\n def __call__(self, shape, dtype=None, partition_info=None):\n assert shape[0] == self.blocks\n #lut = get_constant(self.lut, name=\"updat\")\n with tf.control_dependencies(None):\n lut = tf.constant(self.lut, name=\"identity_lut\")\n return identity_init(lut, CB=self.CB, KB=self.KB, blocks=self.blocks, bsize=self.bsize, scale=self.scale)\n\nSEG_MAX = (1<<63)-1\n\nclass BlocksparseMatMul(object):\n\n def __getstate__(self):\n return (self.layout, self.bsize, self.axis, self.z_order, self.name)\n\n def __setstate__(self, state):\n self.__init__(*state)\n\n def __init__(self, layout, block_size=32, feature_axis=0, z_order=True, name=None):\n\n if (feature_axis == 0 and block_size in (8,16,32)) or \\\n (feature_axis == 1 and block_size in (32,64)):\n self.axis = feature_axis\n self.bsize = block_size\n else:\n raise ValueError(\"Unsupported block size with this feature axis\")\n\n assert len(layout.shape) == 2\n CB, KB = layout.shape\n\n group_sizes = layout.sum(axis=0) # assume symetrical transpose\n max_group = group_sizes.max()\n min_group = group_sizes[np.nonzero(group_sizes)].min()\n if max_group / min_group > 2.0:\n segment_size = max(ceil_div(max_group,4), min_group*2)\n else:\n segment_size = SEG_MAX # not worth segmenting\n #print(max_group, min_group, segment_size, KB)\n #segment_size = SEG_MAX\n\n # don't creat any segments smaller than 
this\n seg_min = max(ceil_div(segment_size, 4), 4)\n\n # segment_size = seg_min = 2\n\n if layout.dtype != np.int32:\n layout = layout.astype(np.int32)\n\n # convert to csr for vastly more efficient python iteration on large matrices\n csr = sparse.csr_matrix(layout)\n cs, ks, vs = sparse.find(csr) # ks is in sorted order by default\n blocks = len(vs)\n idx = list(range(blocks))\n idxT = sorted(idx, key=lambda i: cs[i]) # transpose view\n\n # morton order (z-order) the blocks for efficient L2 cache utilization across all 3 ops\n updat_list = list()\n if z_order:\n blk = 0\n for _, i in sorted( [ (z_order_2d(cs[i], ks[i]), i) for i in range(blocks) ] ):\n vs[i] = blk\n updat_list.append((cs[i], ks[i]))\n blk += 1\n else:\n # row contiguous\n updat_list = list( zip(cs, ks) )\n vs = list(range(blocks))\n # cs = [b[0] for b in updat_list]\n # ks = [b[1] for b in updat_list]\n\n self.updat_list = updat_list\n self.updat_lut = np.array(updat_list, dtype=np.int32)\n\n fsetup = self.xprop_lut(KB, cs, ks, vs, idx, segment_size, seg_min)\n bsetup = self.xprop_lut(CB, ks, cs, vs, idxT, segment_size, seg_min)\n\n self.fprop_list, self.fprop_lut, self.l2_lut, self.fprop_shared, self.l2_shared, self.fprop_segments, self.fprop_locks = fsetup\n self.bprop_list, self.bprop_lut, _, self.bprop_shared, _, self.bprop_segments, self.bprop_locks = bsetup\n\n if name is None:\n name = \"BlocksparseMatMul\"\n\n self.z_order = z_order\n self.name = name\n self.flops = blocks * block_size * block_size * 2\n self.blocks = blocks\n self.w_shape = (blocks, block_size, block_size)\n self.g_shape = (blocks,)\n self.count = 0\n\n self.CB = CB\n self.KB = KB\n self.C = CB * block_size\n self.K = KB * block_size\n\n self.sparsity = round(float(blocks) / float(CB * KB), 3)\n\n # save boolean version for serialization purposes, TODO save csr version\n self.layout = layout > 0\n\n\n def i_shape(self, N): return (N, self.C) if self.axis else (self.C, N)\n def o_shape(self, N): return (N, self.K) if self.axis else (self.K, N)\n\n # return the coordinate in the layout that corresponds to a given block id\n def block_coord(self, block): return self.updat_list[block]\n\n # TODO: write a kernel to do this on the gpu to allow dynamic sparsity\n def xprop_lut(self, KB, cs, ks, vs, idx, max_seg, min_seg):\n\n locks = 0\n lockids = dict()\n seg = list()\n segs = list()\n col = list()\n cols = list()\n kset = set()\n\n # get a count of channels for each k\n channels = [0 for k in range(KB)]\n for i in idx:\n channels[ks[i]] += 1\n\n K = ks[idx[0]]\n seg_count = 0\n for i in idx:\n c, k, v = cs[i], ks[i], vs[i]\n kset.add(k)\n\n # check for new value of k\n if k != K:\n\n # keep track of unsegmented columns (for l2norm and testing)\n cols.append( (K, col) )\n col = list()\n\n # append segment for previous K and start a new one\n if len(seg):\n segs.append( (K, seg) )\n seg = list()\n seg_count += 1\n # for more than one segment we need to use spin locks to sync accumulation\n if seg_count > 1:\n locks += 1\n lockids[K] = locks\n seg_count = 0\n K = k\n\n col.append( (c, v) )\n seg.append( (c, v) )\n\n channels[k] -= 1\n\n # split columns up into segments, but don't let them be too small for effciency sake\n if len(seg) >= max_seg and channels[k] >= min_seg:\n segs.append( (k, seg) )\n seg = list()\n seg_count += 1\n\n # append last value of k\n cols.append( (k, col) )\n if len(seg):\n segs.append( (k, seg) )\n seg_count += 1\n if seg_count > 1:\n locks += 1\n lockids[k] = locks\n\n # add in any empty k blocks at the end\n for k in 
range(KB):\n if k not in kset:\n segs.append( (k, []) )\n cols.append( (k, []) )\n #else:\n # raise ValueError(\"sparsity mask has empty mappings. Not yet supported with feature_axis=0\")\n\n #segs.sort(key=lambda x: len(x[1]), reverse=True)\n\n # bsmm lut\n offset = len(segs) * 4\n xp_lut = np.empty(offset + len(vs)*2, dtype=np.int32)\n xp_max = 0\n for i, (k, lut) in enumerate(segs):\n # build the lut header: int2 offset, lut_size, K, lock_id\n xp_lut[i*4:(i+1)*4] = offset//2, len(lut), k, lockids.get(k, 0)\n xp_max = max(xp_max, len(lut))\n for entry in lut:\n xp_lut[offset:offset+2] = entry\n offset += 2\n\n # l2 norm lut (columns not broken up into segments)\n offset = len(cols) * 4\n l2_siz = offset + len(vs)\n # we use int64 views into the lut for tf compatibility reasons..\n if l2_siz & 1:\n l2_siz += 1\n l2_lut = np.zeros(l2_siz, dtype=np.int32)\n l2_max = 0\n for i, (k, lut) in enumerate(cols):\n # build the lut header: int offset, lut_size, K\n l2_lut[i*4:(i+1)*4] = offset, len(lut), k, 0\n l2_max = max(l2_max, len(lut))\n for entry in lut:\n l2_lut[offset] = entry[1]\n offset += 1\n\n return cols, xp_lut, l2_lut, xp_max*8, l2_max*4, len(segs), locks\n\n def prune(self, param, gate):\n new_blocks = np.sum(gate != 0.0)\n if new_blocks != self.blocks:\n new_param = np.empty((new_blocks, self.bsize, self.bsize), dtype=param.dtype)\n new_w = 0\n layout = self.layout\n for w, (c, k) in enumerate(self.updat_list):\n if gate[w] == 0.0:\n layout[c,k] = False\n else:\n new_param[new_w,:,:] = param[w,:,:]\n new_w += 1\n else:\n new_param = param\n\n sparsity = round(100 * float(new_blocks) / float(self.CB * self.KB), 1)\n\n print(\"prune: \", self.blocks, new_blocks, sparsity)\n return new_param, np.ones((new_blocks,), dtype=gate.dtype)\n\n def ortho_init(self):\n def _initializer(shape, dtype=np.float32, partition_info=None):\n W = np.empty(self.w_shape, dtype=dtype)\n bsize = self.bsize\n if self.sparsity < 1.0:\n print(\"%s ortho_init sparsity(%.2f)\" % (self.name, self.sparsity))\n # different block columns are already mostly orthogonal due to sparsity\n # So just make columns within each block of block_size orthogonal\n for k, lut, _ in self.fprop_list:\n shape = (len(lut) * bsize, bsize)\n a = np.random.normal(0.0, 1.0, shape).astype(dtype)\n u, _, v = np.linalg.svd(a, full_matrices=False)\n if u.shape != shape:\n u = v\n for i, (c, w) in enumerate(lut):\n W[w,:,:] = u[i*bsize:(i+1)*bsize,:]\n else:\n print(\"%s ortho_init dense\" % (self.name,))\n shape = (self.C, self.K)\n a = np.random.normal(0.0, 1.0, shape).astype(dtype)\n u, _, v = np.linalg.svd(a, full_matrices=False)\n if u.shape != shape:\n u = v\n for w, (c, k) in enumerate(self.updat_list):\n W[w,:,:] = u[c*bsize:(c+1)*bsize, k*bsize:(k+1)*bsize]\n\n return W\n return _initializer\n\n def identity_init(self, scale=1.0):\n\n return IdentityInit(self.updat_lut, self.CB, self.KB, self.blocks, self.bsize, scale=scale)\n\n # def _initializer(shape, dtype=np.float32, partition_info=None):\n # print(\"%s identity_init sparsity(%.2f)\" % (self.name, self.sparsity))\n # W = np.zeros(self.w_shape, dtype=dtype)\n # for w in range(self.blocks):\n # cb, kb = self.updat_list[w]\n # if (cb % self.KB) == (kb % self.CB):\n # W[w] = np.eye(self.bsize, dtype=dtype)\n # return W\n # return _initializer\n\n def checker_init(self):\n def _initializer(shape, dtype=np.float32, partition_info=None):\n gate = np.empty(self.blocks, dtype=dtype)\n for w, (c, k) in enumerate(self.updat_list):\n gate[w] = (c & 1) ^ (k & 1) ^ 1\n return gate\n return 
_initializer\n\n# grid = []\n# for c in range(5):\n# row = []\n# for k in range(5):\n# row.append((c & 1) ^ (k & 1) ^ 1)\n# grid.append(row)\n\n# for row in grid:\n# print(row)\n\n def fprop_test(self, I, W, gate=None):\n bsize = self.bsize\n if self.axis:\n O = np.zeros((I.shape[0], self.KB, bsize))\n I = I.reshape((-1, self.CB, bsize))\n for k, lut in self.fprop_list:\n for c, w in lut:\n O[:,k,:] += np.dot( I[:,c,:], W[w,:,:] ) # NC x CK = NK\n return O.reshape(I.shape[0], -1)\n else:\n N = I[0].size\n O = np.zeros((self.KB, bsize, N))\n I = I.reshape((self.CB, bsize, N))\n for k, lut in self.fprop_list:\n if gate is None:\n for c, w in lut:\n O[k,:,:] += np.dot( W[w,:,:].T, I[c,:,:] ) # CK.T x CN = KN\n else:\n for c, w in lut:\n if gate[w] != 0.0:\n O[k,:,:] += np.dot( W[w,:,:].T, I[c,:,:] ) * gate[w] # CK.T x CN = KN\n\n return O.reshape(-1, N)\n\n def bprop_test(self, E, W, gate=None):\n bsize = self.bsize\n if self.axis:\n B = np.zeros((E.shape[0], self.CB, bsize))\n E = E.reshape((-1, self.KB, bsize))\n for c, lut in self.bprop_list:\n for k, w in lut:\n B[:,c,:] += np.dot( E[:,k,:], W[w,:,:].T ) # NK x CK.T = NC\n return B.reshape(E.shape[0], -1)\n else:\n N = E[0].size\n B = np.zeros((self.CB, bsize, N))\n E = E.reshape((self.KB, bsize, N))\n for c, lut in self.bprop_list:\n if gate is None:\n for k, w in lut:\n B[c,:,:] += np.dot( W[w,:,:], E[k,:,:] ) # CK x KN = CN\n else:\n for k, w in lut:\n if gate[w] != 0.0:\n B[c,:,:] += np.dot( W[w,:,:], E[k,:,:] ) * gate[w] # CK x KN = CN\n\n return B.reshape(-1, N)\n\n def updat_test(self, I, E, gate=None, dw_gated=False):\n U = np.zeros(self.w_shape)\n bsize = self.bsize\n if self.axis:\n I = I.reshape((-1, self.CB, bsize))\n E = E.reshape((-1, self.KB, bsize))\n for w, (c, k) in enumerate(self.updat_list):\n U[w,:,:] = np.dot( I[:,c,:].T, E[:,k,:] ) # NC.T x NK = CK\n else:\n I = I.reshape((self.CB, bsize, -1))\n E = E.reshape((self.KB, bsize, -1))\n if not dw_gated or gate is None:\n for w, (c, k) in enumerate(self.updat_list):\n U[w,:,:] = np.dot( I[c,:,:], E[k,:,:].T ) # CN x KN.T = CK\n else:\n for w, (c, k) in enumerate(self.updat_list):\n if gate[w] != 0.0:\n U[w,:,:] = np.dot( I[c,:,:], E[k,:,:].T ) * gate[w] # CN x KN.T = CK\n return U\n\n def l2_normalize_test(self, W, epsilon=1e-12):\n W = W.copy()\n for k, lut in self.fprop_list:\n ws = [w for c, w in lut]\n W2 = W[ws,:,:].reshape(-1, self.bsize)\n norm = np.sqrt(np.maximum(np.sum(np.square(W2), axis=0, keepdims=True), epsilon))\n for w in ws:\n W[w,:,:] /= norm\n return W\n\n def l2_normalize_grad_test(self, W, U, epsilon=1e-12):\n for k, lut in self.fprop_list:\n ws = [w for c, w in lut]\n W2 = W[ws,:,:].reshape(-1, self.bsize)\n U2 = U[ws,:,:].reshape(-1, self.bsize)\n\n sum_sqr_w = np.sum(np.square(W2), axis=0, keepdims=True)\n max_w = np.maximum(sum_sqr_w, epsilon)\n norm_grad = ( U2 + W2 * (sum_sqr_w >= epsilon) * np.sum(-U2 * W2 / max_w, axis=0, keepdims=True) ) / np.sqrt(max_w)\n norm_grad = norm_grad.reshape(-1, self.bsize, self.bsize)\n for i, w in enumerate(ws):\n U[w,:,:] = norm_grad[i]\n return U\n\n def l2_normalize(self, W, gain=None, epsilon=1e-12, dtype=tf.float32):\n\n l2_lut = get_constant(self.l2_lut, name=\"l2\")\n\n if gain is None:\n W, _ = l2_normalize_ck(W, l2_lut, TY=dtype, epsilon=epsilon, K=self.K, shared=self.l2_shared, bsize=self.bsize )\n else:\n W, _ = l2_normalize_gain_ck(W, gain, l2_lut, TY=dtype, epsilon=epsilon, K=self.K, shared=self.l2_shared, bsize=self.bsize )\n return W\n\n def matmul(self, I, W, gate=None, gate_grad=False, 
dw_gated=False, name=None, bench=0):\n return self.__call__(I, W, gate=gate, gate_grad=gate_grad, dw_gated=dw_gated, name=name, bench=bench)\n\n def __call__(self, I, W, gate=None, gate_grad=False, dw_gated=False, name=None, bench=0):\n\n if name is None:\n name = self.name + (\"_%06d\" % self.count)\n self.count += 1\n\n if gate is None:\n gate = []\n else:\n gate = [gate]\n #assert self.bsize == 8 and self.axis == 0, \"blocksparse gating only implemented for block_size 8 on axis 0\"\n\n fprop_lut = get_constant(self.fprop_lut, name=\"fprop\")\n bprop_lut = get_constant(self.bprop_lut, name=\"bprop\")\n updat_lut = get_constant(self.updat_lut, name=\"updat\")\n\n O, _ = blocksparse_matmul(\n I, W, fprop_lut, bprop_lut, updat_lut, gate,\n gated_dw=bool(dw_gated), gate_grad=bool(gate_grad),\n blocks=self.blocks, bsize=self.bsize, axis=self.axis, C=self.C, K=self.K,\n segments=self.fprop_segments, segments_dx=self.bprop_segments,\n locks=self.fprop_locks, locks_dx=self.bprop_locks,\n shared=self.fprop_shared, shared_dx=self.bprop_shared, bench=bench, name=name\n )\n #print(O.op.name, O.op.device)\n return O\n\n@ops.RegisterGradient(\"BlocksparseMatmul\")\ndef blocksparse_matmul_grad(op, dy, temp):\n\n blocks = op.get_attr(\"blocks\")\n bsize = op.get_attr(\"bsize\")\n axis = op.get_attr(\"axis\")\n C = op.get_attr(\"C\")\n K = op.get_attr(\"K\")\n segments = op.get_attr(\"segments_dx\")\n shared = op.get_attr(\"shared_dx\")\n locks = op.get_attr(\"locks_dx\")\n gated_dw = op.get_attr(\"gated_dw\")\n gate_grad = op.get_attr(\"gate_grad\")\n bench = op.get_attr(\"bench\")\n x = op.inputs[0]\n w = op.inputs[1]\n lut_dx = op.inputs[3]\n lut_dw = op.inputs[4]\n gate = [op.inputs[5]] if len(op.inputs) > 5 else []\n name = op.name.split('/')[-1]\n\n dx, _ = blocksparse_matmul_dx(\n dy, w, lut_dx, gate, gated_dw=gated_dw, gate_grad=gate_grad,\n blocks=blocks, bsize=bsize, axis=axis, C=K, K=C, # swap C,K\n segments=segments, locks=locks, shared=shared,\n bench=bench, name=name+\"_bprop\")\n\n dw = blocksparse_matmul_dw(\n [x], [dy], lut_dw, gate, gated_dw=gated_dw, gate_grad=gate_grad,\n blocks=blocks, bsize=bsize, axis=axis, C=C, K=K,\n bench=bench, name=name+\"_updat\")\n\n # print(dx.op.name, dx.op.device)\n # print(dw.op.name, dw.op.device)\n\n if len(gate) == 0:\n return (dx, dw, None, None, None)\n elif gate_grad:\n dw, dg = blocksparse_matmul_dg(dw, w, gate[0])\n else:\n dg = None\n\n return (dx, dw, None, None, None, dg)\n\n\n@ops.RegisterGradient(\"L2NormalizeCK\")\ndef blocksparse_l2_normalize_grad_ck(op, dy, sum_sqr_x):\n\n epsilon = op.get_attr(\"epsilon\")\n K = op.get_attr(\"K\")\n shared = op.get_attr(\"shared\")\n bsize = op.get_attr(\"bsize\")\n grad_x = l2_normalize_grad_ck(dy, op.inputs[0], op.outputs[1], op.inputs[1], epsilon=epsilon, K=K, shared=shared, bsize=bsize)\n\n return (grad_x, None)\n\n@ops.RegisterGradient(\"L2NormalizeGainCK\")\ndef blocksparse_l2_normalize_grad_ck(op, dy, sum_sqr_x):\n\n epsilon = op.get_attr(\"epsilon\")\n K = op.get_attr(\"K\")\n shared = op.get_attr(\"shared\")\n bsize = op.get_attr(\"bsize\")\n grad_x, grad_g = l2_normalize_gain_grad_ck(\n dy, op.inputs[0], op.inputs[1], op.outputs[1], op.inputs[2], epsilon=epsilon, K=K, shared=shared, bsize=bsize)\n\n return (grad_x, grad_g, None)\n\n\n# Utils for graph re-writing\n\ndef block_reduced_full_dw(param_grad, scale=1.0, norm=\"max\", group_size=8):\n\n # max(abs()) or l2_norm()\n norm = 0 if norm.lower() == \"max\" else 1\n # host side scalar, if zero will cause compute for this op to be skipped.\n 
scale = scalar_constant(scale, dtype=tf.float32)\n\n assert group_size <= 8\n\n # backward walk param grad to find BlocksparseMatmulDW ops\n # this should only hit BlocksparseMatmulDWs, BlocksparseMatmulDGs, AddNs or FloatCasts\n ops = get_parents(param_grad, \"BlocksparseMatmulDW\")\n if len(ops) < 1:\n raise ValueError(\"BlocksparseMatmulDW op not found\")\n\n # this sorting is dependent on the op names being correctly ordered.\n ops.sort(key=lambda op: op.name.split('/')[-1], reverse=True)\n\n # use the parent scope for the new ops\n scope = ops[-1].name.split('/')\n scope = '/'.join(scope[0:-1])\n\n # we're going to be using absolute names, so clear name_scope\n with tf.name_scope(None):\n dw_full = None\n offset = 0\n while offset < len(ops):\n\n xs = [op.inputs[0] for op in ops[offset:offset+group_size] ]\n gs = [op.inputs[1] for op in ops[offset:offset+group_size] ]\n\n # Get the corresponding activation grad op for the last param grad op in the group\n bprop = None\n for consumer in gs[-1].consumers():\n if consumer.type == \"BlocksparseMatmulDX\":\n bprop = consumer\n break\n assert bprop is not None\n\n # get attributes of first op in group\n up = ops[offset]\n bsize = up.get_attr(\"bsize\")\n axis = up.get_attr(\"axis\")\n name = \"%s/block_reduced_full_dw_%03d\" % (scope, offset)\n dw_full = [] if dw_full is None else [dw_full]\n\n dw_full, _, _ = blocksparse_reduced_dw(xs, gs, scale, dw_full, bsize=bsize, norm=norm, axis=axis, name=name)\n\n # force the dw op before any more time steps are processed\n bprop._add_control_input(dw_full.op)\n\n offset += group_size\n\n return dw_full\n\n\ndef group_param_grads(param_grad, group_size=8):\n\n assert group_size <= 8\n\n # backward walk param grad to find BlocksparseMatmulDW ops\n # this should only hit BlocksparseMatmulDWs, BlocksparseMatmulDGs, AddNs or FloatCasts\n ops = get_parents(param_grad, \"BlocksparseMatmulDW\")\n\n if len(ops) <= 1:\n return param_grad\n\n # this sorting is dependent on the op names being correctly ordered.\n ops.sort(key=lambda op: op.name.split('/')[-1], reverse=True)\n # for x in ops:\n # print(x.name)\n # print(\"\")\n # exit()\n segment_size = len(ops)\n if ops[0].get_attr(\"gate_grad\") and len(ops[0].inputs) == 4:\n gate_count = dict()\n max_count = 0\n for op in ops:\n gate = op.inputs[3]\n count = gate_count.get(gate, 0) + 1\n gate_count[gate] = count\n max_count = max(max_count, count)\n for count in gate_count.values():\n if count != max_count:\n raise ValueError(\"Non-uniform gate broadcasting detected.\")\n segment_size = max_count\n if group_size > segment_size:\n group_size = segment_size\n else:\n assert segment_size % group_size == 0\n # nothing to rewrite here.\n if segment_size == 1:\n return param_grad\n\n # use the parent scope for the new ops\n scope = ops[-1].name.split('/')\n scope = '/'.join(scope[0:-1])\n\n # we're going to be using absolute names, so clear name_scope\n with tf.name_scope(None):\n dw = None\n dws = list()\n offset = 0\n seg_cnt = 0\n while offset < len(ops):\n\n xs = [op.inputs[0] for op in ops[offset:offset+group_size] ]\n gs = [op.inputs[1] for op in ops[offset:offset+group_size] ]\n\n # Get the corresponding activation grad op for the last param grad op in the group\n bprop = None\n for consumer in gs[-1].consumers():\n if consumer.type == \"BlocksparseMatmulDX\":\n bprop = consumer\n break\n assert bprop is not None\n\n # get attributes of first op in group\n up = ops[offset]\n blocks = up.get_attr(\"blocks\")\n bsize = up.get_attr(\"bsize\")\n axis = 
up.get_attr(\"axis\")\n gated_dw = up.get_attr(\"gated_dw\")\n gate_grad = up.get_attr(\"gate_grad\")\n C = up.get_attr(\"C\")\n K = up.get_attr(\"K\")\n bench = up.get_attr(\"bench\") // len(xs)\n lut = up.inputs[2]\n name = \"%s/matmul_concat_updat_%03d\" % (scope, offset)\n gate = [up.inputs[3]] if len(up.inputs) > 3 else []\n\n # The first op needs to allocate a new dw tensor\n if dw is None:\n dw = blocksparse_matmul_dw(\n xs, gs, lut, gate, gated_dw=gated_dw,\n gate_grad=gate_grad, blocks=blocks, bsize=bsize, axis=axis,\n C=C, K=K, bench=bench, name=name)\n # subsequent ops can just accumulate in place\n else:\n dw = blocksparse_matmul_dwa(\n xs, gs, lut, dw, gate, gated_dw=gated_dw,\n gate_grad=gate_grad, blocks=blocks, bsize=bsize, axis=axis,\n C=C, K=K, bench=bench, name=name)\n\n # force the dw op before any more time steps are processed\n bprop._add_control_input(dw.op)\n\n seg_cnt += group_size\n offset += group_size\n\n if gate_grad and seg_cnt >= segment_size:\n seg_cnt = 0\n dws.append(dw)\n dw = None\n\n if gate_grad:\n for i, dw in enumerate(dws):\n # for op in ops[i*group_size:(i+1)*group_size]:\n # print(op.name)\n # print()\n dw_op = ops[i*segment_size:(i+1)*segment_size][-1]\n dws[i] = group_dg_grads(dw_op, dw, scope)\n\n # add up final dw values in groups of 4 for good mix of perforamnce and memory use\n dw = ew.add_n8_op(dws[0:4]) if len(dws) > 1 else dws[0]\n for i in range(4, len(dws), 4):\n dw = ew.add_n8_op(dws[i:i+4] + [dw])\n\n # splice in these grad op types sitting on top of the param\n if param_grad.op.type in (\"Cast\", \"FloatCast\", \"L2NormalizeGradCK\", \"L2NormalizeGainGradCK\"):\n param_grad.op._update_input(0, dw)\n dw = param_grad\n elif param_grad.op.type not in (\"AddN\", \"AddN8\", \"BlocksparseMatmulDW\",\"BlocksparseMatmulDG\"):\n raise ValueError(\"Unexpected grad op type:\", param_grad.op.type, param_grad.op.name)\n\n return dw\n\ndef group_dg_grads(bsmm_dw_op, dw, scope):\n\n # splice the dg + addn ops out of the graph and replace with a single dg op\n # that takes in the final accumulated dw value\n dg_op = bsmm_dw_op.outputs[0].consumers()[0]\n assert dg_op.type == \"BlocksparseMatmulDG\"\n dw, dg = blocksparse_matmul_dg(dw, *dg_op.inputs[1:], name=f\"{scope}/BlocksparseMatmulDG\")\n\n # splice old add_n op out of graph\n addn_op = dg_op.outputs[1].consumers()[0]\n addn_ops = list()\n addn_ops.append(addn_op)\n if addn_op.type[0:3] != \"Add\":\n raise ValueError(f\"bad type: {addn_ops[0].type} Cause: this segment does not share a broadcasted gate.\")\n elif addn_op.type == \"AddN8\":\n while True:\n addn_op = addn_op.outputs[0].consumers()[0]\n if addn_op.type == \"AddN8\":\n addn_ops.append(addn_op)\n else:\n break\n\n # print(addn_op.name)\n # for i in addn_op.inputs:\n # print(i.name)\n # print()\n addn = addn_ops[-1].outputs[0]\n dg_consumers = addn.consumers()\n #for op in dg_consumers:\n\n assert len(dg_consumers) > 0, \"raw dg grad not supported\"\n #print(addn.name)\n for dg_consumer in dg_consumers:\n found = False\n #print(dg_consumer.name)\n for i, t in enumerate(dg_consumer.inputs):\n #print(i, t.name)\n if t is addn:\n #print(f\"splicing dg into: {dg_consumer.name} at {i}\")\n dg_consumer._update_input(i, dg)\n found = True\n break\n if not found:\n print(f\"splice failed for {dg_consumer.name}\")\n return dw\n\n\ndef get_bsmm_dx_ops(param_grad):\n\n dw_ops = get_parents(param_grad, \"BlocksparseMatmulDW\")\n dx_ops = list()\n\n # this sorting is dependent on the op names being correctly ordered.\n dw_ops.sort(key=lambda op: 
op.name.split('/')[-1], reverse=True)\n for dw_op in dw_ops:\n # Get the corresponding activation grad op\n dx_op = None\n for op in dw_op.inputs[1].consumers():\n if op.type==\"BlocksparseMatmulDX\":\n dx_op = op\n break\n assert dx_op is not None\n dx_ops.append(dx_op)\n return dx_ops\n\ndef get_parents(grad, op_type):\n if grad.op.type == op_type:\n return [grad.op]\n ops = list()\n wave = set([grad.op])\n while wave:\n new_wave = set()\n for op in wave:\n # print(op.name)\n # for i in op.inputs:\n # print(\" \", i.name)\n # print()\n for op in (t.op for t in op.inputs):\n if op.type == op_type:\n ops.append(op)\n else:\n new_wave.add(op)\n wave = new_wave\n return ops\n\ndef largest_block(dim):\n for blk in (32,16,8):\n if dim % blk == 0:\n return (blk, dim // blk)\n raise ValueError(\"dimension not multiple of 8, 16, or 32\")\n\n############################## Sparse Projection Ops #####################################\n\ngather_scatter_op = _op_module.gather_scatter\nscatter_add_mul_op = _op_module.scatter_add_mul\nscatter_mul_grad_op = _op_module.scatter_mul_grad\n\nOP_GAT = 0\nOP_SCT = 1\nOP_ADD = 2\nOP_MUL = 3\n\nclass SparseProj(object):\n\n def __getstate__(self):\n return (self.nhidden, self.nproj, self.gather_lut, self.name)\n\n def __setstate__(self, state):\n self.__init__(state[0], nproj=state[1], gather_lut=state[2], name=state[3])\n\n def __init__(self, nhidden, nproj=None, proj_stride=None, block_size=32, gather_lut=None, name=None):\n\n if gather_lut is None:\n\n gather_lut = np.arange(nhidden, dtype=np.int32)\n\n if nproj is not None:\n\n assert nproj <= nhidden\n np.random.shuffle(gather_lut)\n gather_lut = np.sort(gather_lut[0:nproj])\n\n elif proj_stride is not None:\n assert proj_stride <= nhidden\n\n # trim to multiple of block_size\n gather_max = ((nhidden // proj_stride) // block_size) * block_size * proj_stride\n gather_lut = gather_lut[:gather_max:proj_stride].copy()\n nproj = gather_lut.size\n else:\n raise ValueError(\"missing nproj, proj_stride or gather_lut\")\n\n if name is None:\n name = \"SparseProj\"\n\n # build reverse mapping\n scatter_lut = np.empty(nhidden, dtype=np.int32)\n scatter_lut[:] = -1\n scatter_lut[gather_lut] = np.arange(nproj, dtype=np.int32)\n\n self.name = name\n self.gather_lut = gather_lut\n self.scatter_lut = scatter_lut\n self.nhidden = nhidden\n self.nproj = nproj\n\n\n def gather(self, x):\n assert x.get_shape()[0].value == self.nhidden\n gather_lut = get_constant(self.gather_lut, name=\"gather\")\n scatter_lut = get_constant(self.scatter_lut, name=\"scatter\")\n return gather_scatter_op(x, gather_lut, scatter_lut, C=self.nhidden, K=self.nproj, op=OP_GAT)\n\n def scatter(self, x):\n assert x.get_shape()[0].value == self.nproj\n gather_lut = get_constant(self.gather_lut, name=\"gather\")\n scatter_lut = get_constant(self.scatter_lut, name=\"scatter\")\n return gather_scatter_op(x, scatter_lut, gather_lut, C=self.nproj, K=self.nhidden, op=OP_SCT)\n\n def scatter_add(self, x, y):\n assert x.get_shape()[0].value == self.nhidden\n assert y.get_shape()[0].value == self.nproj\n gather_lut = get_constant(self.gather_lut, name=\"gather\")\n scatter_lut = get_constant(self.scatter_lut, name=\"scatter\")\n return scatter_add_mul_op(x, y, gather_lut, scatter_lut, C=self.nproj, K=self.nhidden, op=OP_ADD)\n\n def scatter_mul(self, x, y):\n assert x.get_shape()[0].value == self.nhidden\n assert y.get_shape()[0].value == self.nproj\n gather_lut = get_constant(self.gather_lut, name=\"gather\")\n scatter_lut = get_constant(self.scatter_lut, 
name=\"scatter\")\n return scatter_add_mul_op(x, y, gather_lut, scatter_lut, C=self.nproj, K=self.nhidden, op=OP_MUL)\n\n\n@ops.RegisterGradient(\"GatherScatter\")\ndef gather_scatter_grad(op, dy):\n dx = gather_scatter_op(dy, op.inputs[2], op.inputs[1], C=op.get_attr(\"K\"), K=op.get_attr(\"C\"), op=1-op.get_attr(\"op\"))\n return dx, None, None\n\n@ops.RegisterGradient(\"ScatterAddMul\")\ndef scatter_add_mul_grad(op, dz):\n\n if op.get_attr(\"op\") == OP_ADD:\n dx = dz\n dy = gather_scatter_op(dz, op.inputs[2], op.inputs[3], C=op.get_attr(\"K\"), K=op.get_attr(\"C\"), op=OP_GAT)\n else:\n dx, dy = scatter_mul_grad_op(dz, *op.inputs[0:3], C=op.get_attr(\"C\"), K=op.get_attr(\"K\"))\n\n return dx, dy, None, None\n\n\n# REGISTER_OP(\"GatherScatter\")\n# .Input(\"x: T\")\n# .Input(\"gather: int32\")\n# .Input(\"scatter: int32\")\n# .Output(\"y: T\")\n# .Attr(\"T: {half, float, bfloat16}\")\n# .Attr(\"C: int\")\n# .Attr(\"K: int\")\n# .Attr(\"op: int\")\n\n# REGISTER_OP(\"ScatterAddMul\")\n# .Input(\"x: T\")\n# .Input(\"y: T\")\n# .Input(\"gather: int32\")\n# .Input(\"scatter: int32\")\n# .Output(\"z: T\")\n# .Attr(\"T: {half, float, bfloat16}\")\n# .Attr(\"C: int\")\n# .Attr(\"K: int\")\n# .Attr(\"op: int\")\n\n# REGISTER_OP(\"ScatterMulGrad\")\n# .Input(\"dz: T\")\n# .Input(\"x: T\")\n# .Input(\"y: T\")\n# .Input(\"gather: int32\")\n# .Output(\"dx: T\")\n# .Output(\"dy: T\")\n# .Attr(\"T: {half, float, bfloat16}\")\n# .Attr(\"C: int\")\n# .Attr(\"K: int\")\n"
] |
[
[
"numpy.dot",
"numpy.sqrt",
"tensorflow.control_dependencies",
"tensorflow.get_default_graph",
"numpy.square",
"numpy.linalg.svd",
"scipy.sparse.find",
"numpy.arange",
"tensorflow.name_scope",
"numpy.zeros",
"numpy.nonzero",
"scipy.sparse.csr_matrix",
"numpy.array",
"numpy.sum",
"tensorflow.constant",
"numpy.maximum",
"numpy.random.shuffle",
"numpy.ones",
"numpy.sort",
"numpy.random.normal",
"numpy.empty"
]
] |
mattwigway/DiscreteChoiceModels.jl
|
[
"82cc136dcd7dd9cbeb2d5b83c19fe08b0f5dcdd1"
] |
[
"benchmark/foreign/python/benchmarkable.py"
] |
[
"import timeit\nimport numpy as np\nimport multiprocessing\nimport tempfile\nimport os\n\n\"\"\"\nThis class represents something that can be benchmarked. Subclasses should\noverride the setup() and measurable() methods. Only the time taken by measurable() will be benchmarked.\n\"\"\"\n\n\nclass Benchmarkable(multiprocessing.Process):\n def __init__(self, queue):\n super().__init__()\n self.queue = queue\n\n def run(self):\n # biogeme writes reams of output - hide that\n with tempfile.TemporaryDirectory() as tmpdir:\n os.chdir(tmpdir)\n self.setup()\n extime = timeit.timeit(\"self.measurable()\", number=1, globals={\"self\": self})\n self.queue.put(extime)\n\n def setup(self):\n pass\n\n def measurable(self):\n raise NotImplementedError(\"Override measurable() in subclass\")\n\n @classmethod\n def benchmark(object, number=100, func=np.median):\n times = np.full(number, np.nan, \"float64\")\n # do executions sequentially so they don't interfere with each other\n for i in range(number):\n q = multiprocessing.Queue()\n # benchmarkable extends multiprocessing.Process\n p = object(q)\n p.start()\n p.join()\n times[i] = q.get()\n\n return func(times)\n\n def runonce(self):\n self.setup()\n return self.measurable()\n"
] |
[
[
"numpy.full"
]
] |
mhaseeb123/hicops
|
[
"e40ab4e4e737cdb7f5921e1743c95bab7e3e3e27"
] |
[
"tools/ms2prep/mspartition.py"
] |
[
"#\n# imports\n#\n\nimport os\nimport sys\nimport glob\nimport copy\nimport numpy as np\nimport argparse\n\n#\n# main function\n#\n\nif __name__ == '__main__':\n\n # initialize arg parser\n parser = argparse.ArgumentParser(description='Split/merge the MS/MS data set into/from partitions')\n\n # data root directory\n parser.add_argument('-i', '--idir', dest='inpath', type=str, required=True,\n help='Path to data set files')\n\n # data file extensions\n parser.add_argument('-e', '--ext', dest='extension', type=str, required=True,\n help='Data set file extension e.g. ms2, mzML, mzXML, mgf')\n\n # number of nodes/partitions\n parser.add_argument('-N', dest='nodes', type=int, required=True,\n help='Number of partitions: >= 2 if -m not set, else anything')\n\n # merge operation?\n parser.add_argument('-m', '--merge', dest='merge', action='store_true',\n help='Merge all existing partitions: -N <anything is fine>, default: False')\n\n # parse arguments\n args = parser.parse_args()\n\n # input path to the MS2 data directory\n inpath = args.inpath.lstrip(' ')\n inpath = os.path.expanduser(inpath)\n\n # check if directory exists\n if not os.path.isdir(inpath):\n print ('ERROR: Directory does not exist\\n')\n sys.exit (-1)\n\n # get file extension\n extension = args.extension.lstrip(' ')\n\n # extract extra fields from spectra\n merge = args.merge\n\n # if merge operation\n if (merge == True):\n \n # get all files in part_*/*.ext\n files = np.array(glob.glob(inpath + '/part_*/*.' + extension))\n \n # move data parts back to the root\n for file in files:\n os.rename(file, inpath + '/' + os.path.split(file)[1])\n \n # scatter operation\n else:\n # get number of nodes\n nodes = args.nodes\n \n # check for number of nodes\n if nodes < 2:\n print ('ERROR: N >= 2 required')\n sys.exit(-1)\n \n # get all files with *.ext\n files = np.array(glob.glob(inpath + '/*.' + extension))\n\n # check if files found\n if len(files) < 1:\n print ('ERROR: No ' + extension + ' files found\\n')\n sys.exit(-1)\n\n # check number of nodes and files\n if nodes > len(files):\n print ('WARNING: # partitions > # files')\n print ('Some partitions may be empty\\n')\n \n # shuffle the files\n np.random.shuffle(files)\n \n # split files in 'nodes' partitions\n splits = np.array_split(files, nodes)\n \n # create new directories\n for i in range(nodes):\n os.makedirs(inpath + '/part_' + str(i+1), exist_ok=True)\n\n # move data to parts\n for file in splits[i]:\n os.rename(file, inpath + '/part_' + str(i+1) + '/' + os.path.split(file)[1])\n\n # print final status\n print ('DONE\\n')"
] |
[
[
"numpy.array_split",
"numpy.random.shuffle"
]
] |
azuki-miho/RFCR
|
[
"039733f25818d0d3db6af8c9e00e7ad989b69ee1",
"039733f25818d0d3db6af8c9e00e7ad989b69ee1"
] |
[
"KPConv_deform_ScanNet/visualize_scannet.py",
"KPConv_deform_ScanNet/models/KPFCNN_model.py"
] |
[
"\"\"\"\nAuthor: Jingyu Gong\nDate: May 2021\n\"\"\"\n\nimport numpy as np\nimport plyfile\nfrom plyfile import PlyData\nimport os\nimport glob\nfrom utils.ply import write_ply\n\n#result_path = \"results/Log_2020-08-26_08-38-15_add_weight_697/val_preds_449/\"\nresult_path = \"test/Log_2020-10-05_09-36-55_regionce_701/val_predictions/\"\nmesh_path = \"Data/Scannet/training_meshes/\"\nsave_path = result_path + \"visualization/\"\nmethod = \"rfcr\"\n\n# color palette for nyu40 labels\ndef create_color_palette():\n return [\n (0, 0, 0),\n (174, 199, 232),\t\t# wall\n (152, 223, 138),\t\t# floor\n (31, 119, 180), \t\t# cabinet\n (255, 187, 120),\t\t# bed\n (188, 189, 34), \t\t# chair\n (140, 86, 75), \t\t# sofa\n (255, 152, 150),\t\t# table\n (214, 39, 40), \t\t# door\n (197, 176, 213),\t\t# window\n (148, 103, 189),\t\t# bookshelf\n (196, 156, 148),\t\t# picture\n (23, 190, 207), \t\t# counter\n (178, 76, 76), \n (247, 182, 210),\t\t# desk\n (66, 188, 102), \n (219, 219, 141),\t\t# curtain\n (140, 57, 197), \n (202, 185, 52), \n (51, 176, 203), \n (200, 54, 131), \n (92, 193, 61), \n (78, 71, 183), \n (172, 114, 82), \n (255, 127, 14), \t\t# refrigerator\n (91, 163, 138), \n (153, 98, 156), \n (140, 153, 101),\n (158, 218, 229),\t\t# shower curtain\n (100, 125, 154),\n (178, 127, 135),\n (120, 185, 128),\n (146, 111, 194),\n (44, 160, 44), \t\t# toilet\n (112, 128, 144),\t\t# sink\n (96, 207, 209), \n (227, 119, 194),\t\t# bathtub\n (213, 92, 176), \n (94, 106, 211), \n (82, 84, 163), \t\t# otherfurn\n (100, 85, 144)\n ]\n\nif not os.path.isdir(save_path):\n os.mkdir(save_path)\n\nfor pc in glob.glob(result_path + \"*.ply\"):\n print(pc)\n pc_data = PlyData.read(pc)\n pc_data = pc_data.elements[0].data\n points = np.array([pc_data['x'], pc_data['y'], pc_data['z']]).T\n preds = np.array(pc_data['preds'])\n if result_path[-2] == 's':\n labels = np.array(pc_data['gt'])\n else:\n labels = np.array(pc_data['class'])\n pred_colors = np.zeros(points.shape).astype(np.uint8)\n label_colors = np.zeros(points.shape).astype(np.uint8)\n color_palette = create_color_palette()\n for idx, color in enumerate(color_palette):\n pred_colors[preds==idx] = color\n label_colors[labels==idx] = color\n pred_colors[labels==0] = color_palette[0]\n pred_output_file = save_path + pc.split(\"/\")[-1][:-4] + \"_\" + method + \".ply\"\n gt_output_file = save_path + pc.split(\"/\")[-1][:-4] + \"_\" + \"gt\" + \".ply\"\n color_file = save_path + pc.split(\"/\")[-1][:-4] + \"_\" + \"pc\" + \".ply\"\n write_ply(pred_output_file, [points, pred_colors, preds], ['x', 'y', 'z', 'red', 'green', 'blue', 'class'])\n write_ply(gt_output_file, [points, label_colors, labels], ['x', 'y', 'z', 'red', 'green', 'blue', 'class'])\n #original pc\n pc_data = PlyData.read(mesh_path + pc.split(\"/\")[-1][:-4] + \"_mesh.ply\")\n pc_data = pc_data.elements[0].data\n points = np.array([pc_data['x'], pc_data['y'], pc_data['z']]).T\n colors = np.array([pc_data['red'], pc_data['green'], pc_data['blue']]).T\n labels = np.array(pc_data['class'])\n write_ply(color_file, [points, colors, labels], ['x', 'y', 'z', 'red', 'green', 'blue', 'class'])\n\n\n",
"\"\"\"\nModified from KPConv: https://github.com/HuguesTHOMAS/KPConv\nAuthor: Jingyu Gong\nDate: May 2021\n\"\"\"\n\n# Basic libs\nfrom os import makedirs\nfrom os.path import exists\nimport time\nimport tensorflow as tf\nimport sys\n\n# Convolution functions\nfrom models.network_blocks import assemble_FCNN_blocks, segmentation_head, multi_segmentation_head\nfrom models.network_blocks import segmentation_loss, multi_segmentation_loss, h_post_loss\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n#\n# Model Class\n# \\*****************/\n#\n\n\nclass KernelPointFCNN:\n\n def __init__(self, flat_inputs, config):\n \"\"\"\n Initiate the model\n :param flat_inputs: List of input tensors (flatten)\n :param config: configuration class\n \"\"\"\n\n # Model parameters\n self.config = config\n\n # Path of the result folder\n if self.config.saving:\n if self.config.saving_path == None:\n self.saving_path = time.strftime('results/Log_%Y-%m-%d_%H-%M-%S', time.gmtime())\n else:\n self.saving_path = self.config.saving_path\n if not exists(self.saving_path):\n makedirs(self.saving_path)\n\n ########\n # Inputs\n ########\n\n # Sort flatten inputs in a dictionary\n with tf.variable_scope('inputs'):\n self.inputs = dict()\n self.inputs['points'] = flat_inputs[:config.num_layers]\n self.inputs['neighbors'] = flat_inputs[config.num_layers:2 * config.num_layers]\n self.inputs['pools'] = flat_inputs[2 * config.num_layers:3 * config.num_layers]\n self.inputs['upsamples'] = flat_inputs[3 * config.num_layers:4 * config.num_layers]\n ind = 4 * config.num_layers\n self.inputs['features'] = flat_inputs[ind]\n ind += 1\n self.inputs['batch_weights'] = flat_inputs[ind]\n ind += 1\n self.inputs['in_batches'] = flat_inputs[ind]\n ind += 1\n self.inputs['out_batches'] = flat_inputs[ind]\n ind += 1\n self.inputs['point_labels'] = flat_inputs[ind]\n ind += 1\n self.labels = self.inputs['point_labels']\n self.onehot_labels = [tf.cast(tf.one_hot(self.labels, 21)[:,1:], tf.int32)]\n tmp_layer = 0\n for block in config.architecture:\n if 'strided' in block:\n tmp_onehot_label = tf.gather(self.onehot_labels[tmp_layer], self.inputs['pools'][tmp_layer])\n tmp_onehot_label = tf.reduce_max(tmp_onehot_label, axis=1)\n self.onehot_labels.append(tmp_onehot_label)\n tmp_layer += 1\n\n if config.network_model in ['multi_segmentation', 'multi_cloud_segmentation']:\n self.inputs['super_labels'] = flat_inputs[ind]\n ind += 1\n\n self.inputs['augment_scales'] = flat_inputs[ind]\n ind += 1\n self.inputs['augment_rotations'] = flat_inputs[ind]\n\n if config.network_model in [\"cloud_segmentation\", 'multi_cloud_segmentation']:\n ind += 1\n self.inputs['point_inds'] = flat_inputs[ind]\n ind += 1\n self.inputs['cloud_inds'] = flat_inputs[ind]\n\n elif config.network_model in ['multi_segmentation', 'segmentation']:\n ind += 1\n self.inputs['object_inds'] = flat_inputs[ind]\n\n # Dropout placeholder\n self.dropout_prob = tf.placeholder(tf.float32, name='dropout_prob')\n\n ########\n # Layers\n ########\n\n # Create layers\n with tf.variable_scope('KernelPointNetwork'):\n output_features, supervised_features, se_loss = assemble_FCNN_blocks(self.inputs,\n self.config,\n self.dropout_prob)\n\n if config.network_model in [\"multi_segmentation\", 'multi_cloud_segmentation']:\n self.logits = multi_segmentation_head(output_features,\n self.inputs['super_labels'],\n self.config,\n self.dropout_prob)\n else:\n self.logits = segmentation_head(output_features,\n self.config,\n 
self.dropout_prob)\n\n h_post = h_post_loss(self.logits, self.inputs) * 0.1\n ########\n # Losses\n ########\n\n with tf.variable_scope('loss'):\n\n\n if config.network_model in [\"multi_segmentation\", 'multi_cloud_segmentation']:\n self.output_loss = multi_segmentation_loss(self.logits,\n self.inputs,\n batch_average=self.config.batch_averaged_loss)\n\n elif len(self.config.ignored_label_inds) > 0:\n\n # Boolean mask of points that should be ignored\n ignored_bool = tf.zeros_like(self.labels, dtype=tf.bool)\n for ign_label in self.config.ignored_label_inds:\n ignored_bool = tf.logical_or(ignored_bool, tf.equal(self.labels, ign_label))\n\n # Collect logits and labels that are not ignored\n inds = tf.squeeze(tf.where(tf.logical_not(ignored_bool)))\n new_logits = tf.gather(self.logits, inds, axis=0)\n new_dict = {'point_labels': tf.gather(self.labels, inds, axis=0)}\n\n # Reduce label values in the range of logit shape\n reducing_list = tf.range(self.config.num_classes, dtype=tf.int32)\n inserted_value = tf.zeros((1,), dtype=tf.int32)\n for ign_label in self.config.ignored_label_inds:\n reducing_list = tf.concat([reducing_list[:ign_label], inserted_value, reducing_list[ign_label:]], 0)\n new_dict['point_labels'] = tf.gather(reducing_list, new_dict['point_labels'])\n\n # Add batch weigths to dict if needed\n if self.config.batch_averaged_loss:\n new_dict['batch_weights'] = self.inputs['batch_weights']\n\n # Output loss\n self.output_loss = segmentation_loss(new_logits,\n new_dict,\n batch_average=self.config.batch_averaged_loss)\n\n else:\n self.output_loss = segmentation_loss(self.logits,\n self.inputs,\n batch_average=self.config.batch_averaged_loss)\n\n # Add regularization\n supervision_layer_num = 0\n\n self.h_loss = tf.losses.sigmoid_cross_entropy(self.onehot_labels[4], supervised_features[0])\n supervision_layer_num += 1\n self.h_loss += tf.losses.sigmoid_cross_entropy(self.onehot_labels[3], supervised_features[1])\n supervision_layer_num += 1\n self.h_loss += tf.losses.sigmoid_cross_entropy(self.onehot_labels[2], supervised_features[2])\n supervision_layer_num += 1\n #self.h_loss += tf.losses.sigmoid_cross_entropy(self.onehot_labels[1], supervised_features[3])\n #supervision_layer_num += 1\n self.h_loss /= supervision_layer_num\n # Add regularization\n self.loss = self.regularization_losses() + self.output_loss + h_post + self.h_loss + se_loss \n\n return\n\n def regularization_losses(self):\n\n #####################\n # Regularization loss\n #####################\n\n # Get L2 norm of all weights\n regularization_losses = [tf.nn.l2_loss(v) for v in tf.global_variables() if 'weights' in v.name]\n self.regularization_loss = self.config.weights_decay * tf.add_n(regularization_losses)\n\n ##############################\n # Gaussian regularization loss\n ##############################\n\n gaussian_losses = []\n for v in tf.global_variables():\n if 'kernel_extents' in v.name:\n\n # Layer index\n layer = int(v.name.split('/')[1].split('_')[-1])\n\n # Radius of convolution for this layer\n conv_radius = self.config.first_subsampling_dl * self.config.density_parameter * (2 ** (layer - 1))\n\n # Target extent\n target_extent = conv_radius / 1.5\n gaussian_losses += [tf.nn.l2_loss(v - target_extent)]\n\n if len(gaussian_losses) > 0:\n self.gaussian_loss = self.config.gaussian_decay * tf.add_n(gaussian_losses)\n else:\n self.gaussian_loss = tf.constant(0, dtype=tf.float32)\n\n #############################\n # Offsets regularization loss\n #############################\n\n offset_losses = []\n\n 
if self.config.offsets_loss == 'permissive':\n\n for op in tf.get_default_graph().get_operations():\n if op.name.endswith('deformed_KP'):\n\n # Get deformed positions\n deformed_positions = op.outputs[0]\n\n # Layer index\n layer = int(op.name.split('/')[1].split('_')[-1])\n\n # Radius of deformed convolution for this layer\n conv_radius = self.config.first_subsampling_dl * self.config.density_parameter * (2 ** layer)\n\n # Normalized KP locations\n KP_locs = deformed_positions/conv_radius\n\n # Loss will be zeros inside radius and linear outside radius\n # Mean => loss independent from the number of input points\n radius_outside = tf.maximum(0.0, tf.norm(KP_locs, axis=2) - 1.0)\n offset_losses += [tf.reduce_mean(radius_outside)]\n\n\n elif self.config.offsets_loss == 'fitting':\n\n for op in tf.get_default_graph().get_operations():\n\n if op.name.endswith('deformed_d2'):\n\n # Get deformed distances\n deformed_d2 = op.outputs[0]\n\n # Layer index\n layer = int(op.name.split('/')[1].split('_')[-1])\n\n # Radius of deformed convolution for this layer\n KP_extent = self.config.first_subsampling_dl * self.config.KP_extent * (2 ** layer)\n\n # Get the distance to closest input point\n KP_min_d2 = tf.reduce_min(deformed_d2, axis=1)\n\n # Normalize KP locations to be independant from layers\n KP_min_d2 = KP_min_d2 / (KP_extent**2)\n\n # Loss will be the square distance to closest input point.\n # Mean => loss independent from the number of input points\n offset_losses += [tf.reduce_mean(KP_min_d2)]\n\n if op.name.endswith('deformed_KP'):\n\n # Get deformed positions\n deformed_KP = op.outputs[0]\n\n # Layer index\n layer = int(op.name.split('/')[1].split('_')[-1])\n\n # Radius of deformed convolution for this layer\n KP_extent = self.config.first_subsampling_dl * self.config.KP_extent * (2 ** layer)\n\n # Normalized KP locations\n KP_locs = deformed_KP/KP_extent\n\n # Point should not be close to each other\n for i in range(self.config.num_kernel_points):\n other_KP = tf.stop_gradient(tf.concat([KP_locs[:, :i, :], KP_locs[:, i + 1:, :]], axis=1))\n distances = tf.sqrt(tf.reduce_sum(tf.square(other_KP - KP_locs[:, i:i+1, :]), axis=2))\n repulsive_losses = tf.reduce_sum(tf.square(tf.maximum(0.0, 1.5 - distances)), axis=1)\n offset_losses += [tf.reduce_mean(repulsive_losses)]\n\n elif self.config.offsets_loss != 'none':\n raise ValueError('Unknown offset loss')\n\n if len(offset_losses) > 0:\n self.offsets_loss = self.config.offsets_decay * tf.add_n(offset_losses)\n else:\n self.offsets_loss = tf.constant(0, dtype=tf.float32)\n\n return self.offsets_loss + self.gaussian_loss + self.regularization_loss\n\n def parameters_log(self):\n\n self.config.save(self.saving_path)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
] |
[
[
"numpy.array",
"numpy.zeros"
],
[
"tensorflow.concat",
"tensorflow.zeros",
"tensorflow.global_variables",
"tensorflow.equal",
"tensorflow.nn.l2_loss",
"tensorflow.get_default_graph",
"tensorflow.add_n",
"tensorflow.gather",
"tensorflow.square",
"tensorflow.logical_not",
"tensorflow.norm",
"tensorflow.placeholder",
"tensorflow.zeros_like",
"tensorflow.one_hot",
"tensorflow.reduce_max",
"tensorflow.constant",
"tensorflow.range",
"tensorflow.reduce_mean",
"tensorflow.maximum",
"tensorflow.losses.sigmoid_cross_entropy",
"tensorflow.reduce_min",
"tensorflow.variable_scope"
]
] |
soham1024/Handwritten-Digit-Recognition
|
[
"065557e94a46cac75d0439604f7dd93784b6cffe"
] |
[
"app.py"
] |
[
"from flask import Flask, render_template, request\nfrom scipy.misc import imsave,imread, imresize\nimport numpy as np\nimport keras.models\nimport re\nimport base64\n\nimport sys \nimport os\nsys.path.append(os.path.abspath(\"./model\"))\nfrom load import *\n\napp = Flask(__name__)\nglobal model, graph\nmodel, graph = init()\n \n@app.route('/')\ndef index():\n return render_template(\"index.html\")\n\n@app.route('/predict/', methods=['GET','POST'])\ndef predict():\n # get data from drawing canvas and save as image\n parseImage(request.get_data())\n\n # read parsed image back in 8-bit, black and white mode (L)\n x = imread('output.png', mode='L')\n x = np.invert(x)\n x = imresize(x,(28,28))\n\n # reshape image data for use in neural network\n x = x.reshape(1,28,28,1)\n with graph.as_default():\n out = model.predict(x)\n print(out)\n print(np.argmax(out, axis=1))\n response = np.array_str(np.argmax(out, axis=1))\n return response \n \ndef parseImage(imgData):\n # parse canvas bytes and save as output.png\n imgstr = re.search(b'base64,(.*)', imgData).group(1)\n with open('output.png','wb') as output:\n output.write(base64.decodebytes(imgstr))\n\nif __name__ == '__main__':\n app.debug = False\n port = int(os.environ.get(\"PORT\", 5000))\n app.run(host='127.0.0.1', port=port)\n"
] |
[
[
"scipy.misc.imresize",
"numpy.argmax",
"scipy.misc.imread",
"numpy.invert"
]
] |
qyz96/Legion
|
[
"b12f0d1bd759788356385fbb6ae8b6321068e3b7"
] |
[
"bindings/python/examples/struct.py"
] |
[
"#!/usr/bin/env python\n\n# Copyright 2019 Stanford University\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom __future__ import print_function\n\nimport legion\nfrom legion import task, Future, Region, RW\nimport numpy\n\n# Define a custom struct type.\nlegion.ffi.cdef(r'''\ntypedef struct mystruct {\n int x;\n double y;\n int8_t z;\n} mystruct;\n''')\nmystruct_np = numpy.dtype([('x', numpy.intc), ('y', numpy.double), ('z', numpy.byte)], align=True)\nmystruct = legion.Type(mystruct_np, 'mystruct')\n\n@task\ndef main():\n myvalue_root = legion.ffi.new('mystruct *')\n myvalue = myvalue_root[0]\n myvalue.x = 123\n myvalue.y = 3.14\n myvalue.z = 65\n\n # Make a future with the custom struct type.\n g = Future(myvalue, mystruct)\n print(\"value of g.get() is %s\" % g.get())\n assert g.get().x == 123\n\n # Make a region with the custom struct type.\n R = Region([4], {'myvalue': mystruct})\n R.myvalue[0] = (123, 3.14, 65)\n print(R.myvalue[0])\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"numpy.dtype"
]
] |
linklab-uva/deepracing
|
[
"fc25c47658277df029e7399d295d97a75fe85216",
"fc25c47658277df029e7399d295d97a75fe85216"
] |
[
"deepracing_py/plot_frame_rate.py",
"DCNN-Pytorch/shape_testing.py"
] |
[
"import numpy as np\nimport numpy.linalg as la\nimport scipy\nimport scipy.stats\nimport skimage\nimport PIL\nfrom PIL import Image as PILImage\nimport TimestampedPacketMotionData_pb2\nimport PoseSequenceLabel_pb2\nimport TimestampedImage_pb2\nimport Vector3dStamped_pb2\nimport argparse\nimport os\nimport google.protobuf.json_format\nimport Pose3d_pb2\nimport cv2\nimport bisect\nimport FrameId_pb2\nimport scipy.interpolate\nimport deepracing.pose_utils\nfrom deepracing.pose_utils import getAllImageFilePackets, getAllMotionPackets\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\ndef imageDataKey(data):\n return data.timestamp\nparser = argparse.ArgumentParser()\nparser.add_argument(\"image_path\", help=\"Path to image folder\", type=str)\nparser.add_argument(\"--json\", help=\"Assume dataset files are in JSON rather than binary .pb files.\", action=\"store_true\")\nargs = parser.parse_args()\nimage_folder = args.image_path\nimage_tags = deepracing.pose_utils.getAllImageFilePackets(image_folder, args.json)\nimage_tags = sorted(image_tags,key=imageDataKey)\nprint(image_tags)\nindices = np.array([float(i) for i in range(len(image_tags))])\ntimestamps = np.array([tag.timestamp/1000.0 for tag in image_tags])\nfig = plt.figure(\"Image Index vs OS Time\")\nplt.plot(timestamps, indices, label='indices versus timestamps')\nfig.legend()\nslope, intercept, r_value, p_value, std_err = scipy.stats.linregress(timestamps, indices)\nprint(\"Average framerate: %f\" %(slope))\n#plt.plot( timestamps, slope*timestamps + intercept, label='fitted line' )\nplt.show()\n\n",
"import torch\nimport deepracing_models.nn_models.Models as M\nimport time\n#net = M.AdmiralNetKinematicPredictor(use_3dconv=False, sequence_length=20, context_length=5)\nnet = M.AdmiralNetCurvePredictor(use_3dconv=True, context_length=5, params_per_dimension=6)\nnet = net.cuda(0)\nim = torch.rand(64,5,3,66,200)\nim = im.cuda(0)\nnet=net.eval()\nprint(net)\nprint(\"Running net\")\ntick = time.time()\nout = net(im)\ntock = time.time()\nprint(out.shape)\nprint(\"Got prediction in %f seconds\"%(tock-tick))"
] |
[
[
"matplotlib.pyplot.plot",
"scipy.stats.linregress",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
],
[
"torch.rand"
]
] |
ZhaozhiQIAN/neurawkes
|
[
"1a3caa837b34f77ac9d078bc9bf10ff10a3bf959"
] |
[
"test_models_and_save.py"
] |
[
"# -*- coding: utf-8 -*-\n# !/usr/bin/python\n\"\"\"\nCreated on Mar 18th 10:58:37 2016\n\ntest a continuous-time sequential model\n\n@author: hongyuan\n\"\"\"\n\nimport pickle\nimport time\nimport numpy\nimport theano\nfrom theano import sandbox\nimport theano.tensor as tensor\nimport os\nimport sys\nfrom collections import defaultdict\nfrom theano.tensor.shared_randomstreams import RandomStreams\nimport modules.utils as utils\nimport modules.models as models\nimport modules.optimizers as optimizers\nimport modules.controllers as controllers\nimport modules.data_processers as data_processers\n\nimport run_models\nimport datetime\n\ndtype=theano.config.floatX\n\n\n#\nimport argparse\n__author__ = 'Hongyuan Mei'\n\ndef main():\n\n parser = argparse.ArgumentParser(\n description='Testing model ... '\n )\n #\n parser.add_argument(\n '-m', '--Model', required=True,\n choices = ['hawkes', 'hawkesinhib', 'conttime'],\n help='Which model to test? hawkes (SE-MPP)? hawkesinhib (D-SM-MPP)? conttime (N-SM-MPP)?'\n )\n parser.add_argument(\n '-fd', '--FileData', required=True,\n help='Path of the dataset (e.g. ./data/data_hawkes/)'\n )\n parser.add_argument(\n '-fp', '--FilePretrain', required=True,\n help='File of pretrained model (e.g. ./tracks/track_PID=XX_TIME=YY/model.pkl)'\n )\n parser.add_argument(\n '-ts', '--TagSplit', required=True,\n choices = ['dev', 'test', 'test1'],\n help='Which split to test? Dev or Test?'\n )\n #\n parser.add_argument(\n '-s', '--Seed', #required=False,\n default = 12345, type = int,\n help='Seed of random state'\n )\n parser.add_argument(\n '-md', '--MultipleDev', #required=False,\n default = 10, type = int,\n help='Multiple of events to sample (integral) for dev/test'\n )\n parser.add_argument(\n '-sl', '--SaveLog', #required=False,\n default = 1, type = int,\n choices = [0,1],\n help='Do you want to save the log ? 0--False, 1--True'\n )\n parser.add_argument(\n '-pp', '--PartialPredict', #required=False,\n default = 0, type = int,\n choices = [0,1],\n help='What to only predict part of stream ? 0--False, 1--True'\n )\n parser.add_argument(\n '-ps', '--PruneStream', #required=False,\n default = 0, type = int,\n help='Prune stream? Give me the index ! 0 is nothng to prune. Note : index specifies a COMBINATION of event types by its binary coding (e.g. 0--00000, 1--00001, 31-11111 where 1 means this type is pruned)!'\n )\n parser.add_argument(\n '-pf', '--PredictFirst', #required=False,\n default = 1, type = int,\n choices = [0,1],\n help='Predict the first event ? 0--False, 1--True Note: in our project, this is False ONLY on MIMIC, SO and Financial datasets'\n )\n #\n #TODO: related to intensity eval\n parser.add_argument(\n '-pl', '--PredictLambda', #required=False,\n default = 0, type = int,\n choices = [0,1],\n help='Predict Lambda (intensity) ? 0--False, 1--True Note: this is used ONLY in intensity evaluation'\n )\n parser.add_argument(\n '-fg', '--FileGold', required=False,\n help='Gold model to eval intensity ? (e.g. 
./gold_models/model.pkl)'\n )\n parser.add_argument(\n '-mg', '--ModelGold', required=False,\n choices = ['hawkes', 'hawkesinhib', 'conttime'],\n help='Gold Model to be used '\n )\n #\n args = parser.parse_args()\n #\n #if args.TrackPeriod == None:\n # args.TrackPeriod = numpy.int32(100)\n #else:\n # args.TrackPeriod = numpy.int32(args.TrackPeriod)\n #\n #\n args.Seed = numpy.int32(args.Seed)\n args.MultipleDev = numpy.int32(args.MultipleDev)\n if args.SaveLog == 0:\n args.SaveLog = False\n else:\n args.SaveLog = True\n #\n if args.PartialPredict == 0:\n args.PartialPredict = False\n else:\n args.PartialPredict = True\n #\n args.PruneStream = numpy.int32(args.PruneStream)\n #\n if args.PredictFirst == 0:\n args.PredictFirst = False\n else:\n args.PredictFirst = True\n #\n # For intensity eval\n #\n if args.PredictLambda == 0:\n args.PredictLambda = False\n else:\n args.PredictLambda = True\n #\n if args.FileGold == None:\n assert(args.PredictLambda==False)\n else:\n args.FileGold = str(args.FileGold)\n if args.ModelGold == None:\n assert(args.PredictLambda==False)\n else:\n args.ModelGold = str(args.ModelGold)\n #\n #\n id_process = os.getpid()\n time_current = datetime.datetime.now().isoformat()\n #\n ## show values ##\n print((\"PID is : %s\" % str(id_process) ))\n print((\"TIME is : %s\" % time_current ))\n print((\"Model is : %s\" % args.Model ))\n #print((\"CoefL2 is : %s\" % str(args.CoefL2) ))\n print((\"FileData is : %s\" % args.FileData ))\n #if 'lstm' in args.Model:\n # print((\"DimLSTM is : %s\" % str(args.DimLSTM) ))\n print((\"Seed is : %s\" % str(args.Seed) ))\n print((\"FilePretrain is : %s\" % args.FilePretrain))\n #print((\"TrackPeriod is : %s\" % str(args.TrackPeriod) ))\n #print((\"MaxEpoch is : %s\" % str(args.MaxEpoch) ))\n print((\"SizeBatch is : %s\" % str(1) ))\n print((\"PartialPredict is : %s\" % args.PartialPredict))\n print((\"PruneStream is : %s\" % str(args.PruneStream) ))\n print((\"PredictFirst is: %s\" % args.PredictFirst ))\n print((\"PredictLambda is : %s\" % str(args.PredictLambda) ))\n print((\"ModelGold is : %s\" % args.ModelGold ))\n print((\"FileGold is : %s\" % args.FileGold ))\n #print((\"Optimizer is : %s\" % args.Optimizer))\n flag_show_1 = (\n args.Model == 'hawkesinhib' or args.Model == 'neural' or args.Model == 'neuralgeneral' or args.Model == 'neuraladapt' or args.Model == 'neuralsimple' or args.Model == 'neuraltime' or args.Model == 'neuralgeneraltime' or args.Model == 'neuraladapttime'\n )\n flag_show_2 = (\n args.Model == 'hawkesinhibscale' or args.Model == 'neuraladapttimescale' or args.Model == 'neuralreduce' or args.Model == 'conttime'\n )\n #\n if (flag_show_1 and flag_show_2):\n print((\"Multiple for testing is : %s\" % args.MultipleDev))\n #\n #\n dict_args = {\n 'PID': id_process,\n 'TIME': time_current,\n 'Model': args.Model,\n #'CoefL2': args.CoefL2,\n 'FileData': args.FileData,\n #'DimLSTM': args.DimLSTM,\n 'Seed': args.Seed,\n 'FilePretrain': args.FilePretrain,\n #'TrackPeriod': args.TrackPeriod,\n #'MaxEpoch': args.MaxEpoch,\n 'SizeBatch': numpy.int32(1),\n #'ConfidenceLevel': args.ConfidenceLevel,\n #'NumBootstrap': args.NumBootstrap,\n #'Optimizer': args.Optimizer,\n #'MultipleTrain': args.MultipleTrain,\n 'MultipleDev': args.MultipleDev,\n 'PartialPredict': args.PartialPredict,\n 'PruneStream': args.PruneStream,\n 'PredictLambda': args.PredictLambda,\n 'ModelGold': args.ModelGold,\n 'FileGold': args.FileGold\n }\n #\n #some unuseful arguments are set to dummy values here\n #like optimizer, max_epoch, size_batch\n input_test 
= {\n 'model': args.Model,\n 'seed_random': args.Seed,\n 'path_rawdata': args.FileData,\n 'path_pre_train': args.FilePretrain,\n 'track_period': None,\n 'max_epoch': numpy.int32(1),\n 'size_batch': numpy.int32(1),\n #'dim_model': args.DimLSTM,\n 'optimizer': 'adam',\n 'save_file_path': None,\n 'log_file': None,\n 'tag_split': args.TagSplit,\n 'args': dict_args,\n 'coef_l2': numpy.float32(0.0),\n #'cl': args.ConfidenceLevel,\n #'num_bootstrap': args.NumBootstrap,\n 'loss_type': 'loglikehood',\n 'partial_predict': args.PartialPredict,\n 'prune_stream': args.PruneStream,\n 'predict_lambda': args.PredictLambda,\n 'path_gold': args.FileGold,\n 'model_gold': args.ModelGold\n }\n #\n if '_so' in args.FileData or '_mimic' in args.FileData or '_bookorder' in args.FileData:\n input_test['predict_first'] = False\n else:\n if args.PredictFirst:\n input_test['predict_first'] = True\n else:\n input_test['predict_first'] = False\n #\n #\n path_pre_train = input_test['path_pre_train']\n path_with_no_name = path_pre_train.replace(\n '/'+os.path.basename(path_pre_train), ''\n )\n tag_with_track = os.path.basename(path_with_no_name)\n tag_model = tag_with_track.replace(\n 'track', ''\n )\n #\n input_test['path_logs'] = path_with_no_name\n if args.SaveLog == False:\n input_test['path_logs'] = None\n # code drifts with time\n # when model is trained model,\n # tag_model is PID and TIME\n # but when model is gen_model\n # tag_model is just gen_model the string\n # so we will add _ to make the format match\n #if '_' not in tag_model:\n if tag_model[0] != '_':\n tag_model = '_' + tag_model\n #\n if '_hawkes' in input_test['path_rawdata']:\n tag_data = 'hawkes'\n if '_hawkesinhib' in input_test['path_rawdata']:\n tag_data = 'hawkesinhib'\n if '_neural' in input_test['path_rawdata']:\n tag_data = 'neural'\n if '_neuralgeneral' in input_test['path_rawdata']:\n tag_data = 'neuralgeneral'\n if '_neuraladapt' in input_test['path_rawdata']:\n tag_data = 'neuraladapt'\n if '_neuralsimple' in input_test['path_rawdata']:\n tag_data = 'neuralsimple'\n if 'data_kaggle' in input_test['path_rawdata']:\n tag_data = 'kaggle'\n if 'data_retweet' in input_test['path_rawdata']:\n tag_data = 'retweet'\n if 'data_food' in input_test['path_rawdata']:\n tag_data = 'food'\n if 'data_meme' in input_test['path_rawdata']:\n tag_data = 'meme'\n if 'data_mimic' in input_test['path_rawdata']:\n tag_data = 'mimic'\n #\n if 'data_so' in input_test['path_rawdata']:\n tag_data = 'so'\n if 'data_bookorder' in input_test['path_rawdata']:\n tag_data = 'bookorder'\n #\n if 'data_neuraladapttimescale' in input_test['path_rawdata']:\n tag_data = 'neuraladapttimescale'\n if 'data_neuralreduce' in input_test['path_rawdata']:\n tag_data = 'neuralreduce'\n if 'data_missing' in input_test['path_rawdata']:\n tag_data = 'missing'\n #\n if 'data_conttime' in input_test['path_rawdata']:\n tag_data = 'conttime'\n #\n name_model = 'results_Model='+input_test[\n 'args'\n ]['Model']+'_Data='+tag_data+'_Split='+input_test[\n 'tag_split'\n ]+tag_model+'.pkl'\n #\n path_to_save_results = os.path.abspath(\n './results_'+tag_data\n )\n if args.PredictLambda:\n path_to_save_results += '_predictlambda'\n file_to_save_results = path_to_save_results + '/' + name_model\n #\n if not os.path.exists(path_to_save_results):\n os.makedirs(path_to_save_results)\n #\n input_test['file_to_save_results'] = file_to_save_results\n #\n flag_multiple_1 = (\n args.Model == 'hawkesinhib' or args.Model == 'neural' or args.Model == 'neuralgeneral' or args.Model == 'neuraladapt' or args.Model 
== 'neuralsimple' or args.Model == 'neuraltime' or args.Model == 'neuralgeneraltime' or args.Model == 'neuraladapttime'\n )\n flag_multiple_2 = (\n args.Model == 'hawkesinhibscale' or args.Model == 'neuraladapttimescale' or args.Model == 'neuralreduce' or args.Model == 'conttime'\n )\n #\n if (flag_multiple_1 or flag_multiple_2):\n input_test['multiple_sample_for_train'] = numpy.int32(1)\n input_test['multiple_sample_for_dev'] = numpy.int32(\n args.MultipleDev\n )\n #\n #\n #\n # start running\n if args.PredictLambda:\n run_models.test_intensity_and_save(input_test)\n else:\n if args.Model == 'hawkes':\n run_models.test_hawkes_ctsm_and_save(\n input_test\n )\n elif args.Model == 'hawkesinhib' or args.Model == 'hawkesinhibscale':\n run_models.test_hawkesinhib_ctsm_and_save(\n input_test\n )\n #run_models.test_hawkesinhib_ctsm_confidence_interval(input_test)\n elif args.Model == 'neural':\n run_models.test_neural_hawkes_ctsm_and_save(\n input_test\n )\n #run_models.test_neural_hawkes_ctsm_confidence_interval(input_test)\n elif args.Model == 'neuralgeneral':\n run_models.test_generalized_neural_hawkes_ctsm_and_save(\n input_test, tag_neural_type = 'general'\n )\n elif args.Model == 'neuraladapt':\n run_models.test_generalized_neural_hawkes_ctsm_and_save(\n input_test, tag_neural_type = 'adaptive'\n )\n elif args.Model == 'neuralsimple':\n run_models.test_generalized_neural_hawkes_ctsm_and_save(\n input_test, tag_neural_type = 'simple'\n )\n elif args.Model == 'neuraltime':\n run_models.test_generalized_neural_hawkes_ctsm_and_save_time(\n input_test, tag_neural_type = 'neural'\n )\n elif args.Model == 'neuralgeneraltime':\n run_models.test_generalized_neural_hawkes_ctsm_and_save_time(\n input_test, tag_neural_type = 'general'\n )\n elif args.Model == 'neuraladapttime' or args.Model == 'neuraladapttimescale' or args.Model == 'neuralreduce' or args.Model == 'conttime':\n run_models.test_generalized_neural_hawkes_ctsm_and_save_time(\n input_test, tag_neural_type='adaptive'\n )\n #if args.PredictLambda:\n # run_models.test_generalized_neural_hawkes_intensity_and_save_time(\n # input_test, tag_neural_type='adaptive'\n # )\n #else:\n else:\n print(\"Model not implemented yet !!! \")\n #\n #\n\nif __name__ == \"__main__\": main()\n"
] |
[
[
"numpy.int32",
"numpy.float32"
]
] |
benmoseley/seismic-simulation-wavenet
|
[
"c2a136077bba29cab772b88c7c03b4911c9185c9"
] |
[
"models.py"
] |
[
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jan 12 16:57:30 2019\n\n@author: bmoseley\n\"\"\"\n\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom loss_functions import lp_mean_loss\nfrom tfutils import w, b\nfrom wavenet import Wavenet1D\nimport plot_utils\nimport processing_utils\n\n\nclass SeismicWavenet:\n def __init__(self, c, input_features, inverse=None, verbose=False):\n \"\"\"\n Initialize a SeismicWavenet model.\n \n inverse=True tries to learn seismic inversion\n inverse=False tries to learn forward modelling\n \n inverse takes default value from constants object.\n \n Input should follow NWC format:\n \n input_features = { \"velocity\": (num_batches, NZ, 1)\n \"reflectivity\":(num_batches, NSTEPS, 1)\n \"gather\": (num_batches, NSTEPS, NREC)\n }\n \n \"\"\"\n if inverse == None: inverse = c.INVERSE\n \n self.c = c# model hyperparameters\n self.input_features = input_features# dictionary of tensors used as input to the model (can be placeholders or tensors)\n self.inverse = inverse\n self.verbose = verbose\n\n\n def define_graph(self):\n \"\"\"\n Define model graph.\n \"\"\"\n \n if self.verbose: print(\"Defining graph...\")\n \n ##\n # DEFINE INPUT DATA\n ##\n \n if self.inverse:\n self.x = self.input_features[\"gather\"]\n self.y_true = self.input_features[\"reflectivity\"]\n else:\n self.x = self.input_features[\"reflectivity\"]\n self.y_true = self.input_features[\"gather\"]\n self.velocity = self.input_features[\"velocity\"]\n \n # INPUT/OUTPUT HAS SHAPE NWC\n self.x_shape = self.x.shape.as_list()\n self.y_true_shape = self.y_true.shape.as_list()\n \n ##\n # DEFINE VARIABLES\n ##\n \n # define weights for wavenet\n self.W = Wavenet1D(in_channels=self.x_shape[2],\n filter_width=2,\n num_blocks=self.c.NUM_WAVE_BLOCKS,\n num_layers=len(self.c.WAVE_RATES), \n hidden_channels=self.c.WAVE_HIDDEN_CHANNELS, \n rates=self.c.WAVE_RATES, \n activation=self.c.WAVE_ACTIVATION, \n biases=self.c.WAVE_BIASES, \n verbose=False)\n self.W.define_variables()\n \n # define weights for final convolutional layer\n self.CONV_KERNEL = [self.c.CONV_FILTER_LENGTH,self.c.WAVE_HIDDEN_CHANNELS,self.y_true_shape[2]]\n self.weights, self.biases = {}, {}\n with tf.name_scope('conv1d_params'):\n stddev = np.sqrt(1) / np.sqrt(np.prod(self.CONV_KERNEL[:2]))\n weights = w(self.CONV_KERNEL, mean=0., stddev=stddev, name=\"weights\")\n biases = b(self.CONV_KERNEL[2:], const=0.0, name=\"biases\")\n self.weights[\"conv1d\"] = weights\n self.biases[\"conv1d\"] = biases\n \n ##\n # DEFINE GRAPH\n ##\n \n def construct_layers(x):\n \n if self.verbose: \n print(\"y_true: \",self.y_true.shape)\n print(\"x: \",x.shape)\n \n if self.inverse: x = x[:,::-1,:]# FLIP DATA TO REMAIN CAUSAL\n \n # WAVENET\n x = self.W.define_graph(x)\n if self.verbose: print(\"wavenet: \",x.shape)\n \n # CONVOLUTION\n with tf.name_scope(\"conv1d\"):\n # causal convolution\n with tf.name_scope(\"pad_left\"):\n x = tf.pad(x, [[0, 0], [(self.CONV_KERNEL[0]-1), 0], [0, 0]])# pad appropriate zeros on input\n x = tf.nn.convolution(x, filter=self.weights[\"conv1d\"], strides=[1], padding=\"VALID\", data_format=\"NWC\")\n x = x + self.biases[\"conv1d\"]\n if self.verbose: print(\"conv1d: \",x.shape)\n \n if self.inverse: x = x[:,::-1,:]# FLIP DATA TO REMAIN CAUSAL\n \n return x\n \n ## initialise network\n self.y = construct_layers(self.x)\n \n assert self.y.shape.as_list() == self.y_true.shape.as_list()\n\n # print out number of weights\n self.num_weights = 
np.sum([self.weights[tensor].shape.num_elements() for tensor in self.weights])\n self.num_biases = np.sum([self.biases[tensor].shape.num_elements() for tensor in self.biases])\n self.total_num_trainable_params = self.num_weights+self.num_biases+self.W.num_weights+self.W.num_biases\n if self.verbose: print(self)\n \n # check no more trainable variables introduced\n assert self.total_num_trainable_params == np.sum([tensor.shape.num_elements() for tensor in tf.trainable_variables()])\n \n def define_loss(self):\n \"\"\"\n Define model loss and optimizer ops\n \"\"\"\n \n ##\n # DEFINE LOSS, OPTIMIZER, TRAIN OP\n ##\n if self.verbose: print(\"Defining loss, optimizer and train op...\")\n \n with tf.name_scope('loss'):\n \n # LP loss\n \n # define gain profile for forward loss\n if not self.inverse:\n gain = np.arange(0,self.c.REFLECTIVITY_SHAPE[0])**self.c.T_GAIN\n gain = gain / np.median(gain)# normalise gain profile\n gain = np.expand_dims(gain, -1)\n gain = np.pad(gain, [(0,0),(self.c.GATHER_SHAPE[1]-1,0)],mode='edge')\n gain = tf.constant(gain, dtype=tf.float32, name='gain')\n if self.verbose:\n print(\"gain: \",gain.shape)\n #print((gain*self.y).shape, (gain*self.y_true).shape)\n else:\n gain = 1.\n \n # use the same graph to evaluate train and test loss\n self.loss_train = self.loss_test = lp_mean_loss(gain*self.y, gain*self.y_true, l_num=self.c.L_NUM)\n \n with tf.name_scope('optimiser'):\n self.global_step = tf.Variable(0, trainable=False)# initialise global step variable\n self.optimizer = tf.train.AdamOptimizer(learning_rate=self.c.LRATE, name='optimizer')\n self.train_op = self.optimizer.minimize(self.loss_train, global_step=self.global_step, name='train_op')\n \n def define_summaries(self):\n \"\"\"\n Define tensorboard summaries for model\n \"\"\"\n \n ##\n # DEFINE TENSORBOARD SUMMARIES\n ##\n if self.verbose: print(\"Defining tensorboard summaries...\")\n \n self.summaries_train = [] # list of train summaries\n self.summaries_test = [] # list of test summaries\n \n # weights/ biases summary (histogram)\n for tensor in self.weights:\n summary_weights_histogram = tf.summary.histogram('conv1d_weights/%s'%(tensor), self.weights[tensor])\n self.summaries_train.append(summary_weights_histogram)\n for tensor in self.biases:\n summary_biases_histogram = tf.summary.histogram('conv1d_biases/%s'%(tensor), self.biases[tensor])\n self.summaries_train.append(summary_biases_histogram)\n\n with tf.name_scope('accuracy'):\n\n # train loss summary (scalar)\n summary_loss_train = tf.summary.scalar('loss/train_loss', self.loss_train)\n self.summaries_train.append(summary_loss_train)\n \n # test loss summary (scalar)\n summary_loss_test = tf.summary.scalar('loss/test_loss', self.loss_test)\n self.summaries_test.append(summary_loss_test)\n\n # summary images (test and train)\n self.train_image = tf.placeholder(tf.uint8, shape=(None, None, None, 3))\n self.summary_train_image = tf.summary.image('train/predictions', self.train_image, max_outputs=20)# tensor type\n \n self.test_image = tf.placeholder(tf.uint8, shape=(None, None, None, 3))\n self.summary_test_image = tf.summary.image('test/predictions', self.test_image, max_outputs=20)# tensor type\n \n # training rate statistic (updated globably)\n self.trate = tf.placeholder(tf.float32, shape=(), name=\"trate\")\n self.summary_trate = tf.summary.scalar('steps_sec', self.trate)\n \n # merge all summaries\n self.summaries_train = tf.summary.merge(self.summaries_train)\n self.summaries_test = tf.summary.merge(self.summaries_test)\n\n\n def 
train_step(self, sess, summary_writer=None, handle_dict=None, show_plot=False):\n \"\"\"\n Runs a training step.\n \"\"\"\n # training step\n _, global_step = sess.run([self.train_op,\n self.global_step],\n feed_dict=handle_dict)\n \n # training statistics\n if global_step % self.c.SUMMARY_FREQ == 0:\n \n # generate output\n output = sess.run([self.x, self.y, self.y_true, self.velocity,\n self.loss_train, self.summaries_train],\n feed_dict=handle_dict)\n \n # add summaries\n if summary_writer: summary_writer.add_summary(output[5], global_step)\n \n # add plot summaries\n if global_step % self.c.PLOT_FREQ == 0 or show_plot:\n fig = self._plot_results(*output[0:4], name=\"train\")\n if show_plot: plt.show()\n feed_dict = {self.train_image:plot_utils.fig2rgb_array(fig, expand=True)}\n if not show_plot: plt.close()\n summary_train_image = sess.run(self.summary_train_image, feed_dict=feed_dict)\n if summary_writer: summary_writer.add_summary(summary_train_image, global_step)\n\n print(\"%i Loss (train): %.4f\"%(global_step, output[4]))\n \n return global_step\n\n def test_step(self, sess, summary_writer=None, handle_dict=None, show_plot=False):\n \"\"\"\n Runs a testing step.\n \"\"\"\n global_step = sess.run(self.global_step)\n \n # generate output\n output = sess.run([self.x, self.y, self.y_true, self.velocity,\n self.loss_test, self.summaries_test],\n feed_dict=handle_dict)\n \n # add summaries\n if summary_writer: summary_writer.add_summary(output[5], global_step)\n \n # add plot summaries\n if global_step % self.c.PLOT_FREQ == 0 or show_plot:\n fig = self._plot_results(*output[0:4], name=\"test\")\n if show_plot: plt.show()\n feed_dict = {self.test_image:plot_utils.fig2rgb_array(fig, expand=True)}\n if not show_plot: plt.close()\n summary_test_image = sess.run(self.summary_test_image, feed_dict=feed_dict)\n if summary_writer: summary_writer.add_summary(summary_test_image, global_step)\n\n print(\"%i Loss (test): %.4f\"%(global_step, output[4]))\n \n\n def _plot_results(self, x, y, y_true, velocity, name=\"\"):\n \"\"\"\n Plot test/train results to output\n \"\"\"\n \n n_show = np.min([4, x.shape[0]])\n fig = plt.figure(figsize=(12,10))# width, height\n \n NSTEPS = self.c.REFLECTIVITY_SHAPE[0]\n NREC = self.c.GATHER_SHAPE[1]\n NZ = self.c.VELOCITY_SHAPE[0]\n \n if self.inverse: gather, reflectivity = x,y_true\n else: reflectivity, gather = x,y_true\n \n # get gain profile & limit\n gain = np.arange(0,NSTEPS)**self.c.T_GAIN\n gain = gain / np.median(gain)# normalise gain profile\n lim = 1.5\n \n for ib in range(n_show):\n irow = ib//2\n \n # PLOT VELOCITY PROFILE (DEPTH)\n\n if not self.inverse:\n plt.subplot2grid((2, 8), (irow, 4*ib-8*irow+0))\n label = \"velocity\"\n else:\n plt.subplot2grid((2, 8), (irow, 4*ib-8*irow+3))\n label=\"true velocity\"\n \n plt.plot(velocity[ib,:,0], np.arange(NZ), color=\"tab:red\", label=label)\n if self.inverse:\n v = processing_utils.get_velocity_trace(y[ib,:,0], srate=0.008, DZ=12.5, NZ=NZ, v0=velocity[ib,0,0])\n plt.plot(v, np.arange(NZ), color=\"tab:green\", label=\"predicted velocity\")\n if ib==2:\n plt.xlabel(\"Velocity (m/s)\")\n plt.xticks([1000, 2000, 3000])\n plt.ylabel(\"Depth (samples)\")\n else:\n plt.yticks([])\n plt.xticks([])\n plt.xlim(1000, 4000)\n plt.ylim(NZ, 0)\n plt.legend(loc=1)\n \n # PLOT GATHER (TIME)\n\n if not self.inverse:\n plt.subplot2grid((2, 8), (irow, 4*ib-8*irow+2), colspan=2)\n else:\n plt.subplot2grid((2, 8), (irow, 4*ib-8*irow+0), colspan=2)\n \n for ir in range(NREC):\n if ir == 0:\n if not self.inverse:\n 
label1=\"true gather\"\n label2=\"predicted gather\"\n else:\n label1=\"gather\"\n else: label1=label2=None\n \n plt.plot(ir+gain*gather[ib,:,ir]/lim, np.arange(NSTEPS), color='tab:red', label=label1)\n if not self.inverse: plt.plot(ir+gain*y[ib,:,ir]/lim, np.arange(NSTEPS), color='tab:green', label=label2)\n \n plt.xlim(-1, (NREC-1)+1)\n plt.yticks([])\n plt.xticks([])\n plt.ylim(NSTEPS,0)\n plt.legend(loc=1)\n \n # PLOT REFFLECTIVITY SERIES (TIME)\n \n if not self.inverse:\n plt.subplot2grid((2, 8), (irow, 4*ib-8*irow+1))\n label = \"reflectivity\"\n else:\n plt.subplot2grid((2, 8), (irow, 4*ib-8*irow+2))\n label=\"true reflectivity\"\n \n plt.plot(reflectivity[ib,:,0], np.arange(NSTEPS), color='tab:red', label=label)\n if self.inverse: plt.plot(y[ib,:,0], np.arange(NSTEPS), color='tab:green', label=\"predicted reflectivity\")\n\n if ib==2:\n plt.xlabel(\"Reflectivity\")\n plt.ylabel(\"TWT (samples)\")\n plt.yticks([100,200,300,400,500])\n else:\n plt.yticks([])\n plt.xticks([])\n plt.xlim(-0.4,0.4)\n plt.ylim(NSTEPS,0)\n plt.legend(loc=3)\n \n plt.subplots_adjust(left=0.05, bottom=0.05, right=1, top=1,\n wspace=0.2, hspace=0.0)\n return fig\n\n ## HELPER METHODS\n \n def __str__(self):\n if hasattr(self, \"total_num_trainable_params\"):\n s = \"Wavenet:\\n\\tNumber of weights: %i\\n\\tNumber of biases: %i\"%(self.W.num_weights, self.W.num_biases)\n s += \"\\nConv1d:\\n\\tNumber of weights: %i\\n\\tNumber of biases: %i\"%(self.num_weights, self.num_biases)\n s += \"\\nTotal number of trainable parameters: %i\"%(self.total_num_trainable_params)\n #for tensor in tf.trainable_variables(): print(tensor.name)\n return s\n\n\nif __name__ == \"__main__\":\n \n from constants import Constants\n from datasets import SeismicDataset\n \n c = Constants()\n \n tf.reset_default_graph()\n tf.set_random_seed(123)\n \n d = SeismicDataset(c)\n d.define_graph()\n train_features, test_features = d.train_features, d.test_features\n \n model = SeismicWavenet(c, train_features, inverse=False, verbose=True)\n model.define_graph()\n model.define_loss()\n model.define_summaries()\n \n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n \n model.test_step(sess, summary_writer=None, show_plot=True)\n\n \n "
] |
[
[
"matplotlib.pyplot.legend",
"numpy.expand_dims",
"numpy.sqrt",
"tensorflow.train.AdamOptimizer",
"tensorflow.pad",
"tensorflow.summary.scalar",
"matplotlib.pyplot.subplot2grid",
"numpy.pad",
"tensorflow.Variable",
"tensorflow.summary.image",
"numpy.arange",
"tensorflow.reset_default_graph",
"tensorflow.name_scope",
"tensorflow.Session",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.close",
"tensorflow.trainable_variables",
"matplotlib.pyplot.figure",
"tensorflow.nn.convolution",
"numpy.min",
"matplotlib.pyplot.ylim",
"numpy.median",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.set_random_seed",
"matplotlib.pyplot.show",
"tensorflow.summary.merge",
"matplotlib.pyplot.xticks",
"tensorflow.summary.histogram",
"matplotlib.pyplot.ylabel",
"tensorflow.constant",
"matplotlib.pyplot.xlim",
"numpy.prod",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.yticks"
]
] |
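For context on the loss defined in the SeismicWavenet cell above: the forward model weights its Lp loss by a power-law time gain, normalised by its median and broadcast across all receivers via edge padding. Below is a minimal NumPy-only sketch of that gain construction; NSTEPS, NREC and T_GAIN are illustrative placeholders, not the repo's actual constants.

```python
import numpy as np

# Illustrative stand-ins for the constants referenced above (not the repo's real values).
NSTEPS = 512   # c.REFLECTIVITY_SHAPE[0]
NREC = 32      # c.GATHER_SHAPE[1]
T_GAIN = 2.0   # c.T_GAIN

# Power-law gain over time samples, normalised by its median so the
# weighting is ~1 around the middle of the trace.
gain = np.arange(NSTEPS) ** T_GAIN
gain = gain / np.median(gain)

# Broadcast the per-sample gain across receivers, matching a
# (NSTEPS, NREC) gather via edge padding as in define_loss().
gain = np.expand_dims(gain, -1)                            # (NSTEPS, 1)
gain = np.pad(gain, [(0, 0), (NREC - 1, 0)], mode="edge")  # (NSTEPS, NREC)

print(gain.shape)  # (512, 32)
```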
synml/horovod-tutorial
|
[
"75fcfd12c0eb23ad5fde1724c45fd795beb9f0d3"
] |
[
"main.py"
] |
[
"import os\nimport random\nimport time\n\nimport filelock\nimport horovod.torch as hvd\nimport numpy as np\nimport torch.backends.cudnn\nimport torch.nn as nn\nimport torch.utils.data\nimport torch.utils.tensorboard\nimport torchvision\nimport tqdm\n\n\ndef train(model, trainloader, criterion, optimizer, device, scaler=None):\n model.train()\n\n train_loss = torch.zeros(1, device=device)\n correct = torch.zeros(1, dtype=torch.int64, device=device)\n for images, targets in tqdm.tqdm(trainloader, desc='Train', leave=False,\n disable=False if hvd.local_rank() == 0 else True):\n images, targets = images.to(device), targets.to(device)\n\n optimizer.zero_grad()\n with torch.cuda.amp.autocast(enabled=True if scaler is not None else False):\n outputs = model(images)\n loss = criterion(outputs, targets)\n scaler.scale(loss).backward()\n optimizer.synchronize()\n scaler.unscale_(optimizer)\n with optimizer.skip_synchronize():\n scaler.step(optimizer)\n scaler.update()\n\n train_loss += loss\n pred = torch.argmax(outputs, dim=1)\n correct += torch.eq(pred, targets).sum()\n\n train_loss /= len(trainloader)\n train_loss = hvd.allreduce(train_loss, op=hvd.Average)\n\n correct = hvd.allreduce(correct, op=hvd.Sum)\n accuracy = correct / len(trainloader.dataset) * 100\n return train_loss.item(), accuracy.item()\n\n\ndef evaluate(model, testloader, criterion, amp_enabled, device):\n model.eval()\n\n test_loss = torch.zeros(1, device=device)\n correct = torch.zeros(1, dtype=torch.int64, device=device)\n for images, targets in tqdm.tqdm(testloader, desc='Eval', leave=False,\n disable=False if hvd.local_rank() == 0 else True):\n images, targets = images.to(device), targets.to(device)\n\n with torch.cuda.amp.autocast(amp_enabled):\n with torch.no_grad():\n outputs = model(images)\n test_loss += criterion(outputs, targets)\n pred = torch.argmax(outputs, dim=1)\n correct += torch.eq(pred, targets).sum()\n\n test_loss /= len(testloader)\n test_loss = hvd.allreduce(test_loss, op=hvd.Average)\n\n correct = hvd.allreduce(correct, op=hvd.Sum)\n accuracy = correct / len(testloader.dataset) * 100\n return test_loss.item(), accuracy.item()\n\n\nif __name__ == '__main__':\n # Hyper parameters\n batch_size = 256\n epoch = 5\n lr = 0.1\n momentum = 0.9\n weight_decay = 0\n num_workers = 4\n pin_memory = True\n amp_enabled = False\n use_fp16_compressor = False # horovod\n reproducibility = True\n\n # Pytorch reproducibility\n if reproducibility:\n torch.manual_seed(0)\n torch.cuda.manual_seed(0)\n torch.cuda.manual_seed_all(0)\n torch.backends.cudnn.benchmark = False\n torch.backends.cudnn.deterministic = True\n np.random.seed(0)\n random.seed(0)\n\n # 1. Horovod: initialize library\n hvd.init()\n assert hvd.is_initialized()\n local_rank = hvd.local_rank()\n torch.set_num_threads(1) # ํ๋ก์ธ์ค๋น ์ฌ์ฉ๋๋ CPU ์ค๋ ๋์ ์๋ฅผ ์กฐ์ (OMP_NUM_THREADS์ ๋์ผ)\n\n # 2. Horovod: local_rank๋ก GPU ๊ณ ์ \n if torch.cuda.is_available():\n torch.cuda.set_device(local_rank)\n device = torch.device('cuda', local_rank)\n else:\n device = torch.device('cpu')\n\n # 3. 
Dataset (sampler ์ฌ์ฉ)\n transform = torchvision.transforms.Compose([\n torchvision.transforms.ToTensor(),\n ])\n with filelock.FileLock('horovod.lock'):\n trainset = torchvision.datasets.CIFAR10(root='data', train=True, download=True, transform=transform)\n testset = torchvision.datasets.CIFAR10(root='data', train=False, transform=transform)\n train_sampler = torch.utils.data.distributed.DistributedSampler(trainset, num_replicas=hvd.size(), rank=hvd.rank())\n test_sampler = torch.utils.data.distributed.DistributedSampler(testset, num_replicas=hvd.size(), rank=hvd.rank())\n trainloader = torch.utils.data.DataLoader(trainset, batch_size, sampler=train_sampler,\n num_workers=num_workers, pin_memory=pin_memory)\n testloader = torch.utils.data.DataLoader(testset, batch_size, sampler=test_sampler,\n num_workers=num_workers, pin_memory=pin_memory)\n\n # Model\n with filelock.FileLock('horovod.lock'):\n model = torchvision.models.resnet101(num_classes=10).to(device)\n model_name = model.__str__().split('(')[0]\n\n # Loss function, optimizer, scaler\n criterion = nn.CrossEntropyLoss()\n optimizer = torch.optim.SGD(model.parameters(), lr, momentum=momentum, weight_decay=weight_decay)\n scheduler = torch.optim.lr_scheduler.LinearLR(optimizer, 1, 0, epoch)\n scaler = torch.cuda.amp.GradScaler(enabled=amp_enabled)\n\n # 4. Horovod: broadcast parameters & optimizer state.\n hvd.broadcast_parameters(model.state_dict(), root_rank=0)\n hvd.broadcast_optimizer_state(optimizer, root_rank=0)\n\n # (optional) Horovod: compression algorithm.\n compression = hvd.Compression.fp16 if use_fp16_compressor else hvd.Compression.none\n\n # 5. Horovod: wrap optimizer with DistributedOptimizer.\n optimizer = hvd.DistributedOptimizer(optimizer, model.named_parameters(), compression)\n\n # Tensorboard\n if local_rank == 0:\n writer = torch.utils.tensorboard.SummaryWriter(os.path.join('runs', model_name))\n writer.add_graph(model, trainloader.__iter__().__next__()[0].to(device))\n tqdm_disabled = False\n else:\n writer = None\n tqdm_disabled = True\n\n # Train and test\n prev_accuracy = 0\n images_per_sec = []\n for eph in tqdm.tqdm(range(epoch), desc='Epoch', disable=tqdm_disabled):\n trainloader.sampler.set_epoch(eph)\n\n epoch_time = time.time()\n train_loss, train_accuracy = train(model, trainloader, criterion, optimizer, device, scaler)\n epoch_time = time.time() - epoch_time\n images_per_sec.append(str(round(len(trainloader.dataset) / epoch_time)) + '\\n')\n\n test_loss, test_accuracy = evaluate(model, testloader, criterion, amp_enabled, device)\n scheduler.step()\n\n if writer is not None:\n writer.add_scalar('Loss/train', train_loss, eph)\n writer.add_scalar('Loss/test', test_loss, eph)\n writer.add_scalars('Loss/mix', {'train': train_loss, 'test': test_loss}, eph)\n writer.add_scalar('Accuracy/train', train_accuracy, eph)\n writer.add_scalar('Accuracy/test', test_accuracy, eph)\n writer.add_scalars('Accuracy/mix', {'train': train_accuracy, 'test': test_accuracy}, eph)\n\n if local_rank == 0:\n # Save latest model weight\n os.makedirs('weights', exist_ok=True)\n state_dict = model.state_dict()\n torch.save(state_dict, os.path.join('weights', f'{model_name}_latest.pth'))\n\n # Save best accuracy model\n if test_accuracy > prev_accuracy:\n torch.save(state_dict, os.path.join('weights', f'{model_name}_best_accuracy.pth'))\n prev_accuracy = test_accuracy\n\n if writer is not None:\n writer.close()\n\n if local_rank == 0:\n with open(f'np{hvd.size()}_images_per_epoch.txt', 'w', encoding='utf-8') as f:\n 
f.writelines(images_per_sec)\n"
] |
[
[
"torch.nn.CrossEntropyLoss",
"numpy.random.seed"
]
] |
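The Horovod tutorial above aggregates its metrics with collective ops: the loss is averaged across ranks and the correct-prediction count is summed before computing accuracy. A minimal sketch of that pattern, assuming Horovod and PyTorch are installed; it also runs as a single process, and the per-rank values below are made up.

```python
import horovod.torch as hvd
import torch

# Works single-process too; under `horovodrun -np N` the same calls
# average the loss and sum the correct-prediction counts across ranks.
hvd.init()

# Pretend per-rank statistics (placeholders, not from the tutorial's data).
local_loss = torch.tensor([0.7 + 0.1 * hvd.rank()])
local_correct = torch.tensor([100 + hvd.rank()], dtype=torch.int64)

mean_loss = hvd.allreduce(local_loss, op=hvd.Average)      # averaged over ranks
total_correct = hvd.allreduce(local_correct, op=hvd.Sum)   # summed over ranks

print(mean_loss.item(), total_correct.item())
```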
charliec443/feast
|
[
"8b3a97a9774fa6a2cefcf64c237a36212a158ab1"
] |
[
"sdk/python/tests/test_historical_retrieval.py"
] |
[
"import os\nimport random\nimport string\nimport time\nfrom datetime import datetime, timedelta\nfrom tempfile import TemporaryDirectory\n\nimport assertpy\nimport numpy as np\nimport pandas as pd\nimport pytest\nfrom google.cloud import bigquery\nfrom pandas.testing import assert_frame_equal\nfrom pytz import utc\n\nimport feast.driver_test_data as driver_data\nfrom feast import BigQuerySource, FileSource, RepoConfig, errors, utils\nfrom feast.entity import Entity\nfrom feast.errors import FeatureNameCollisionError\nfrom feast.feature import Feature\nfrom feast.feature_store import FeatureStore, _validate_feature_refs\nfrom feast.feature_view import FeatureView\nfrom feast.infra.offline_stores.bigquery import (\n BigQueryOfflineStoreConfig,\n _get_entity_df_timestamp_bounds,\n)\nfrom feast.infra.online_stores.sqlite import SqliteOnlineStoreConfig\nfrom feast.infra.provider import DEFAULT_ENTITY_DF_EVENT_TIMESTAMP_COL\nfrom feast.value_type import ValueType\n\nnp.random.seed(0)\n\nPROJECT_NAME = \"default\"\n\n\ndef generate_entities(date, infer_event_timestamp_col, order_count: int = 1000):\n end_date = date\n before_start_date = end_date - timedelta(days=365)\n start_date = end_date - timedelta(days=7)\n after_end_date = end_date + timedelta(days=365)\n customer_entities = list(range(1001, 1110))\n driver_entities = list(range(5001, 5110))\n orders_df = driver_data.create_orders_df(\n customers=customer_entities,\n drivers=driver_entities,\n start_date=before_start_date,\n end_date=after_end_date,\n order_count=order_count,\n infer_event_timestamp_col=infer_event_timestamp_col,\n )\n return customer_entities, driver_entities, end_date, orders_df, start_date\n\n\ndef stage_driver_hourly_stats_parquet_source(directory, df):\n # Write to disk\n driver_stats_path = os.path.join(directory, \"driver_stats.parquet\")\n df.to_parquet(path=driver_stats_path, allow_truncated_timestamps=True)\n return FileSource(\n path=driver_stats_path,\n event_timestamp_column=\"datetime\",\n created_timestamp_column=\"\",\n )\n\n\ndef stage_driver_hourly_stats_bigquery_source(df, table_id):\n client = bigquery.Client()\n job_config = bigquery.LoadJobConfig()\n df.reset_index(drop=True, inplace=True)\n job = client.load_table_from_dataframe(df, table_id, job_config=job_config)\n job.result()\n\n\ndef create_driver_hourly_stats_feature_view(source):\n driver_stats_feature_view = FeatureView(\n name=\"driver_stats\",\n entities=[\"driver\"],\n features=[\n Feature(name=\"conv_rate\", dtype=ValueType.FLOAT),\n Feature(name=\"acc_rate\", dtype=ValueType.FLOAT),\n Feature(name=\"avg_daily_trips\", dtype=ValueType.INT32),\n ],\n input=source,\n ttl=timedelta(hours=2),\n )\n return driver_stats_feature_view\n\n\ndef stage_customer_daily_profile_parquet_source(directory, df):\n customer_profile_path = os.path.join(directory, \"customer_profile.parquet\")\n df.to_parquet(path=customer_profile_path, allow_truncated_timestamps=True)\n return FileSource(\n path=customer_profile_path,\n event_timestamp_column=\"datetime\",\n created_timestamp_column=\"created\",\n )\n\n\ndef stage_customer_daily_profile_bigquery_source(df, table_id):\n client = bigquery.Client()\n job_config = bigquery.LoadJobConfig()\n df.reset_index(drop=True, inplace=True)\n job = client.load_table_from_dataframe(df, table_id, job_config=job_config)\n job.result()\n\n\ndef create_customer_daily_profile_feature_view(source):\n customer_profile_feature_view = FeatureView(\n name=\"customer_profile\",\n entities=[\"customer_id\"],\n features=[\n 
Feature(name=\"current_balance\", dtype=ValueType.FLOAT),\n Feature(name=\"avg_passenger_count\", dtype=ValueType.FLOAT),\n Feature(name=\"lifetime_trip_count\", dtype=ValueType.INT32),\n Feature(name=\"avg_daily_trips\", dtype=ValueType.INT32),\n ],\n input=source,\n ttl=timedelta(days=2),\n )\n return customer_profile_feature_view\n\n\n# Converts the given column of the pandas records to UTC timestamps\ndef convert_timestamp_records_to_utc(records, column):\n for record in records:\n record[column] = utils.make_tzaware(record[column]).astimezone(utc)\n return records\n\n\n# Find the latest record in the given time range and filter\ndef find_asof_record(records, ts_key, ts_start, ts_end, filter_key, filter_value):\n found_record = {}\n for record in records:\n if record[filter_key] == filter_value and ts_start <= record[ts_key] <= ts_end:\n if not found_record or found_record[ts_key] < record[ts_key]:\n found_record = record\n return found_record\n\n\ndef get_expected_training_df(\n customer_df: pd.DataFrame,\n customer_fv: FeatureView,\n driver_df: pd.DataFrame,\n driver_fv: FeatureView,\n orders_df: pd.DataFrame,\n event_timestamp: str,\n full_feature_names: bool = False,\n):\n # Convert all pandas dataframes into records with UTC timestamps\n order_records = convert_timestamp_records_to_utc(\n orders_df.to_dict(\"records\"), event_timestamp\n )\n driver_records = convert_timestamp_records_to_utc(\n driver_df.to_dict(\"records\"), driver_fv.input.event_timestamp_column\n )\n customer_records = convert_timestamp_records_to_utc(\n customer_df.to_dict(\"records\"), customer_fv.input.event_timestamp_column\n )\n\n # Manually do point-in-time join of orders to drivers and customers records\n for order_record in order_records:\n driver_record = find_asof_record(\n driver_records,\n ts_key=driver_fv.input.event_timestamp_column,\n ts_start=order_record[event_timestamp] - driver_fv.ttl,\n ts_end=order_record[event_timestamp],\n filter_key=\"driver_id\",\n filter_value=order_record[\"driver_id\"],\n )\n customer_record = find_asof_record(\n customer_records,\n ts_key=customer_fv.input.event_timestamp_column,\n ts_start=order_record[event_timestamp] - customer_fv.ttl,\n ts_end=order_record[event_timestamp],\n filter_key=\"customer_id\",\n filter_value=order_record[\"customer_id\"],\n )\n\n order_record.update(\n {\n (f\"driver_stats__{k}\" if full_feature_names else k): driver_record.get(\n k, None\n )\n for k in (\"conv_rate\", \"avg_daily_trips\")\n }\n )\n\n order_record.update(\n {\n (\n f\"customer_profile__{k}\" if full_feature_names else k\n ): customer_record.get(k, None)\n for k in (\n \"current_balance\",\n \"avg_passenger_count\",\n \"lifetime_trip_count\",\n )\n }\n )\n\n # Convert records back to pandas dataframe\n expected_df = pd.DataFrame(order_records)\n\n # Move \"datetime\" column to front\n current_cols = expected_df.columns.tolist()\n current_cols.remove(event_timestamp)\n expected_df = expected_df[[event_timestamp] + current_cols]\n\n # Cast some columns to expected types, since we lose information when converting pandas DFs into Python objects.\n if full_feature_names:\n expected_column_types = {\n \"order_is_success\": \"int32\",\n \"driver_stats__conv_rate\": \"float32\",\n \"customer_profile__current_balance\": \"float32\",\n \"customer_profile__avg_passenger_count\": \"float32\",\n }\n else:\n expected_column_types = {\n \"order_is_success\": \"int32\",\n \"conv_rate\": \"float32\",\n \"current_balance\": \"float32\",\n \"avg_passenger_count\": \"float32\",\n }\n\n for 
col, typ in expected_column_types.items():\n expected_df[col] = expected_df[col].astype(typ)\n\n return expected_df\n\n\ndef stage_orders_bigquery(df, table_id):\n client = bigquery.Client()\n job_config = bigquery.LoadJobConfig()\n df.reset_index(drop=True, inplace=True)\n job = client.load_table_from_dataframe(df, table_id, job_config=job_config)\n job.result()\n\n\nclass BigQueryDataSet:\n def __init__(self, dataset_name):\n self.name = dataset_name\n\n def __enter__(self):\n client = bigquery.Client()\n dataset = bigquery.Dataset(f\"{client.project}.{self.name}\")\n dataset.location = \"US\"\n dataset = client.create_dataset(dataset, exists_ok=True)\n return dataset\n\n def __exit__(self, exc_type, exc_value, exc_traceback):\n print(\"Tearing down BigQuery dataset\")\n client = bigquery.Client()\n dataset_id = f\"{client.project}.{self.name}\"\n\n client.delete_dataset(dataset_id, delete_contents=True, not_found_ok=True)\n print(f\"Deleted dataset '{dataset_id}'\")\n if exc_type:\n print(\n \"***Logging exception {}***\".format(\n (exc_type, exc_value, exc_traceback)\n )\n )\n\n\n@pytest.mark.parametrize(\n \"infer_event_timestamp_col\", [False, True],\n)\n@pytest.mark.parametrize(\n \"full_feature_names\", [False, True],\n)\ndef test_historical_features_from_parquet_sources(\n infer_event_timestamp_col, full_feature_names\n):\n start_date = datetime.now().replace(microsecond=0, second=0, minute=0)\n (\n customer_entities,\n driver_entities,\n end_date,\n orders_df,\n start_date,\n ) = generate_entities(start_date, infer_event_timestamp_col)\n\n with TemporaryDirectory() as temp_dir:\n driver_df = driver_data.create_driver_hourly_stats_df(\n driver_entities, start_date, end_date\n )\n driver_source = stage_driver_hourly_stats_parquet_source(temp_dir, driver_df)\n driver_fv = create_driver_hourly_stats_feature_view(driver_source)\n customer_df = driver_data.create_customer_daily_profile_df(\n customer_entities, start_date, end_date\n )\n customer_source = stage_customer_daily_profile_parquet_source(\n temp_dir, customer_df\n )\n customer_fv = create_customer_daily_profile_feature_view(customer_source)\n driver = Entity(name=\"driver\", join_key=\"driver_id\", value_type=ValueType.INT64)\n customer = Entity(name=\"customer_id\", value_type=ValueType.INT64)\n\n store = FeatureStore(\n config=RepoConfig(\n registry=os.path.join(temp_dir, \"registry.db\"),\n project=\"default\",\n provider=\"local\",\n online_store=SqliteOnlineStoreConfig(\n path=os.path.join(temp_dir, \"online_store.db\")\n ),\n )\n )\n\n store.apply([driver, customer, driver_fv, customer_fv])\n\n job = store.get_historical_features(\n entity_df=orders_df,\n feature_refs=[\n \"driver_stats:conv_rate\",\n \"driver_stats:avg_daily_trips\",\n \"customer_profile:current_balance\",\n \"customer_profile:avg_passenger_count\",\n \"customer_profile:lifetime_trip_count\",\n ],\n full_feature_names=full_feature_names,\n )\n\n actual_df = job.to_df()\n event_timestamp = (\n DEFAULT_ENTITY_DF_EVENT_TIMESTAMP_COL\n if DEFAULT_ENTITY_DF_EVENT_TIMESTAMP_COL in orders_df.columns\n else \"e_ts\"\n )\n expected_df = get_expected_training_df(\n customer_df,\n customer_fv,\n driver_df,\n driver_fv,\n orders_df,\n event_timestamp,\n full_feature_names=full_feature_names,\n )\n assert_frame_equal(\n expected_df.sort_values(\n by=[event_timestamp, \"order_id\", \"driver_id\", \"customer_id\"]\n ).reset_index(drop=True),\n actual_df.sort_values(\n by=[event_timestamp, \"order_id\", \"driver_id\", \"customer_id\"]\n ).reset_index(drop=True),\n 
)\n\n\n@pytest.mark.integration\n@pytest.mark.parametrize(\n \"provider_type\", [\"local\", \"gcp\", \"gcp_custom_offline_config\"],\n)\n@pytest.mark.parametrize(\n \"infer_event_timestamp_col\", [False, True],\n)\n@pytest.mark.parametrize(\n \"full_feature_names\", [False, True],\n)\ndef test_historical_features_from_bigquery_sources(\n provider_type, infer_event_timestamp_col, capsys, full_feature_names\n):\n start_date = datetime.now().replace(microsecond=0, second=0, minute=0)\n (\n customer_entities,\n driver_entities,\n end_date,\n orders_df,\n start_date,\n ) = generate_entities(start_date, infer_event_timestamp_col)\n\n bigquery_dataset = (\n f\"test_hist_retrieval_{int(time.time_ns())}_{random.randint(1000, 9999)}\"\n )\n\n with BigQueryDataSet(bigquery_dataset), TemporaryDirectory() as temp_dir:\n gcp_project = bigquery.Client().project\n\n # Orders Query\n table_id = f\"{bigquery_dataset}.orders\"\n stage_orders_bigquery(orders_df, table_id)\n entity_df_query = f\"SELECT * FROM {gcp_project}.{table_id}\"\n\n # Driver Feature View\n driver_df = driver_data.create_driver_hourly_stats_df(\n driver_entities, start_date, end_date\n )\n driver_table_id = f\"{gcp_project}.{bigquery_dataset}.driver_hourly\"\n stage_driver_hourly_stats_bigquery_source(driver_df, driver_table_id)\n driver_source = BigQuerySource(\n table_ref=driver_table_id,\n event_timestamp_column=\"datetime\",\n created_timestamp_column=\"created\",\n )\n driver_fv = create_driver_hourly_stats_feature_view(driver_source)\n\n # Customer Feature View\n customer_df = driver_data.create_customer_daily_profile_df(\n customer_entities, start_date, end_date\n )\n customer_table_id = f\"{gcp_project}.{bigquery_dataset}.customer_profile\"\n\n stage_customer_daily_profile_bigquery_source(customer_df, customer_table_id)\n customer_source = BigQuerySource(\n table_ref=customer_table_id,\n event_timestamp_column=\"datetime\",\n created_timestamp_column=\"\",\n )\n customer_fv = create_customer_daily_profile_feature_view(customer_source)\n\n driver = Entity(name=\"driver\", join_key=\"driver_id\", value_type=ValueType.INT64)\n customer = Entity(name=\"customer_id\", value_type=ValueType.INT64)\n\n if provider_type == \"local\":\n store = FeatureStore(\n config=RepoConfig(\n registry=os.path.join(temp_dir, \"registry.db\"),\n project=\"default\",\n provider=\"local\",\n online_store=SqliteOnlineStoreConfig(\n path=os.path.join(temp_dir, \"online_store.db\"),\n ),\n offline_store=BigQueryOfflineStoreConfig(\n type=\"bigquery\", dataset=bigquery_dataset\n ),\n )\n )\n elif provider_type == \"gcp\":\n store = FeatureStore(\n config=RepoConfig(\n registry=os.path.join(temp_dir, \"registry.db\"),\n project=\"\".join(\n random.choices(string.ascii_uppercase + string.digits, k=10)\n ),\n provider=\"gcp\",\n offline_store=BigQueryOfflineStoreConfig(\n type=\"bigquery\", dataset=bigquery_dataset\n ),\n )\n )\n elif provider_type == \"gcp_custom_offline_config\":\n store = FeatureStore(\n config=RepoConfig(\n registry=os.path.join(temp_dir, \"registry.db\"),\n project=\"\".join(\n random.choices(string.ascii_uppercase + string.digits, k=10)\n ),\n provider=\"gcp\",\n offline_store=BigQueryOfflineStoreConfig(\n type=\"bigquery\", dataset=\"foo\"\n ),\n )\n )\n else:\n raise Exception(\"Invalid provider used as part of test configuration\")\n\n store.apply([driver, customer, driver_fv, customer_fv])\n\n event_timestamp = (\n DEFAULT_ENTITY_DF_EVENT_TIMESTAMP_COL\n if DEFAULT_ENTITY_DF_EVENT_TIMESTAMP_COL in orders_df.columns\n else \"e_ts\"\n 
)\n expected_df = get_expected_training_df(\n customer_df,\n customer_fv,\n driver_df,\n driver_fv,\n orders_df,\n event_timestamp,\n full_feature_names,\n )\n\n job_from_sql = store.get_historical_features(\n entity_df=entity_df_query,\n feature_refs=[\n \"driver_stats:conv_rate\",\n \"driver_stats:avg_daily_trips\",\n \"customer_profile:current_balance\",\n \"customer_profile:avg_passenger_count\",\n \"customer_profile:lifetime_trip_count\",\n ],\n full_feature_names=full_feature_names,\n )\n\n start_time = datetime.utcnow()\n actual_df_from_sql_entities = job_from_sql.to_df()\n end_time = datetime.utcnow()\n with capsys.disabled():\n print(\n str(\n f\"\\nTime to execute job_from_sql.to_df() = '{(end_time - start_time)}'\"\n )\n )\n\n assert sorted(expected_df.columns) == sorted(\n actual_df_from_sql_entities.columns\n )\n assert_frame_equal(\n expected_df.sort_values(\n by=[event_timestamp, \"order_id\", \"driver_id\", \"customer_id\"]\n ).reset_index(drop=True),\n actual_df_from_sql_entities[expected_df.columns]\n .sort_values(by=[event_timestamp, \"order_id\", \"driver_id\", \"customer_id\"])\n .reset_index(drop=True),\n check_dtype=False,\n )\n\n table_from_sql_entities = job_from_sql.to_arrow()\n assert_frame_equal(\n actual_df_from_sql_entities, table_from_sql_entities.to_pandas()\n )\n\n timestamp_column = (\n \"e_ts\"\n if infer_event_timestamp_col\n else DEFAULT_ENTITY_DF_EVENT_TIMESTAMP_COL\n )\n\n entity_df_query_with_invalid_join_key = (\n f\"select order_id, driver_id, customer_id as customer, \"\n f\"order_is_success, {timestamp_column}, FROM {gcp_project}.{table_id}\"\n )\n # Rename the join key; this should now raise an error.\n assertpy.assert_that(store.get_historical_features).raises(\n errors.FeastEntityDFMissingColumnsError\n ).when_called_with(\n entity_df=entity_df_query_with_invalid_join_key,\n feature_refs=[\n \"driver_stats:conv_rate\",\n \"driver_stats:avg_daily_trips\",\n \"customer_profile:current_balance\",\n \"customer_profile:avg_passenger_count\",\n \"customer_profile:lifetime_trip_count\",\n ],\n )\n\n job_from_df = store.get_historical_features(\n entity_df=orders_df,\n feature_refs=[\n \"driver_stats:conv_rate\",\n \"driver_stats:avg_daily_trips\",\n \"customer_profile:current_balance\",\n \"customer_profile:avg_passenger_count\",\n \"customer_profile:lifetime_trip_count\",\n ],\n full_feature_names=full_feature_names,\n )\n\n # Rename the join key; this should now raise an error.\n orders_df_with_invalid_join_key = orders_df.rename(\n {\"customer_id\": \"customer\"}, axis=\"columns\"\n )\n assertpy.assert_that(store.get_historical_features).raises(\n errors.FeastEntityDFMissingColumnsError\n ).when_called_with(\n entity_df=orders_df_with_invalid_join_key,\n feature_refs=[\n \"driver_stats:conv_rate\",\n \"driver_stats:avg_daily_trips\",\n \"customer_profile:current_balance\",\n \"customer_profile:avg_passenger_count\",\n \"customer_profile:lifetime_trip_count\",\n ],\n )\n\n # Make sure that custom dataset name is being used from the offline_store config\n if provider_type == \"gcp_custom_offline_config\":\n assertpy.assert_that(job_from_df.query).contains(\"foo.entity_df\")\n else:\n assertpy.assert_that(job_from_df.query).contains(\n f\"{bigquery_dataset}.entity_df\"\n )\n\n start_time = datetime.utcnow()\n actual_df_from_df_entities = job_from_df.to_df()\n end_time = datetime.utcnow()\n with capsys.disabled():\n print(\n str(\n f\"Time to execute job_from_df.to_df() = '{(end_time - start_time)}'\\n\"\n )\n )\n\n assert sorted(expected_df.columns) 
== sorted(actual_df_from_df_entities.columns)\n assert_frame_equal(\n expected_df.sort_values(\n by=[event_timestamp, \"order_id\", \"driver_id\", \"customer_id\"]\n ).reset_index(drop=True),\n actual_df_from_df_entities[expected_df.columns]\n .sort_values(by=[event_timestamp, \"order_id\", \"driver_id\", \"customer_id\"])\n .reset_index(drop=True),\n check_dtype=False,\n )\n\n table_from_df_entities = job_from_df.to_arrow()\n assert_frame_equal(\n actual_df_from_df_entities, table_from_df_entities.to_pandas()\n )\n\n\n@pytest.mark.integration\ndef test_timestamp_bound_inference_from_entity_df_using_bigquery():\n start_date = datetime.now().replace(microsecond=0, second=0, minute=0)\n (_, _, _, entity_df, start_date) = generate_entities(\n start_date, infer_event_timestamp_col=True\n )\n\n table_id = f\"foo.table_id_{int(time.time_ns())}_{random.randint(1000, 9999)}\"\n stage_orders_bigquery(entity_df, table_id)\n\n client = bigquery.Client()\n table = client.get_table(table=table_id)\n\n # Ensure that the table expires after some time\n table.expires = datetime.utcnow() + timedelta(minutes=30)\n client.update_table(table, [\"expires\"])\n\n min_timestamp, max_timestamp = _get_entity_df_timestamp_bounds(\n client, str(table.reference), \"e_ts\"\n )\n\n assert min_timestamp.astimezone(\"UTC\") == min(entity_df[\"e_ts\"]).astimezone(\"UTC\")\n assert max_timestamp.astimezone(\"UTC\") == max(entity_df[\"e_ts\"]).astimezone(\"UTC\")\n\n\ndef test_feature_name_collision_on_historical_retrieval():\n\n # _validate_feature_refs is the function that checks for colliding feature names\n # check when feature names collide and 'full_feature_names=False'\n with pytest.raises(FeatureNameCollisionError) as error:\n _validate_feature_refs(\n feature_refs=[\n \"driver_stats:conv_rate\",\n \"driver_stats:avg_daily_trips\",\n \"customer_profile:current_balance\",\n \"customer_profile:avg_passenger_count\",\n \"customer_profile:lifetime_trip_count\",\n \"customer_profile:avg_daily_trips\",\n ],\n full_feature_names=False,\n )\n\n expected_error_message = (\n \"Duplicate features named avg_daily_trips found.\\n\"\n \"To resolve this collision, either use the full feature name by setting \"\n \"'full_feature_names=True', or ensure that the features in question have different names.\"\n )\n\n assert str(error.value) == expected_error_message\n\n # check when feature names collide and 'full_feature_names=True'\n with pytest.raises(FeatureNameCollisionError) as error:\n _validate_feature_refs(\n feature_refs=[\n \"driver_stats:conv_rate\",\n \"driver_stats:avg_daily_trips\",\n \"driver_stats:avg_daily_trips\",\n \"customer_profile:current_balance\",\n \"customer_profile:avg_passenger_count\",\n \"customer_profile:lifetime_trip_count\",\n \"customer_profile:avg_daily_trips\",\n ],\n full_feature_names=True,\n )\n\n expected_error_message = (\n \"Duplicate features named driver_stats__avg_daily_trips found.\\n\"\n \"To resolve this collision, please ensure that the features in question \"\n \"have different names.\"\n )\n assert str(error.value) == expected_error_message\n"
] |
[
[
"numpy.random.seed",
"pandas.DataFrame"
]
] |
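The Feast test above builds its expected training dataframe with a hand-rolled point-in-time join: for each order it takes the latest feature record whose timestamp falls inside the feature view's TTL window. A condensed, standalone restatement of that lookup with toy records (the values are invented for illustration):

```python
from datetime import datetime, timedelta

# Toy feature records keyed by driver_id (values made up for illustration).
driver_records = [
    {"driver_id": 5001, "datetime": datetime(2021, 5, 1, 10), "conv_rate": 0.2},
    {"driver_id": 5001, "datetime": datetime(2021, 5, 1, 12), "conv_rate": 0.5},
    {"driver_id": 5002, "datetime": datetime(2021, 5, 1, 11), "conv_rate": 0.9},
]

def find_asof_record(records, ts_key, ts_start, ts_end, filter_key, filter_value):
    """Latest record for filter_value whose timestamp lies in [ts_start, ts_end]."""
    found = {}
    for record in records:
        if record[filter_key] == filter_value and ts_start <= record[ts_key] <= ts_end:
            if not found or found[ts_key] < record[ts_key]:
                found = record
    return found

event_ts = datetime(2021, 5, 1, 13)
ttl = timedelta(hours=2)
# Only the 12:00 row is inside the TTL window, so it wins the as-of join.
print(find_asof_record(driver_records, "datetime", event_ts - ttl, event_ts,
                       "driver_id", 5001))
```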
binyao2020/ElegantRL
|
[
"bf79f0d071d00cd93be03f1ca005020c3ab8dfe0"
] |
[
"BetaWarning/Tutorial.py"
] |
[
"import time\nimport numpy as np\nimport numpy.random as rd\n\nimport gym\nimport torch\nimport torch.nn as nn\n\n\nclass EvaluateRewardSV: # SV: Simplify Version. Only for tutorial.\n def __init__(self, env):\n self.env = env\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n def get_eva_reward__sv(self, act, max_step, action_max, is_discrete, is_render=False):\n reward_sum = 0\n state = self.env.reset()\n for _ in range(max_step):\n states = torch.tensor((state,), dtype=torch.float32, device=self.device)\n actions = act(states)\n if is_discrete:\n actions = actions.argmax(dim=1) # discrete action space\n action = actions.cpu().data.numpy()[0]\n next_state, reward, done, _ = self.env.step(action * action_max)\n reward_sum += reward\n\n if is_render: # open a window and show this env\n self.env.render()\n if done:\n break\n state = next_state\n return reward_sum\n\n\nclass QNet(nn.Module): # class AgentQLearning\n def __init__(self, state_dim, action_dim, mid_dim):\n super().__init__()\n self.net = nn.Sequential(nn.Linear(state_dim, mid_dim), nn.ReLU(),\n nn.Linear(mid_dim, mid_dim), nn.ReLU(),\n nn.Linear(mid_dim, action_dim), )\n\n def forward(self, s):\n q = self.net(s)\n return q\n\n\nclass Actor(nn.Module):\n def __init__(self, state_dim, action_dim, mid_dim):\n super().__init__()\n self.net = nn.Sequential(nn.Linear(state_dim, mid_dim), nn.ReLU(),\n nn.Linear(mid_dim, mid_dim), nn.ReLU(),\n nn.Linear(mid_dim, action_dim), nn.Tanh(), )\n\n def forward(self, s):\n a = self.net(s)\n return a\n\n\nclass Critic(nn.Module): # 2020-05-05 fix bug\n def __init__(self, state_dim, action_dim, mid_dim):\n super().__init__()\n self.net = nn.Sequential(nn.Linear(state_dim + action_dim, mid_dim), nn.ReLU(),\n nn.Linear(mid_dim, mid_dim), nn.ReLU(),\n nn.Linear(mid_dim, 1), )\n\n def forward(self, s, a):\n x = torch.cat((s, a), dim=1)\n q = self.net(x)\n return q\n\n\ndef soft_target_update(target, online, tau=5e-3):\n for target_param, param in zip(target.parameters(), online.parameters()):\n target_param.data.copy_(tau * param.data + (1.0 - tau) * target_param.data)\n\n\ndef run__tutorial__dqn_discrete_action():\n \"\"\"It is a DQN tutorial, we need 1min for training.\n This simplify DQN can't work well on harder task.\n Other RL algorithms can work well on harder task but complicated.\n You can change this code and make the training finish in (10 sec, 10k step) as an execrise.\n \"\"\"\n\n env_name = 'CartPole-v0' # a tutorial RL env. 
We need 10s for training.\n env = gym.make(env_name) # an OpenAI standard env\n state_dim = 4\n action_dim = 2\n action_max = int(1)\n target_reward = 195.0\n is_discrete = True\n # from AgentRun import get_env_info\n # state_dim, action_dim, max_action, target_reward, is_discrete = get_env_info(env, is_print=True)\n # assert is_discrete is True # DQN is for discrete action space.\n \"\"\" You will see the following:\n | env_name: <CartPoleEnv<CartPole-v0>>, action space: Discrete\n | state_dim: 4, action_dim: 2, action_max: 1, target_reward: 195.0\n \"\"\"\n\n ''' I copy the code from AgentDQN to the following for tutorial.'''\n net_dim = 2 ** 7 # the dimension (or width) of network\n learning_rate = 2e-4 # learning rate for Adam Optimizer (ADAM = RMSProp + Momentum)\n max_buffer = 2 ** 12 # the max storage number of replay buffer.\n max_epoch = 2 ** 12 # epoch or episodes when training step\n max_step = 2 ** 9 # the max step that actor interact with env before training critic\n gamma = 0.99 # reward discount factor (gamma must less than 1.0)\n batch_size = 2 ** 6 # batch_size for network training\n criterion = torch.nn.MSELoss() # criterion for critic's q_value estimate\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") # choose GPU or CPU automatically\n\n ''' QNet is an actor or critic? DQN is not a Actor-Critic Method.\n AgentDQN chooses action with the largest q value outputing by Q_Network. Q_Network is an actor.\n AgentDQN outputs q_value by Q_Network. Q_Network is also a critic.\n '''\n act = QNet(state_dim, action_dim, net_dim).to(device)\n act.train()\n act_optim = torch.optim.Adam(act.parameters(), lr=learning_rate)\n\n act_target = QNet(state_dim, action_dim, net_dim).to(device)\n act_target.load_state_dict(act.state_dict())\n act_target.eval()\n\n # from AgentRun import BufferList # simpler but slower\n from AgentZoo import BufferArray # faster but a bit complicated\n buffer = BufferArray(max_buffer, state_dim, action_dim=1) # experiment replay buffer, discrete action is an int\n\n '''training loop'''\n self_state = env.reset()\n self_steps = 0 # steps of an episode\n self_r_sum = 0.0 # sum of rewards of an episode with exploration\n total_step = 0 # total step before training st0p\n\n evaluator = EvaluateRewardSV(env) # SV: Simplify Version for tutorial\n max_reward = evaluator.get_eva_reward__sv(act, max_step, action_max, is_discrete)\n # the max r_sum without exploration\n\n start_time = time.time()\n for epoch in range(max_epoch):\n '''update_buffer'''\n explore_rate = 0.1 # explore rate when update_buffer(), epsilon-greedy\n rewards = list()\n steps = list()\n for _ in range(max_step):\n if rd.rand() < explore_rate: # epsilon-Greedy: explored policy for DQN\n action = rd.randint(action_dim)\n else:\n states = torch.tensor((self_state,), dtype=torch.float32, device=device)\n actions = act_target(states).argmax(dim=1).cpu().data.numpy() # discrete action space\n action = actions[0]\n next_state, reward, done, _ = env.step(action)\n\n self_r_sum += reward\n self_steps += 1\n\n mask = 0.0 if done else gamma\n buffer.add_memo((reward, mask, self_state, action, next_state))\n\n self_state = next_state\n if done:\n rewards.append(self_r_sum)\n self_r_sum = 0.0\n\n steps.append(self_steps)\n self_steps = 0\n\n self_state = env.reset()\n\n total_step += sum(steps)\n avg_reward = np.average(rewards)\n print(end=f'Reward:{avg_reward:6.1f} Step:{total_step:8} ')\n\n '''update_parameters'''\n loss_c_sum = 0.0\n update_times = max_step\n 
buffer.init_before_sample() # update the buffer.now_len\n for _ in range(update_times):\n with torch.no_grad():\n rewards, masks, states, actions, next_states = buffer.random_sample(batch_size, device)\n\n next_q_target = act_target(next_states).max(dim=1, keepdim=True)[0]\n q_target = rewards + masks * next_q_target\n\n act.train()\n actions = actions.type(torch.long)\n q_eval = act(states).gather(1, actions)\n critic_loss = criterion(q_eval, q_target)\n loss_c_sum += critic_loss.item()\n\n act_optim.zero_grad()\n critic_loss.backward()\n act_optim.step()\n\n soft_target_update(act_target, act, tau=5e-2)\n # soft_target_update(act_target, act, tau=5e-3)\n ''' A small tau can stabilize training in harder env. \n You can change tau into smaller tau 5e-3. But this env is too easy. \n You can try the harder env and other DRL Algorithms in run__xx() in AgentRun.py\n '''\n\n # loss_a_avg = 0.0\n loss_c_avg = loss_c_sum / update_times\n print(end=f'Loss:{loss_c_avg:6.1f} ')\n\n # evaluate the true reward of this agent without exploration\n eva_reward_list = [evaluator.get_eva_reward__sv(act, max_step, action_max, is_discrete)\n for _ in range(3)]\n eva_reward = np.average(eva_reward_list)\n print(f'TrueRewward:{eva_reward:6.1f}')\n if eva_reward > max_reward:\n max_reward = eva_reward\n\n if max_reward > target_reward:\n print(f\"|\\tReach target_reward: {max_reward:6.1f} > {target_reward:6.1f}\")\n break\n\n used_time = int(time.time() - start_time)\n print(f\"|\\tTraining UsedTime: {used_time}s\")\n\n '''open a window and show the env'''\n for _ in range(4):\n eva_reward = evaluator.get_eva_reward__sv(act, max_step, action_max, is_discrete, is_render=True)\n print(f'|Evaluated reward is: {eva_reward}')\n\n\ndef run__tutorial__ddpg_continuous_action():\n \"\"\"It is a DDPG tutorial, we need about 300s for training.\n I hate OU Process because of its lots of hyper-parameters. So this DDPG has no OU Process.\n This simplify DDPG can't work well on harder task.\n Other RL algorithms can work well on harder task but complicated.\n You can change this code and make the training finish in 100s.\n \"\"\"\n\n env_name = 'Pendulum-v0' # a tutorial RL env. 
We need 300s for training.\n env = gym.make(env_name) # an OpenAI standard env\n state_dim = 3\n action_dim = 1\n action_max = 2.0\n target_reward = -200.0\n is_discrete = False\n # from AgentRun import get_env_info\n # state_dim, action_dim, max_action, target_reward, is_discrete = get_env_info(env, is_print=True)\n # assert is_discrete is False # DDPG is for discrete action space.\n \"\"\" You will see the following:\n | env_name: <PendulumEnv<Pendulum-v0>>, action space: Continuous\n | state_dim: 3, action_dim: 1, action_max: 2.0, target_reward: -200.0\n \"\"\"\n\n ''' I copy the code from AgentDQN to the following for tutorial.'''\n net_dim = 2 ** 5 # the dimension (or width) of network\n learning_rate = 2e-4 # learning rate for Adam Optimizer (ADAM = RMSProp + Momentum)\n max_buffer = 2 ** 14 # the max storage number of replay buffer.\n max_epoch = 2 ** 12 # epoch or episodes when training step\n max_step = 2 ** 8 # the max step that actor interact with env before training critic\n gamma = 0.99 # reward discount factor (gamma must less than 1.0)\n batch_size = 2 ** 7 # batch_size for network training\n update_freq = 2 ** 7\n criterion = torch.nn.SmoothL1Loss() # criterion for critic's q_value estimate\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") # choose GPU or CPU automatically\n\n act_dim = net_dim\n act = Actor(state_dim, action_dim, act_dim).to(device)\n act.train()\n act_optim = torch.optim.Adam(act.parameters(), lr=learning_rate)\n\n act_target = Actor(state_dim, action_dim, act_dim).to(device)\n act_target.load_state_dict(act.state_dict())\n act_target.eval()\n\n cri_dim = int(net_dim * 1.25)\n cri = Critic(state_dim, action_dim, cri_dim).to(device)\n cri.train()\n cri_optim = torch.optim.Adam(cri.parameters(), lr=learning_rate)\n\n cri_target = Critic(state_dim, action_dim, cri_dim).to(device)\n cri_target.load_state_dict(cri.state_dict())\n cri_target.eval()\n\n # from AgentRun import BufferList # simpler but slower\n from AgentZoo import BufferArray # faster but a bit complicated\n buffer = BufferArray(max_buffer, state_dim, action_dim) # experiment replay buffer\n\n '''training loop'''\n self_state = env.reset()\n self_steps = 0 # the steps of an episode\n self_r_sum = 0.0 # the sum of rewards of an episode with exploration\n total_step = 0\n explore_noise = 0.05\n\n evaluator = EvaluateRewardSV(env) # SV: Simplify Version for tutorial\n max_reward = evaluator.get_eva_reward__sv(act, max_step, action_max, is_discrete)\n # the max r_sum without exploration\n\n start_time = time.time()\n while total_step < max_step: # collect buffer before training\n for _ in range(max_step):\n action = rd.uniform(-1, 1, size=action_dim)\n next_state, reward, done, _ = env.step(action * action_max)\n mask = 0.0 if done else gamma\n buffer.add_memo((reward, mask, self_state, action, next_state))\n total_step += 1\n if done:\n self_state = env.reset()\n break\n self_state = next_state\n\n for epoch in range(max_epoch):\n '''update_buffer'''\n explore_rate = 0.5 # explore rate when update_buffer(), epsilon-greedy\n reward_list = list()\n step_list = list()\n for _ in range(max_step):\n states = torch.tensor((self_state,), dtype=torch.float32, device=device)\n actions = act_target(states).cpu().data.numpy() # discrete action space\n action = actions[0]\n if rd.rand() < explore_rate:\n action = rd.normal(action, explore_noise).clip(-1, +1)\n\n next_state, reward, done, _ = env.step(action * action_max)\n\n self_r_sum += reward\n self_steps += 1\n\n mask = 0.0 if done 
else gamma\n buffer.add_memo((reward, mask, self_state, action, next_state))\n\n self_state = next_state\n if done:\n reward_list.append(self_r_sum)\n self_r_sum = 0.0\n\n step_list.append(self_steps)\n self_steps = 0\n\n self_state = env.reset()\n\n total_step += sum(step_list)\n avg_reward = np.average(reward_list)\n print(end=f'Reward:{avg_reward:8.1f} Step:{total_step:8} ')\n\n '''update_parameters'''\n loss_a_sum = 0.0\n loss_c_sum = 0.0\n update_times = max_step\n buffer.init_before_sample() # update the buffer.now_len\n for i in range(update_times):\n for _ in range(2): # Two Time-scale Update Rule (TTUR)\n with torch.no_grad():\n reward, mask, state, action, next_state = buffer.random_sample(batch_size, device)\n\n next_action = act_target(next_state)\n next_q_target = cri_target(next_state, next_action)\n q_target = reward + mask * next_q_target\n\n q_eval = cri(state, action)\n critic_loss = criterion(q_eval, q_target)\n loss_c_sum += critic_loss.item()\n\n cri_optim.zero_grad()\n critic_loss.backward()\n cri_optim.step()\n\n action_pg = act(state) # policy gradient\n actor_loss = -cri(state, action_pg).mean() # policy gradient\n loss_a_sum += actor_loss.item()\n\n act_optim.zero_grad()\n actor_loss.backward()\n act_optim.step()\n\n '''soft target update'''\n # soft_target_update(cri_target, cri, tau=5e-3)\n # soft_target_update(act_target, act, tau=5e-3)\n '''hard target update'''\n if i % update_freq == 0:\n cri_target.load_state_dict(cri.state_dict())\n act_target.load_state_dict(act.state_dict())\n\n loss_c_avg = loss_c_sum / (update_times * 2)\n loss_a_avg = loss_a_sum / update_times\n print(end=f'LossC:{loss_c_avg:6.1f} LossA:{loss_a_avg:6.1f} ')\n\n # evaluate the true reward of this agent without exploration\n eva_reward_list = [evaluator.get_eva_reward__sv(act, max_step, action_max, is_discrete)\n for _ in range(3)]\n eva_reward = np.average(eva_reward_list)\n print(f'TrueRewward:{eva_reward:8.1f}')\n if eva_reward > max_reward:\n max_reward = eva_reward\n\n if max_reward > target_reward:\n print(f\"|\\tReach target_reward: {max_reward:6.1f} > {target_reward:6.1f}\")\n break\n\n used_time = int(time.time() - start_time)\n print(f\"|\\tTraining UsedTime: {used_time}s\")\n\n '''open a window and show the env'''\n for _ in range(4):\n eva_reward = evaluator.get_eva_reward__sv(act, max_step, action_max, is_discrete, is_render=True)\n print(f'| Evaluated reward is: {eva_reward}')\n\n\nif __name__ == '__main__':\n # import os\n # os.environ['CUDA_VISIBLE_DEVICES'] = str(0)\n '''If you want to set GPU id, use above codes.'''\n\n run__tutorial__dqn_discrete_action()\n # run__tutorial__ddpg_continuous_action()\n"
] |
[
[
"torch.nn.SmoothL1Loss",
"torch.cat",
"torch.tensor",
"torch.nn.Tanh",
"torch.nn.Linear",
"numpy.random.normal",
"torch.no_grad",
"numpy.random.rand",
"torch.cuda.is_available",
"numpy.random.uniform",
"torch.nn.ReLU",
"numpy.average",
"torch.nn.MSELoss",
"numpy.random.randint"
]
] |
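Both ElegantRL tutorials above rely on `soft_target_update` (Polyak averaging) to keep the target network trailing the online network. A self-contained PyTorch sketch of the same update on a throwaway linear layer; the `p.add_(0.1)` line merely stands in for a real optimizer step.

```python
import torch
import torch.nn as nn

def soft_target_update(target, online, tau=5e-3):
    # Polyak averaging: target <- tau * online + (1 - tau) * target
    for target_param, param in zip(target.parameters(), online.parameters()):
        target_param.data.copy_(tau * param.data + (1.0 - tau) * target_param.data)

online = nn.Linear(4, 2)
target = nn.Linear(4, 2)
target.load_state_dict(online.state_dict())  # start from identical weights

# Pretend the online network took a training step, then nudge the target.
with torch.no_grad():
    for p in online.parameters():
        p.add_(0.1)  # stand-in for an optimizer update
soft_target_update(target, online, tau=5e-2)
print(next(target.parameters())[0, :2])
```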
vishalbelsare/UGFraud
|
[
"29f9486802eb1d57705028b3e9db704570f5b66f"
] |
[
"UGFraud/Utils/helper.py"
] |
[
"from sklearn.metrics import average_precision_score\nfrom sklearn.metrics import roc_auc_score\nimport gzip\nimport numpy as np\nimport networkx as nx\nimport time\nimport functools\nimport warnings\n\n\ndef create_ground_truth(user_data):\n \"\"\"Given user data, return a dictionary of labels of users and reviews\n Args:\n user_data: key = user_id, value = list of review tuples.\n Return:\n user_ground_truth: key = user id (not prefixed), value = 0 (non-spam) /1 (spam)\n review_ground_truth: review id (not prefixed), value = 0 (non-spam) /1 (spam)\n \"\"\"\n user_ground_truth = {}\n review_ground_truth = {}\n\n for user_id, reviews in user_data.items():\n\n user_ground_truth[user_id] = 0\n\n for r in reviews:\n prod_id = r[0]\n label = r[2]\n\n if label == -1:\n review_ground_truth[(user_id, prod_id)] = 1\n user_ground_truth[user_id] = 1\n else:\n review_ground_truth[(user_id, prod_id)] = 0\n\n return user_ground_truth, review_ground_truth\n\n\ndef evaluate(y, pred_y):\n \"\"\"\n Revise: test when a key is a review/account.\n Evaluate the prediction of account and review by SpEagle\n Args:\n y: dictionary with key = user_id/review_id and value = ground truth (1 means spam, 0 means non-spam)\n pred_y: dictionary with key = user_id/review_id and value = p(y=spam | x) produced by SpEagle.\n the keys in pred_y must be a subset of the keys in y\n \"\"\"\n posteriors = []\n ground_truth = []\n\n for k, v in pred_y.items():\n if k in y:\n posteriors.append(v)\n ground_truth.append(y[k])\n\n if len(np.unique(ground_truth)) < 2:\n warnings.warn(\"Only one class present in ground_truth, ROC AUC score will be omitted\")\n ap = average_precision_score(ground_truth, posteriors)\n return None, ap\n else:\n auc = roc_auc_score(ground_truth, posteriors)\n ap = average_precision_score(ground_truth, posteriors)\n return auc, ap\n\n\ndef scale_value(value_dict):\n \"\"\"\n Calculate and return a dict of the value of input dict scaled to (0, 1)\n \"\"\"\n\n ranked_dict = [(user, value_dict[user]) for user in value_dict.keys()]\n ranked_dict = sorted(ranked_dict, reverse=True, key=lambda x: x[1])\n\n up_max, up_mean, up_min = ranked_dict[0][1], ranked_dict[int(len(ranked_dict) / 2)][1], ranked_dict[-1][1]\n\n scale_dict = {}\n for i, p in value_dict.items():\n norm_value = (p - up_min) / (up_max - up_min)\n if norm_value == 0: # avoid the 0\n scale_dict[i] = 0 + 1e-7\n elif norm_value == 1: # avoid the 1\n scale_dict[i] = 1 - 1e-7\n else:\n scale_dict[i] = norm_value\n\n return scale_dict\n\n\ndef nor_priors(priors):\n \"\"\"\n Normalize the node priors for GANG\n :param priors:\n :return:\n \"\"\"\n new_upriors, new_rpriors, new_ppriors = priors\n\n # normalize the node priors to (0,1)\n # if we normalize the prior, we need to set nor_flg to True for the gang model\n ranked_upriors = [(user, new_upriors[user]) for user in new_upriors.keys()]\n ranked_upriors = sorted(ranked_upriors, reverse=True, key=lambda x: x[1])\n ranked_rpriors = [(user, new_rpriors[user]) for user in new_rpriors.keys()]\n ranked_rpriors = sorted(ranked_rpriors, reverse=True, key=lambda x: x[1])\n ranked_ppriors = [(user, new_ppriors[user]) for user in new_ppriors.keys()]\n ranked_ppriors = sorted(ranked_ppriors, reverse=True, key=lambda x: x[1])\n u_max, u_mean, u_min = ranked_upriors[0][1], ranked_upriors[int(len(ranked_upriors) / 2)][1], ranked_upriors[-1][1]\n p_max, p_mean, p_min = ranked_ppriors[0][1], ranked_ppriors[int(len(ranked_ppriors) / 2)][1], ranked_ppriors[-1][1]\n r_max, r_mean, r_min = ranked_rpriors[0][1], 
ranked_rpriors[int(len(ranked_rpriors) / 2)][1], ranked_rpriors[-1][1]\n for i, p in priors[0].items():\n priors[0][i] = (p - u_min) / (u_max - u_min)\n for i, p in priors[1].items():\n priors[1][i] = (p - r_min) / (r_max - r_min)\n for i, p in priors[2].items():\n priors[2][i] = (p - p_min) / (p_max - p_min)\n\n return priors, [u_mean, r_mean, p_mean]\n\n\ndef get_hash(data):\n import hashlib\n return hashlib.md5(data).hexdigest()\n\n\ndef read_graph_data(metadata_filename, adj=False):\n \"\"\" Read the user-review-product graph from file. Can output the graph in different formats\n Args:\n metadata_filename: a gzipped file containing the graph.\n adj: if True: create adjacent data, default is False\n Return:\n graph: user-review / prod-review / list of adjacent(adj=True)\n \"\"\"\n\n user_data = {}\n\n prod_data = {}\n\n adj_data = []\n\n # use the rt mode to read ascii strings instead of binary\n if adj is False:\n with gzip.open(metadata_filename, 'rt') as f:\n # file format: each line is a tuple (user id, product id, rating, label, date)\n for line in f:\n items = line.strip().split()\n u_id = items[0]\n p_id = items[1]\n if items[2] != 'None':\n rating = float(items[2])\n else:\n rating = 'None'\n label = int(items[3])\n date = items[4]\n\n if u_id not in user_data:\n user_data[u_id] = []\n user_data[u_id].append((p_id, rating, label, date))\n\n if p_id not in prod_data:\n prod_data[p_id] = []\n prod_data[p_id].append((u_id, rating, label, date))\n\n # create adj_list [u_id, p_id, 1/2], where 1 indicates positive rating (4, 5)\n # and 2 indicates negative rating (1, 2, 3)\n\n print('read reviews from %s' % metadata_filename)\n print('number of users = %d' % len(user_data))\n print('number of products = %d' % len(prod_data))\n return user_data, prod_data\n else:\n # create adj_list [u_id, p_id, 1/2], where 1 indicates positive rating (4, 5)\n # and 2 indicates negative rating (1, 2, 3)\n with gzip.open(metadata_filename, 'rt') as f:\n # file format: each line is a tuple (user id, product id, rating, label, date)\n for line in f:\n items = line.strip().split()\n u_id = items[0]\n p_id = items[1]\n if items[2] != 'None':\n rating = float(items[2])\n else:\n rating = 'None'\n label = int(items[3])\n date = items[4]\n\n if u_id not in user_data:\n user_data[u_id] = []\n user_data[u_id].append((p_id, rating, label, date))\n\n if p_id not in prod_data:\n prod_data[p_id] = []\n prod_data[p_id].append((u_id, rating, label, date))\n\n if int(rating) <= 3:\n rating = int(2)\n else:\n rating = int(1)\n adj_data.append([u_id, p_id, rating])\n\n print('read reviews from %s' % metadata_filename)\n print('number of users = %d' % len(user_data))\n print('number of products = %d' % len(prod_data))\n print('number of ratings = %d' % len(adj_data))\n return user_data, prod_data, np.array(adj_data, dtype='int32')\n\n\ndef depth(data):\n \"\"\"\n Get the depth of a dictionary\n Args:\n data: data in dictionary type\n\n Returns: the depth of a dictionary\n\n \"\"\"\n if isinstance(data, dict):\n return 1 + (max(map(depth, data.values())) if data else 0)\n return 0\n\n\ndef data_checker(data):\n \"\"\"\n data validation\n Args:\n data: data in dictionary type\n\n Returns: pass the validation\n\n \"\"\"\n if isinstance(data, dict):\n if depth(data) < 3:\n raise Exception(\"The minimum depth of data must be 3. 
For example: {\\'node1\\':{\\'node1_neighbor\\':{\"\n \"neighbor's attribute}}}\")\n else:\n raise AttributeError(\"Data must be stored in dictionary.\")\n\n\ndef dict_to_networkx(data):\n \"\"\"\n Convert data into networkx graph\n Args:\n data: data in dictionary type\n\n Returns: networkx graph\n\n \"\"\"\n data_checker(data)\n G = nx.Graph(data)\n return G\n\n\ndef add_attribute_to_graph(graph, attribute, adding_type):\n \"\"\"\n Add new attributes to nodes/edges\n Args:\n graph: networkx graph\n attribute: dictionary of attributes for nodes/edges\n adding_type: string of node or edge\n\n Returns:\n networkx graph with new attributes\n \"\"\"\n if isinstance(attribute, dict):\n if isinstance(graph, nx.classes.graph.Graph):\n if adding_type == 'node':\n nx.set_node_attributes(graph, attribute)\n return graph\n elif adding_type == 'edge':\n nx.set_edge_attributes(graph, attribute)\n return graph\n else:\n raise Exception(\"Adding type must be \\'node\\' or \\'edge\\'.\")\n else:\n raise Exception(\"The graph must be a networkx graph.\")\n else:\n raise AttributeError(\"Attribute must be stored in dictionary.\")\n\n\ndef get_node_attributes_index(graph, attr):\n \"\"\"\n get node index for each attributes\n Args:\n graph: networkx graph\n attr: nodes' attribute\n\n Returns:\n a dict of list which contains every attribute index\n For example: {'user': ['201','202','203','204'], 'prod': ['0', '1', '2']}\n \"\"\"\n from collections import defaultdict\n node_temp = nx.get_node_attributes(graph, attr)\n reversed_dict = defaultdict(list)\n for key, value in node_temp.items():\n reversed_dict[value].append(key)\n return reversed_dict\n\n\ndef get_edge_attributes_index(graph, attr):\n \"\"\"\n get edge index for each attributes\n Args:\n graph: networkx graph\n attr: edges' attribute\n\n Returns:\n a dict of list which contains every attribute index\n For example: {'review': [('201', '0'), ('202', '0'), ('203', '0'), ('204', '0')]}\n \"\"\"\n from collections import defaultdict\n node_temp = nx.get_edge_attributes(graph, attr)\n reversed_dict = defaultdict(list)\n for key, value in node_temp.items():\n reversed_dict[value].append(key)\n return reversed_dict\n\n\ndef node_attr_filter(graph, attr, specific_attr, into_attr):\n \"\"\"\n get specific keys, values in conditions\n Args:\n graph: networkx graph\n attr: which attribute index you want to get\n specific_attr: which specific attribute index you want to get depending on attr\n into_attr: use specific attribute index to filter the attribute\n\n Returns:\n dict(node: into_attr values)\n For example: node_attr_filter(graph, 'types', 'user', 'prior)\n will return the dict( user_id: user_id_prior)\n\n \"\"\"\n attr_dict_index = get_node_attributes_index(graph, attr)\n specific_dict = attr_dict_index[specific_attr]\n filtered_dict = dict()\n into_dict = nx.get_node_attributes(graph, into_attr)\n for i in specific_dict:\n filtered_dict[i] = into_dict[i]\n return filtered_dict\n\n\ndef edge_attr_filter(graph, attr, specific_attr, into_attr):\n \"\"\"\n get specific keys, values in conditions\n Args:\n graph: networkx graph\n attr: which attribute index you want to get\n specific_attr: which specific attribute index you want to get depending on attr\n into_attr: use specific attribute index to filter the attribute\n\n Returns:\n dict(edge: into_attr values)\n For example: edge_attr_filter(graph, 'types', 'review', 'prior)\n will return the dict(review_id: review_id_prior)\n\n \"\"\"\n attr_dict_index = get_edge_attributes_index(graph, attr)\n 
specific_dict = attr_dict_index[specific_attr]\n filtered_dict = dict()\n into_dict = nx.get_edge_attributes(graph, into_attr)\n for i in specific_dict:\n filtered_dict[i] = into_dict[i]\n return filtered_dict\n\n\ndef save_graph(graph, graph_name=False):\n \"\"\"\n\n Args:\n graph: network graph\n graph_name: the file name of the graph, if graph_name=False, use default name\n\n Returns:\n None\n \"\"\"\n from networkx.readwrite import json_graph\n import json\n data = json_graph.node_link_data(graph)\n if graph_name is False:\n graph_name = 'graph_data.json'\n with open(graph_name, 'w') as f:\n json.dump(data, f)\n f.close()\n print('Saved graph data as {}'.format(graph_name))\n\n\ndef load_graph(json_name):\n \"\"\"\n\n Args:\n json_name: json file name\n\n Returns:\n networkx graph\n \"\"\"\n from networkx.readwrite import json_graph\n import json\n with open(json_name, 'r') as f:\n data = json.load(f)\n f.close()\n graph = json_graph.node_link_graph(data)\n print('Loaded {} into the nextorkx graph'.format(json_name))\n return graph\n\n\ndef timer(func):\n \"\"\"Print the runtime of the decorated function\"\"\"\n @functools.wraps(func)\n def wrapper_timer(*args, **kwargs):\n start_time = time.perf_counter()\n value = func(*args, **kwargs)\n end_time = time.perf_counter()\n run_time = end_time - start_time\n print(\"Finished {} in {} secs\".format(func.__name__, round(run_time, 3)))\n return value\n return wrapper_timer\n\n"
] |
[
[
"sklearn.metrics.roc_auc_score",
"numpy.array",
"sklearn.metrics.average_precision_score",
"numpy.unique"
]
] |
winiar93/viadot
|
[
"7d8f8b0a30de3d40da161615639532012de072b0"
] |
[
"viadot/utils.py"
] |
[
"import re\nfrom typing import Any, Dict, List\n\nimport pandas as pd\nimport prefect\nimport pyodbc\nimport requests\nfrom prefect.utilities.graphql import EnumValue, with_args\nfrom requests.adapters import HTTPAdapter\nfrom requests.exceptions import ConnectionError, HTTPError, ReadTimeout, Timeout\nfrom requests.packages.urllib3.util.retry import Retry\nfrom urllib3.exceptions import ProtocolError\nfrom itertools import chain\nfrom .exceptions import APIError\n\n\ndef slugify(name: str) -> str:\n return name.replace(\" \", \"_\").lower()\n\n\ndef handle_api_response(\n url: str,\n auth: tuple = None,\n params: Dict[str, Any] = None,\n headers: Dict[str, Any] = None,\n timeout: tuple = (3.05, 60 * 30),\n) -> requests.models.Response:\n \"\"\"Handle and raise Python exceptions during request with retry strategy for specyfic status.\n\n Args:\n url (str): the URL which trying to connect.\n auth (tuple, optional): authorization information. Defaults to None.\n params (Dict[str, Any], optional): the request params also includes parameters such as the content type. Defaults to None.\n headers: (Dict[str, Any], optional): the request headers. Defaults to None.\n timeout (tuple, optional): the request times out. Defaults to (3.05, 60 * 30).\n\n Raises:\n ReadTimeout: stop waiting for a response after a given number of seconds with the timeout parameter.\n HTTPError: exception that indicates when HTTP status codes returned values different than 200.\n ConnectionError: exception that indicates when client is unable to connect to the server.\n APIError: defined by user.\n\n Returns:\n requests.models.Response\n \"\"\"\n try:\n session = requests.Session()\n retry_strategy = Retry(\n total=3,\n status_forcelist=[429, 500, 502, 503, 504],\n backoff_factor=1,\n )\n adapter = HTTPAdapter(max_retries=retry_strategy)\n\n session.mount(\"http://\", adapter)\n session.mount(\"https://\", adapter)\n response = session.get(\n url,\n auth=auth,\n params=params,\n headers=headers,\n timeout=timeout,\n )\n\n response.raise_for_status()\n\n except ReadTimeout as e:\n msg = \"The connection was successful, \"\n msg += f\"however the API call to {url} timed out after {timeout[1]}s \"\n msg += \"while waiting for the server to return data.\"\n raise APIError(msg)\n except HTTPError as e:\n raise APIError(\n f\"The API call to {url} failed. 
\"\n \"Perhaps your account credentials need to be refreshed?\",\n ) from e\n except (ConnectionError, Timeout) as e:\n raise APIError(f\"The API call to {url} failed due to connection issues.\") from e\n except ProtocolError as e:\n raise APIError(f\"Did not receive any reponse for the API call to {url}.\")\n except Exception as e:\n raise APIError(\"Unknown error.\") from e\n\n return response\n\n\ndef get_flow_last_run_date(flow_name: str) -> str:\n \"\"\"\n Retrieve a flow's last run date as an ISO datetime string.\n\n This function assumes you are already authenticated with Prefect Cloud.\n \"\"\"\n client = prefect.Client()\n result = client.graphql(\n {\n \"query\": {\n with_args(\n \"flow_run\",\n {\n \"where\": {\n \"flow\": {\"name\": {\"_eq\": flow_name}},\n \"start_time\": {\"_is_null\": False},\n \"state\": {\"_eq\": \"Success\"},\n },\n \"order_by\": {\"start_time\": EnumValue(\"desc\")},\n \"limit\": 1,\n },\n ): {\"start_time\"}\n }\n }\n )\n flow_run_data = result.get(\"data\", {}).get(\"flow_run\")\n\n if not flow_run_data:\n return None\n\n last_run_date_raw_format = flow_run_data[0][\"start_time\"]\n last_run_date = last_run_date_raw_format.split(\".\")[0] + \"Z\"\n return last_run_date\n\n\ndef get_sql_server_table_dtypes(\n table, con: pyodbc.Connection, schema: str = None\n) -> dict:\n \"\"\"Get column names and types from a SQL Server database table.\n\n Args:\n table (_type_): The table for which to fetch dtypes.\n con (pyodbc.Connection): The connection to the database where the table is located.\n schema (str, optional): The schema where the table is located. Defaults to None.\n\n Returns:\n dict: A dictionary of the form {column_name: dtype, column_name2: dtype2, ...}.\n \"\"\"\n\n query = f\"\"\"\n SELECT \n col.name,\n t.name,\n col.max_length\n FROM sys.tables AS tab\n INNER JOIN sys.columns AS col\n ON tab.object_id = col.object_id\n LEFT JOIN sys.types AS t\n ON col.user_type_id = t.user_type_id\n WHERE tab.name = '{table}'\n AND schema_name(tab.schema_id) = '{schema}'\n ORDER BY column_id;\n \"\"\"\n cursor = con.cursor()\n query_result = cursor.execute(query).fetchall()\n cursor.close()\n\n dtypes = {}\n for row in query_result:\n column_name = row[0]\n dtype = row[1]\n length = row[2]\n if dtype == \"varchar\":\n dtypes[column_name] = dtype + f\"({length})\"\n else:\n dtypes[column_name] = dtype\n\n return dtypes\n\n\ndef _cast_df_cols(df):\n\n df = df.replace({\"False\": False, \"True\": True})\n\n datetime_cols = (col for col, dtype in df.dtypes.items() if dtype.kind == \"M\")\n bool_cols = (col for col, dtype in df.dtypes.items() if dtype.kind == \"b\")\n int_cols = (col for col, dtype in df.dtypes.items() if dtype.kind == \"i\")\n\n for col in datetime_cols:\n df[col] = df[col].dt.strftime(\"%Y-%m-%d %H:%M:%S+00:00\")\n\n for col in bool_cols:\n df[col] = df[col].astype(pd.Int64Dtype())\n\n for col in int_cols:\n df[col] = df[col].astype(pd.Int64Dtype())\n\n return df\n\n\ndef build_merge_query(\n stg_schema: str,\n stg_table: str,\n schema: str,\n table: str,\n primary_key: str,\n con: pyodbc.Connection,\n) -> str:\n \"\"\"\n Build a merge query for the simplest possible upsert scenario:\n - updating and inserting all fields\n - merging on a single column, which has the same name in both tables\n\n Args:\n stg_schema (str): The schema where the staging table is located.\n stg_table (str): The table with new/updated data.\n schema (str): The schema where the table is located.\n table (str): The table to merge into.\n primary_key (str): The column on 
which to merge.\n con (pyodbc.Connection) The connection to the database on which the\n query will be executed.\n \"\"\"\n\n # Get column names\n columns_query = f\"\"\"\n SELECT \n col.name\n FROM sys.tables AS tab\n INNER JOIN sys.columns AS col\n ON tab.object_id = col.object_id\n WHERE tab.name = '{table}'\n AND schema_name(tab.schema_id) = '{schema}'\n ORDER BY column_id;\n \"\"\"\n cursor = con.cursor()\n columns_query_result = cursor.execute(columns_query).fetchall()\n cursor.close()\n\n columns = [tup[0] for tup in columns_query_result]\n columns_stg_fqn = [f\"stg.{col}\" for col in columns]\n\n # Build merge query\n update_pairs = [f\"existing.{col} = stg.{col}\" for col in columns]\n merge_query = f\"\"\"\n MERGE INTO {schema}.{table} existing\n USING {stg_schema}.{stg_table} stg\n ON stg.{primary_key} = existing.{primary_key}\n WHEN MATCHED\n THEN UPDATE SET {\", \".join(update_pairs)}\n WHEN NOT MATCHED\n THEN INSERT({\", \".join(columns)})\n VALUES({\", \".join(columns_stg_fqn)});\n \"\"\"\n return merge_query\n\n\ndef gen_bulk_insert_query_from_df(\n df: pd.DataFrame, table_fqn: str, chunksize=1000, **kwargs\n) -> str:\n \"\"\"\n Converts a DataFrame to a bulk INSERT query.\n\n Args:\n df (pd.DataFrame): The DataFrame which data should be put into the INSERT query.\n table_fqn (str): The fully qualified name (schema.table) of the table to be inserted into.\n\n Returns:\n str: A bulk insert query that will insert all data from `df` into `table_fqn`.\n\n Examples:\n >>> data = [(1, \"_suffixnan\", 1), (2, \"Noneprefix\", 0), (3, \"fooNULLbar\", 1, 2.34)]\n >>> df = pd.DataFrame(data, columns=[\"id\", \"name\", \"is_deleted\", \"balance\"])\n >>> df\n id name is_deleted balance\n 0 1 _suffixnan 1 NaN\n 1 2 Noneprefix 0 NaN\n 2 3 fooNULLbar 1 2.34\n >>> query = gen_bulk_insert_query_from_df(df, \"users\", status=\"APPROVED\", address=None)\n >>> print(query)\n INSERT INTO users (id, name, is_deleted, balance, status, address)\n VALUES (1, '_suffixnan', 1, NULL, 'APPROVED', NULL),\n (2, 'Noneprefix', 0, NULL, 'APPROVED', NULL),\n (3, 'fooNULLbar', 1, 2.34, 'APPROVED', NULL);\n \"\"\"\n if df.shape[1] == 1:\n raise NotImplementedError(\n \"Currently, this function only handles DataFrames with at least two columns.\"\n )\n\n def _gen_insert_query_from_records(records: List[tuple]) -> str:\n\n tuples = map(str, tuple(records))\n\n # Change Nones to NULLs\n none_nan_pattern = r\"(?<=\\W)(nan|None)(?=\\W)\"\n values = re.sub(none_nan_pattern, \"NULL\", (\",\\n\" + \" \" * 7).join(tuples))\n\n # Change the double quotes into single quotes, as explained above.\n # Note this pattern should be improved at a later time to cover more edge cases.\n double_quotes_pattern = r'(\")(.*)(\")(\\)|,)'\n values_clean = re.sub(double_quotes_pattern, r\"'\\2'\\4\", values)\n # Hacky - replaces starting and ending double quotes.\n values_clean = (\n values.replace('\",', \"',\").replace(', \"', \", '\").replace('(\"', \"('\")\n )\n\n return f\"INSERT INTO {table_fqn} ({columns})\\n\\nVALUES {values_clean}\"\n\n df = df.copy().assign(**kwargs)\n df = _cast_df_cols(df)\n\n columns = \", \".join(df.columns)\n\n tuples_raw = df.itertuples(index=False, name=None)\n # Escape values with single quotes inside by adding another single quote\n # (\"val'ue\" -> \"val''ue\").\n # As Python wraps such strings in double quotes, which are interpreted as\n # column names by SQL Server, we later also replace the double quotes with\n # single quotes.\n tuples_escaped = [\n tuple(\n f\"\"\"{value.replace(\"'\", 
\"''\")}\"\"\" if type(value) == str else value\n for value in row\n )\n for row in tuples_raw\n ]\n\n if len(tuples_escaped) > chunksize:\n insert_query = \"\"\n chunk_start = 0\n for chunk_end in range(chunksize, len(tuples_escaped), chunksize):\n chunk = tuples_escaped[chunk_start:chunk_end]\n chunk_start += chunksize\n if len(tuples_escaped) - chunk_end < chunksize:\n chunk = tuples_escaped[chunk_end:]\n chunk_insert_query = _gen_insert_query_from_records(chunk)\n insert_query += chunk_insert_query + \";\\n\\n\"\n return insert_query\n else:\n return _gen_insert_query_from_records(tuples_escaped)\n\n\ndef union_dict(*dicts):\n \"\"\"\n Function that union list of dictionaries\n\n Args:\n dicts (List[Dict]): list of dictionaries with credentials.\n\n Returns:\n Dict: A single dictionary createb by union method.\n\n Examples:\n\n >>> a = {\"a\":1}\n >>> b = {\"b\":2}\n >>> union_credentials_dict(a ,b)\n {'a': 1, 'b': 2}\n\n \"\"\"\n return dict(chain.from_iterable(dct.items() for dct in dicts))\n"
] |
[
[
"pandas.Int64Dtype"
]
] |
eM7RON/Image-Evolution
|
[
"9e554d038c14269a3ad600b38b62e8d627cbf656"
] |
[
"main.py"
] |
[
"# -*- coding: utf-8 -*-\n\nimport sys, os, time, copy, re #, shutil\n\nfrom PyQt5.QtWidgets import QMainWindow, QWidget, QApplication, QFileDialog, QDesktopWidget, QStyleFactory, \\\n QDialog, QPushButton, QVBoxLayout, QToolBar, QMessageBox, QProgressBar\nfrom PyQt5.QtCore import pyqtSignal, pyqtSlot, QThread, QUrl, Qt, QTimer\nfrom PyQt5.QtGui import QIcon, QPixmap, QImage, QPainter\nfrom PyQt5.QtSvg import QSvgWidget, QSvgRenderer\nfrom PIL import Image, ImageFilter\nfrom matplotlib import rcParams\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg, NavigationToolbar2QT\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport cv2\n\nfrom ui.load_ui import Ui_LoadWindow\nfrom ui.main_menu_ui import Ui_MainMenu\nfrom ui.gpso_setup_ui import Ui_GpsoSetupWindow\nfrom ui.gpso_display_ui import Ui_GpsoDisplayWindow\nfrom ui.confirmation_prompt_ui import Ui_ConfirmationPrompt\nfrom ui.video_maker_setup_ui import Ui_VideoMakerSetup\nfrom ui.video_maker_prompt_ui import Ui_VideoMakerPrompt\nfrom ui.video_maker_ui import Ui_VideoMaker\nfrom ui.about_ui import Ui_AboutWindow\nfrom ui.image_editor_ui import Ui_ImageEditor\nfrom utils import utils\n\nplt.style.use('dark_background')\n\nif hasattr(Qt, 'AA_UseHighDpiPixmaps'):\n QApplication.setAttribute(Qt.AA_UseHighDpiPixmaps, True)\nif hasattr(Qt, 'AA_UseHighDpiPixmaps'):\n QApplication.setAttribute(Qt.AA_UseHighDpiPixmaps, True)\n\nVALID_COLOR = \"#c4df9b\" # Green\nWARNING_COLOR = \"#fff79a\" # Yellow\nINVALID_COLOR = \"#f6989d\" # Red\nCWDIR = os.path.dirname(os.path.realpath(__file__))\nICON_PATH = os.path.join(CWDIR, 'img', 'dna.svg')\nIMG_FORMATS = (\".jpg\", \".jpeg\", \".jpe\", \".jfif\", \".png\")\nIMG_FORMATS_STR = \"Image formats (*.jpg *.jpeg *.jpe *.jfif *.png)\"\n\nprev_ = None # Will hold previous screen for navigation back buttons\n\n\nclass VideoMakerPrompt(QWidget, Ui_VideoMakerPrompt):\n '''\n Prompts user to open VideoMaker after the algorithm has finished\n or has been stopped\n '''\n id_ = 'video_maker_prompt'\n\n def __init__(self, svg_dir=None, parent=None):\n super(VideoMakerPrompt, self).__init__(parent=parent)\n self.setupUi(self)\n self.setWindowIcon(ICON)\n self.yes_button.clicked.connect(lambda: navigate(self, VideoMakerSetup(svg_dir)))\n self.no_button.clicked.connect(end_program)\n\n\nclass VideoMakerSetup(QWidget, Ui_VideoMakerSetup):\n\n id_ = 'video_maker_setup'\n container_map = {\n 'H264': 'mp4',\n 'H265': 'mp4',\n 'DIVX': 'avi',\n }\n def __init__(self, svg_dir=None, parent=None):\n super(VideoMakerSetup, self).__init__(parent=parent)\n self.setupUi(self)\n self.setWindowIcon(ICON)\n\n # Override self.show to switch back/exit \n # button text depending on previous screen\n # when self.show is called\n self.__show = copy.copy(self.show)\n self.show = self._show\n\n self.sampling_method_map = {\n 'start': self.validate_strt_stop_value,\n 'stop': self.validate_strt_stop_value,\n 'step': self.validate_step_value,\n }\n\n ##############################################################################\n # Validated Control Variables #\n ##############################################################################\n\n self.inputs = {k: v for k, v in self.__dict__.items() if k.endswith((\"_entry\", \"_line_edit\"))}\n\n for v in self.inputs.values():\n v.valid = False\n\n self.validated_controls = [self.run_button]\n self.close_prompt = ConfirmationPrompt(end_program, self, window_title='๐ideo๐aker', message='Quit')\n\n self.video_codecs = ['h264', 'x264', 'X264', 'h265', 'H264', 'H265', 'DIVX', 
'avc1', 'avc3', 'hev1', 'hvc1', 'vc-1', 'drac',\n 'vp09', 'av01', 'ac-3', 'ec-3', 'fLaC', 'tx3g', 'gpmd', 'mp4v', 'Opus', ]\n self.video_codec_dropdown.addItems(self.video_codecs)\n self.video_codec_dropdown.setCurrentText('avc3')\n\n self.sampling_methods = ['None', \n 'Linear forward', \n 'Linear backward', \n 'Exponential growth forward', \n 'Exponential growth backward', \n 'Exponential decay forward', \n 'Exponential decay backward']\n self.sampling_method_dropdown.addItems(self.sampling_methods)\n self.sampling_method_dropdown.activated.connect(self.validate_sampling_method_controls)\n self.sampling_method_dropdown.setCurrentText('None')\n\n self.sampling_method_controls = [self.start_entry, self.stop_entry, self.step_entry]\n self.start_entry.textChanged.connect(self.validate_sampling_method_controls)\n self.start_entry.setText(' ')\n self.stop_entry.textChanged.connect(self.validate_sampling_method_controls)\n self.stop_entry.setText(' ')\n self.step_entry.textChanged.connect(self.validate_sampling_method_controls)\n self.step_entry.setText(' ')\n\n\n self.containers = ['mp4', 'mov', 'mkv', 'avi', 'divx', 'flv', 'mpg', 'mpeg']\n self.valid_exts = tuple('.' + x for x in self.containers)\n self.container_dropdown.addItems(self.containers)\n self.container_dropdown.setCurrentIndex(self.containers.index('mp4'))\n self.container_dropdown.activated.connect(self.update_save_as)\n\n self.resolutions = ['720p', '1080p', '1440p', '4K', '8K']\n self.resolution_dropdown.addItems(self.resolutions)\n self.resolution_dropdown.setCurrentIndex(self.resolutions.index('1080p'))\n\n self.save_as_line_edit.textChanged.connect(lambda: validate_save_name(self, self.valid_exts))\n self.save_as_line_edit.textChanged.emit('')\n self.save_as_file_browser.clicked.connect(lambda: self.save_as(\n 'Where would you like to save?', \n 'Video ( ' + ''.join([f'*{x} ' for x in self.containers])[: -1] + ')',\n ))\n\n self.svg_folder_line_edit.textChanged.connect(self.validate_dir)\n self.svg_folder_line_edit.setText(svg_dir)\n self.svg_folder_line_edit.textChanged.emit(svg_dir)\n self.svg_folder_browser.clicked.connect(lambda: get_dir(self, self.svg_folder_line_edit))\n\n self.fps_entry.textChanged.connect(lambda: self.validate_value(self.fps_entry))\n self.fps_entry.textChanged.emit(self.fps_entry.text())\n\n self.run_button.clicked.connect(self.run)\n\n def _show(self):\n '''\n This method overwrites the built-in self.show method during __init__(). \n It preserves the orginal self.show functionality but adds the ability to \n switch between back and exit for one of the buttons depending on the \n previous screen. 
It is called with self.show().\n '''\n if prev_.id_ == 'main_menu':\n self.exit_button.setText('Back')\n disconnect(self.exit_button)\n self.exit_button.clicked.connect(lambda: navigate(self, prev_))\n else:\n self.exit_button.setText('Exit')\n disconnect(self.exit_button)\n self.exit_button.clicked.connect(self.close_prompt.show)\n self.__show()\n\n @pyqtSlot()\n def validate_dir(self):\n dir_ = self.svg_folder_line_edit.text().strip()\n if dir_ and os.path.exists(os.path.normpath(dir_)):\n color = VALID_COLOR\n valid = True\n else:\n color = INVALID_COLOR\n valid = False\n self.svg_folder_line_edit.setStyleSheet(f\"QLineEdit {{ background-color: {color} }}\")\n self.svg_folder_line_edit.valid = valid\n ready_check(self)\n\n def save_as(self, message='Save as...', exts_str='All Files (*);;Text Files (*.txt)'):\n '''\n Generic dialog for selectring where to save\n '''\n options = QFileDialog.Options()\n options |= QFileDialog.DontUseNativeDialog\n fname, _ = QFileDialog.getSaveFileName(self, message, '', exts_str, options=options)\n if fname:\n if fname.endswith(self.valid_exts):\n self.save_as_line_edit.setText(fname)\n ext = fname.split('.')[-1]\n self.container_dropdown.setCurrentIndex(self.containers.index(ext))\n else:\n self.save_as_line_edit.setText(utils.replace_extension(fname, self.container_dropdown.currentText()))\n\n def update_save_as(self):\n if self.save_as_line_edit.valid:\n path = self.save_as_line_edit.text()\n if path.strip():\n dir_, fname = os.path.split(path)\n fname = utils.replace_extension(fname, self.container_dropdown.currentText())\n self.save_as_line_edit.setText(os.path.join(dir_, fname))\n\n def validate_sampling_method_controls(self):\n for control, label in zip(self.sampling_method_controls, ['start', 'stop', 'step']):\n self.sampling_method_map[label](control)\n for control, label in zip(self.sampling_method_controls, ['start', 'stop', 'step']):\n self.sampling_method_map[label](control)\n\n @pyqtSlot()\n def validate_strt_stop_value(self, control):\n control_text = control.text()\n method_text = self.sampling_method_dropdown.currentText()\n if not control_text.strip() and method_text == 'None':\n if all(c.valid for c in self.sampling_method_controls):\n color = VALID_COLOR\n valid = True\n else:\n color = WARNING_COLOR\n valid = True\n else:\n try:\n num = round(float(control_text))\n except ValueError:\n color = INVALID_COLOR\n valid = False\n else:\n u_score = '_' in control_text\n if method_text != 'None' and not u_score:\n if all(c.valid for c in self.sampling_method_controls):\n color = VALID_COLOR\n valid = True\n else:\n color = WARNING_COLOR\n valid = True\n elif method_text == 'None' and not u_score:\n color = WARNING_COLOR\n valid = False\n else:\n color = INVALID_COLOR\n valid = False\n control.setStyleSheet(f\"QLineEdit {{ background-color: {color} }}\")\n control.valid = valid\n ready_check(self)\n\n @pyqtSlot()\n def validate_step_value(self, control):\n control_text = control.text()\n method_text = self.sampling_method_dropdown.currentText()\n if not control_text.strip() and method_text == 'None':\n if all(c.valid for c in self.sampling_method_controls):\n color = VALID_COLOR\n valid = True\n else:\n color = WARNING_COLOR\n valid = True\n else:\n try:\n num = float(control_text)\n except ValueError:\n color = INVALID_COLOR\n valid = False\n else:\n u_score = '_' in control_text\n if method_text != 'None' and num > 0 and not u_score:\n if all(c.valid for c in self.sampling_method_controls):\n color = VALID_COLOR\n valid = True\n else:\n 
color = WARNING_COLOR\n valid = True\n elif method_text == 'None' and num > 0 and not u_score:\n color = WARNING_COLOR\n valid = False\n else:\n color = INVALID_COLOR\n valid = False\n control.setStyleSheet(f\"QLineEdit {{ background-color: {color} }}\")\n control.valid = valid\n ready_check(self)\n\n @pyqtSlot()\n def validate_value(self, control):\n text = control.text()\n try:\n num = round(float(text))\n except ValueError:\n color = INVALID_COLOR\n valid = False\n else:\n u_score = '_' in text\n if num >= 1 and not u_score:\n color = VALID_COLOR\n valid = True\n else:\n color = INVALID_COLOR\n valid = False\n finally:\n control.setStyleSheet(f\"QLineEdit {{ background-color: {color} }}\")\n control.valid = valid\n ready_check(self)\n\n @pyqtSlot()\n def run(self):\n\n sampling_method = self.sampling_method_dropdown.currentText()\n if sampling_method == 'None':\n strt = None\n stop = None\n step = None\n else:\n strt = round(float(self.start_entry.text()))\n stop = round(float(self.stop_entry.text()))\n step = float(self.step_entry.text())\n\n kwargs = dict(\n save_name = self.save_as_line_edit.text(),\n input_dir = self.svg_folder_line_edit.text(),\n video_codec = self.video_codec_dropdown.currentText(),\n resolution = self.resolution_dropdown.currentText(),\n fps = round(float(self.fps_entry.text())),\n sampling_method = sampling_method,\n strt = strt,\n stop = stop,\n step = step,\n )\n\n navigate(self, VideoMaker(**kwargs))\n\n\nclass VideoMaker(QWidget, Ui_VideoMaker):\n '''\n Makes videos of the evolving images\n '''\n id_ = 'video_maker'\n\n resolution_map = {\n '720p' : (1280, 720),\n '1080p': (1920, 1080),\n '1440p': (2560, 1440),\n '4K' : (3840, 2160),\n '8K' : (7680, 4320),\n }\n\n def __init__(self, parent=None, **kwargs):\n super(VideoMaker, self).__init__(parent=parent)\n self.setupUi(self)\n self.setWindowIcon(ICON)\n\n self.__show = copy.copy(self.show)\n self.show = self._show\n\n self.close_prompt = ConfirmationPrompt(end_program, self, window_title='๐ideo๐aker', message='Quit')\n self.back_button.setEnabled(False)\n self.exit_button.setEnabled(False)\n self.back_button.clicked.connect(lambda: navigate(self, prev_))\n self.exit_button.clicked.connect(self.close_prompt.show)\n\n self.resolution = kwargs.get('resolution', '1080p')\n self.resolution = self.resolution_map[self.resolution]\n self.fps = kwargs.get('fps', 30)\n self.video_codec = kwargs.get('video_codec', 'avc3')\n self.input_dir = kwargs.get('input_dir', '.\\\\')\n self.save_name = kwargs['save_name']\n self.sampling_method = kwargs['sampling_method']\n self.strt = kwargs['strt']\n self.stop = kwargs['stop']\n self.step = kwargs['step']\n\n def _show(self):\n\n self.__show()\n self.progress_bar_label.setText('Status: Scanning for SVG images...')\n # Load up all the images and sort them in natural sorting order\n fnames = sorted(utils.directory_explorer('svg', self.input_dir), key=utils.natural_order)\n # Sub sample fnames\n if self.sampling_method != 'None':\n if self.sampling_method.startswith('Linear'):\n fnames = utils.expo_sample(fnames, self.strt, self.stop, round(self.step))\n if self.sampling_method.endswith('backward'):\n fnames.reverse()\n else:\n fnames = utils.expo_sample(\n fnames,\n self.strt,\n self.stop, \n self.step,\n decay =self.sampling_method.split()[1]=='decay',\n reverse=self.sampling_method.endswith('backward'))\n # Navigate to input dir\n os.chdir(self.input_dir)\n # get the dimensions of the original image\n img_dims = utils.get_svg_dimensions(fnames[0])\n # Resize to a screen 
resolution whilst attempting to preserve aspect ratio of original image\n width, height = utils.fit_to_screen(self.resolution, img_dims)\n\n n = len(fnames)\n step = 100. / n\n progress = 0.\n done = 0\n self.total_value.setText(str(n))\n self.done_value.setText('0')\n\n svg_widget = QSvgWidget()\n svg_widget.setFixedSize(width, height)\n pixmap = QPixmap(svg_widget.size())\n # X264, .mkv, 60fps, 1080p\n # avc1, .mov \n # https://github.com/cisco/openh264/releases 'X264', 'DIVX'\n video_writer = cv2.VideoWriter(self.save_name, cv2.VideoWriter_fourcc(*self.video_codec), self.fps, (width, height))\n\n self.progress_bar_label.setText('Status: Rendering video...')\n\n for fn in fnames:\n svg_widget.renderer().load(fn)\n svg_widget.render(pixmap)\n svg_widget.hide()\n svg_widget.close()\n\n img = pixmap.toImage()\n arr = utils.qimage_to_array(img)[:, :, :3]\n video_writer.write(arr)\n\n progress += step\n done += 1\n self.progress_bar.setValue(round(progress))\n self.done_value.setText(str(done))\n QApplication.processEvents()\n\n cv2.destroyAllWindows()\n video_writer.release()\n\n self.exit_button.setEnabled(True)\n self.back_button.setEnabled(True)\n self.progress_bar_label.setText('Status: Finished')\n\n\nclass AboutWindow(QWidget, Ui_AboutWindow):\n '''\n A simple explanation of the sofware\n '''\n id_ = 'about_window'\n\n def __init__(self, parent=None):\n super(AboutWindow, self).__init__(parent=parent)\n self.setupUi(self)\n self.setWindowIcon(ICON)\n self.back_button.clicked.connect(lambda: navigate(self, prev_))\n\n\nclass MainMenu(QWidget, Ui_MainMenu):\n '''\n The main hub of the software where different algorithms and options can be selected\n '''\n id_ = 'main_menu'\n\n def __init__(self, parent=None):\n super(MainMenu, self).__init__(parent=parent)\n self.setupUi(self)\n self.setWindowIcon(ICON)\n\n # Centre the application on the current display\n screen_geometry = QDesktopWidget().screenGeometry()\n w, h = self.geometry().width(), self.geometry().height()\n x = round(screen_geometry.width() / 2 - w / 2)\n y = round(screen_geometry.height() / 2 - h / 2)\n self.setGeometry(x, y, w, h)\n # Initialize the 'exit dialog'\n self.close_prompt = ConfirmationPrompt(end_program, self, message='Quit')\n\n self.load_button.clicked.connect(lambda: navigate(self, load_window))\n self.gpso_button.clicked.connect(lambda: navigate(self, gpso_setup_window))\n self.about_button.clicked.connect(lambda: navigate(self, about_window))\n self.image_editor_button.clicked.connect(lambda: navigate(self, image_editor))\n self.video_maker_button.clicked.connect(lambda: navigate(self, video_maker_setup))\n self.exit_button.clicked.connect(self.close_prompt.show)\n\n\nclass LoadWindow(QWidget, Ui_LoadWindow):\n '''\n Load up a previously saved algorithm\n '''\n id_ = 'load_window'\n\n def __init__(self, parent=None):\n super(LoadWindow, self).__init__(parent=parent)\n self.setupUi(self)\n self.setWindowIcon(ICON)\n\n ##############################################################################\n # Validated Control Variables #\n ##############################################################################\n\n self.inputs = {k: v for k, v in self.__dict__.items() if k.endswith((\"_input\", \"_line_edit\"))}\n\n for v in self.inputs.values():\n v.valid = False\n\n self.validated_controls = [self.run_button]\n\n ##############################################################################\n # Set Connections #\n ##############################################################################\n\n # IO\n 
self.progress_file_browser.clicked.connect(lambda: get_file(self, self.progress_file_line_edit, \n \"Select a previously saved optimizer\", \"Pickle files (*.pkl)\"))\n self.output_folder_browser.clicked.connect(lambda: get_dir(self, self.output_folder_line_edit))\n\n # Text input\n self.output_folder_line_edit.textChanged.connect(self.validateOutputDir)\n self.output_folder_line_edit.textChanged.emit(self.output_folder_line_edit.text())\n self.progress_file_line_edit.textChanged.connect(self.validateProgressDir)\n self.progress_file_line_edit.textChanged.emit(self.progress_file_line_edit.text())\n\n # Navigation\n self.back_button.clicked.connect(lambda: navigate(self, prev_))\n self.run_button.clicked.connect(self.run)\n\n @pyqtSlot()\n def validateProgressDir(self):\n dir_ = self.progress_file_line_edit.text().strip()\n if dir_ and os.path.exists(os.path.normpath(dir_)) and dir_.endswith('.pkl'):\n color = VALID_COLOR\n valid = True\n else:\n color = INVALID_COLOR\n valid = False\n self.progress_file_line_edit.setStyleSheet(f\"QLineEdit {{ background-color: {color} }}\")\n self.progress_file_line_edit.valid = valid\n ready_check(self)\n\n @pyqtSlot()\n def validateOutputDir(self):\n dir_ = self.output_folder_line_edit.text().strip()\n if dir_ and os.path.exists(os.path.normpath(dir_)):\n color = VALID_COLOR\n valid = True\n elif not dir_:\n color = WARNING_COLOR\n valid = True\n else:\n color = INVALID_COLOR\n valid = False\n self.output_folder_line_edit.setStyleSheet(f\"QLineEdit {{ background-color: {color} }}\")\n self.output_folder_line_edit.valid = valid\n ready_check(self)\n \n @pyqtSlot()\n def run(self):\n kwargs = dict(\n output_dir = self.output_folder_line_edit.text(),\n progress_dir = self.progress_file_line_edit.text(),\n load_flag = True,\n )\n\n global gpso_display_window\n gpso_display_window = GpsoDisplayWindow()\n gpso_display_window.init(**kwargs)\n self.close()\n\n\nclass ImageEditor(QWidget, Ui_ImageEditor):\n '''\n Setup for the GPSO algorithm\n '''\n id_ = 'gpso_setup_window'\n preset_filters = [\n '', 'blur', 'contour', 'detail', 'edge_enhance', 'edge_enhance_more',\n 'emboss', 'sharpen', \n 'smooth', 'smooth_more'\n ]\n filters = [\n '', 'box', 'gaussian', 'median', 'mode', 'min', 'max'\n ]\n filter_map = {\n 'box' : 'BoxBlur',\n 'gaussian': 'GaussianBlur',\n 'median' : 'MedianFilter',\n 'mode' : 'ModeFilter',\n 'min' : 'MinFilter',\n 'max' : 'MaxFilter',\n }\n\n def __init__(self, parent=None):\n super(ImageEditor, self).__init__(parent=parent)\n self.setupUi(self)\n self.setWindowIcon(ICON)\n\n self.message_timer = QTimer(self)\n self.message_timer.setInterval(2500)\n self.message_timer.timeout.connect(self.clear_message)\n\n # For Validating inputs and identifying ready state\n self.inputs = {k: v for k, v in self.__dict__.items() if k.endswith((\"_entry\", \"_line_edit\"))}\n\n for v in self.inputs.values():\n v.valid = False\n\n self.validated_controls = [self.save_button, self.apply_button]\n\n self.input_file_browser.clicked.connect(lambda: get_file (\n self, \n self.input_file_line_edit, \n \"Select an input image\", \n IMG_FORMATS_STR,\n ))\n self.input_file_line_edit.textChanged.connect(lambda: validate_file(self, IMG_FORMATS))\n self.input_file_line_edit.textChanged.connect(self.load_img)\n self.input_file_line_edit.textChanged.emit(self.input_file_line_edit.text())\n\n self.save_as_browser.clicked.connect(lambda: self.save_as(\n exts_str=f'Image (*.{self.input_ext()})'))\n self.save_as_line_edit.textChanged.connect(lambda: validate_save_name(self, 
self.input_ext()))\n self.save_as_line_edit.textChanged.emit('')\n \n self.size_dropdown.addItems(['64', '128', '256', '512', '1024', '2048', '4096'])\n self.size_dropdown.setCurrentText('256')\n\n self.kernel_size_entry.textChanged.connect(self.validate_kernel_size)\n self.kernel_size_entry.textChanged.emit('')\n\n self.filter_dropdown.addItems(self.filters)\n self.filter_dropdown.activated.connect(lambda: self.preset_filter_dropdown.setCurrentText(''))\n self.filter_dropdown.setCurrentText('')\n self.preset_filter_dropdown.addItems(self.preset_filters)\n self.preset_filter_dropdown.activated.connect(lambda: self.filter_dropdown.setCurrentText(''))\n self.preset_filter_dropdown.setCurrentText('')\n\n self.back_button.clicked.connect(lambda: navigate(self, prev_))\n self.save_button.clicked.connect(self.save_img)\n self.apply_button.clicked.connect(self.apply_changes)\n\n def load_img(self):\n path = os.path.normpath(self.input_file_line_edit.text())\n if path.endswith(IMG_FORMATS):\n try:\n self.img = Image.open(path).convert('RGBA')\n self.display_img()\n except (FileNotFoundError, AttributeError, PermissionError):\n pass\n\n def resize_img(self):\n target = int(self.size_dropdown.currentText())\n w, h = utils.fit_to_screen((target, target), self.img.size)\n self.img = self.img.resize((w, h))\n\n def save_img(self):\n self.img.save(self.save_as_line_edit.text(), 'PNG')\n self.display_message('Saved...')\n\n def display_img(self):\n size = utils.fit_to_screen((192, 108), self.img.size)\n img = self.img.resize(size)\n data = img.tobytes()\n img = QImage(data, *img.size, QImage.Format_RGBA8888)\n pixmap = QPixmap.fromImage(img)\n self.img_widget.setPixmap(pixmap)\n self.img_widget.show()\n\n def apply_changes(self):\n\n self.resize_img()\n filter_required = False\n\n if self.preset_filter_dropdown.currentText():\n filter_name = self.preset_filter_dropdown.currentText().upper()\n filter_required = True\n elif self.filter_dropdown.currentText():\n filter_name = self.filter_map[self.filter_dropdown.currentText()]\n filter_required = True\n \n if filter_required:\n filter_ = getattr(ImageFilter, filter_name)\n\n self.img = Image.filter(filter_(float(self.kernel_size_entry.text())))\n self.display_img()\n self.display_message('Changes applied...')\n \n\n def input_ext(self):\n return self.input_file_line_edit.text().split('.')[-1]\n \n def save_as(self, message='Save as...', exts_str='All Files (*);;Text Files (*.txt)'):\n '''\n Generic dialog for selectring where to save\n '''\n options = QFileDialog.Options()\n options |= QFileDialog.DontUseNativeDialog\n fname, _ = QFileDialog.getSaveFileName(self, message, '', exts_str, options=options)\n if fname:\n if fname.endswith(IMG_FORMATS):\n self.save_as_line_edit.setText(fname)\n else:\n self.save_as_line_edit.setText(utils.replace_extension(fname, self.input_ext()))\n\n @pyqtSlot()\n def validate_kernel_size(self):\n text = self.kernel_size_entry.text()\n kernel1 = self.filter_dropdown.currentText()\n kernel2 = self.preset_filter_dropdown.currentText()\n if not any([text, kernel1, kernel2]):\n color = VALID_COLOR\n valid = True\n elif any([kernel1, kernel2]) and text:\n try:\n num = float(text)\n except ValueError:\n color = INVALID_COLOR\n valid = False\n else:\n u_score = '_' in text\n if num > 0 and not u_score:\n color = VALID_COLOR\n valid = True\n else:\n color = INVALID_COLOR\n valid = False\n else:\n color = INVALID_COLOR\n valid = False\n self.kernel_size_entry.setStyleSheet(f\"QLineEdit {{ background-color: {color} }}\")\n 
self.kernel_size_entry.valid = valid\n ready_check(self)\n\n @pyqtSlot()\n def display_message(self, message):\n self.message_label.setText(message)\n self.message_timer.start()\n\n @pyqtSlot()\n def clear_message(self):\n self.message_timer.start()\n self.message_label.setText('')\n\n \nclass GpsoSetupWindow(QWidget, Ui_GpsoSetupWindow):\n '''\n Setup for the GPSO algorithm\n '''\n id_ = 'gpso_setup_window'\n\n def __init__(self, parent=None):\n super(GpsoSetupWindow, self).__init__(parent=parent)\n self.setupUi(self)\n self.setWindowIcon(ICON)\n\n # For Validating inputs and identifying ready state\n self.inputs = {k: v for k, v in self.__dict__.items() if k.endswith((\"_entry\", \"_line_edit\"))}\n\n for v in self.inputs.values():\n v.valid = False\n\n self.validated_controls = [self.run_button]\n\n ##############################################################################\n # Set Defaults #\n ##############################################################################\n\n # Shape types dropdown\n self.shape_types = ['circle', 'ellipse', 'square', 'rectangle', 'polygon']\n self.shape_type_dropdown.addItems(self.shape_types)\n\n self.shape_type_dropdown.setCurrentIndex(self.shape_types.index('polygon'))\n\n self.bg_color_dropdown.addItems(['black', 'white'])\n self.bg_color_dropdown.setCurrentText('black')\n\n self.shape_management_methods = ['velocity', 'probabilistic', 'periodic']\n self.shape_management_method_dropdown.addItems(self.shape_management_methods)\n self.shape_management_method_dropdown.setCurrentIndex(self.shape_management_methods.index('probabilistic'))\n\n ##############################################################################\n # Set Connections #\n ############################################################################## \n\n # I O\n self.input_file_browser.clicked.connect(lambda: get_file (\n self, \n self.input_file_line_edit, \n \"Select an input image\", \n IMG_FORMATS_STR,\n ))\n self.output_folder_browser.clicked.connect(lambda: get_dir(self, self.output_folder_line_edit))\n self.progress_folder_browser.clicked.connect(lambda: get_dir(self, self.progress_folder_line_edit))\n\n # Shape type dropdown\n self.shape_type_dropdown.currentIndexChanged.connect(self.shape_type_state)\n\n # Shape increase method dropdown\n self.shape_management_method_dropdown.currentIndexChanged.connect(self.shapeManagementMethodState)\n self.shape_management_method_dropdown.setCurrentIndex(self.shape_management_methods.index('velocity'))\n\n # Slider connections\n self.n_pop_slider.valueChanged.connect(lambda: self.updateSpinBoxSlider(self.n_pop_slider, self.n_pop_spinb))\n self.n_vert_slider.valueChanged.connect(lambda: self.updateSpinBoxSlider(self.n_vert_slider, self.n_vert_spinb))\n self.init_shape_slider.id = 'init_shape_slider'\n self.max_shape_slider.id = 'max_shape_slider'\n self.init_shape_slider.valueChanged.connect(lambda: self.update_n_shapespinBoxSlider(self.init_shape_slider, self.init_shape_spinb))\n self.max_shape_slider.valueChanged.connect(lambda: self.update_n_shapespinBoxSlider(self.max_shape_slider, self.max_shape_spinb))\n\n self.x_bits_slider.valueChanged.connect(lambda: self.updateSpinBoxSlider(self.x_bits_slider, self.x_bits_spinb))\n self.y_bits_slider.valueChanged.connect(lambda: self.updateSpinBoxSlider(self.y_bits_slider, self.y_bits_spinb))\n self.c_bits_slider.valueChanged.connect(lambda: self.updateSpinBoxSlider(self.c_bits_slider, self.c_bits_spinb))\n\n # Spin box connections\n self.n_pop_spinb.valueChanged.connect(lambda: 
self.updateSpinBoxSlider(self.n_pop_spinb, self.n_pop_slider))\n self.n_vert_spinb.valueChanged.connect(lambda: self.updateSpinBoxSlider(self.n_vert_spinb, self.n_vert_slider))\n self.init_shape_spinb.id = 'init_shape_spinb'\n self.max_shape_spinb.id = 'max_shape_spinb'\n self.init_shape_spinb.valueChanged.connect(lambda: self.update_n_shapespinBoxSlider(self.init_shape_spinb, self.init_shape_slider))\n self.max_shape_spinb.valueChanged.connect(lambda: self.update_n_shapespinBoxSlider(self.max_shape_spinb, self.max_shape_slider))\n\n self.x_bits_spinb.valueChanged.connect(lambda: self.updateSpinBoxSlider(self.x_bits_spinb, self.x_bits_slider))\n self.y_bits_spinb.valueChanged.connect(lambda: self.updateSpinBoxSlider(self.y_bits_spinb, self.y_bits_slider))\n self.c_bits_spinb.valueChanged.connect(lambda: self.updateSpinBoxSlider(self.c_bits_spinb, self.c_bits_slider))\n\n # Navigation\n self.back_button.clicked.connect(lambda: navigate(self, prev_))\n self.run_button.clicked.connect(self.run)\n\n # Text input\n self.input_file_line_edit.textChanged.connect(lambda: validate_file(self, IMG_FORMATS))\n self.input_file_line_edit.textChanged.emit(self.input_file_line_edit.text())\n self.input_file_line_edit.textChanged.connect(self.validateBitLengths)\n self.input_file_line_edit.textChanged.emit(self.input_file_line_edit.text())\n\n self.output_freq_entry.textChanged.connect(self.validateOutputFreq)\n self.output_freq_entry.textChanged.emit(self.output_freq_entry.text())\n self.output_freq_entry.textChanged.connect(self.validateOutputDir)\n self.output_freq_entry.textChanged.emit(self.output_freq_entry.text())\n self.output_folder_line_edit.textChanged.connect(self.validateOutputDir)\n self.output_folder_line_edit.textChanged.emit(self.output_folder_line_edit.text())\n self.save_freq_entry.textChanged.connect(self.validateSaveFreq)\n self.save_freq_entry.textChanged.emit(self.save_freq_entry.text())\n self.progress_folder_line_edit.textChanged.connect(self.validateProgressDir)\n self.progress_folder_line_edit.textChanged.emit(self.progress_folder_line_edit.text())\n\n self.shape_management_value_entry.textChanged.connect(lambda: self.validateShapeManagementValue(self.shape_management_value_entry))\n self.shape_management_value_entry.textChanged.emit(self.shape_management_value_entry.text())\n\n # Weights\n self.w0_entry.textChanged.connect(lambda: self.validateWeight(self.w0_entry))\n self.w0_entry.textChanged.emit(self.w0_entry.text())\n self.w1_entry.textChanged.connect(lambda: self.validateWeight(self.w1_entry))\n self.w1_entry.textChanged.emit(self.w0_entry.text())\n self.w2_entry.textChanged.connect(lambda: self.validateWeight(self.w2_entry))\n self.w2_entry.textChanged.emit(self.w0_entry.text())\n\n # Mutation rate\n self.m_bit_flip_entry.textChanged.connect(lambda: self.validateMutationRate(self.m_bit_flip_entry))\n self.m_bit_flip_entry.textChanged.emit(self.m_bit_flip_entry.text())\n self.m_shape_swap_entry.textChanged.connect(lambda: self.validateMutationRate(self.m_shape_swap_entry))\n self.m_shape_swap_entry.textChanged.emit(self.m_shape_swap_entry.text())\n\n @pyqtSlot()\n def updateSpinBoxSlider(self, sender, receiver):\n receiver.setValue(sender.value())\n\n @pyqtSlot()\n def update_n_shapespinBoxSlider(self, sender, receiver):\n value = sender.value()\n receiver.setValue(value)\n if sender.id.startswith('init') and value > self.max_shape_slider.value():\n self.max_shape_slider.setValue(value)\n self.max_shape_spinb.setValue(value)\n if sender.id.startswith('max') and value < 
self.init_shape_slider.value():\n self.init_shape_slider.setValue(value)\n self.init_shape_spinb.setValue(value)\n\n def set_uni_verts(self):\n self.n_vert_spinb.setValue(1)\n self.n_vert_spinb.setReadOnly(True)\n self.n_vert_slider.setMinimum(1)\n self.n_vert_slider.setMaximum(1)\n\n def set_dual_verts(self):\n self.n_vert_spinb.setValue(2)\n self.n_vert_spinb.setReadOnly(True)\n self.n_vert_slider.setMinimum(2)\n self.n_vert_slider.setMaximum(2)\n\n def set_multi_verts(self):\n self.n_vert_spinb.setReadOnly(False)\n self.n_vert_slider.setMinimum(3)\n self.n_vert_slider.setMaximum(20)\n\n @pyqtSlot()\n def shape_type_state(self):\n shape_type = self.shape_type_dropdown.currentText()\n if shape_type in {'circle', 'square'}:\n self.set_uni_verts()\n elif shape_type in {'ellipse', 'rectangle'}:\n self.set_dual_verts()\n else:\n self.set_multi_verts()\n\n @pyqtSlot()\n def shapeManagementMethodState(self):\n shape_management_method = self.shape_management_method_dropdown.currentText()\n if shape_management_method == 'probabilistic':\n self.shape_management_value_label.setText('probability')\n self.shape_management_value_entry.setText('1e-3')\n else:\n self.shape_management_value_label.setText('iterations')\n self.shape_management_value_entry.setText('150')\n\n @pyqtSlot()\n def validateShapeManagementValue(self, sender):\n shape_management_method = self.shape_management_method_dropdown.currentText()\n if shape_management_method == 'probabilistic':\n self.validateWeight(sender)\n else:\n self.validateIterations(sender)\n ready_check(self) \n\n @pyqtSlot()\n def validateIterations(self, sender):\n text = sender.text()\n try:\n num = round(float(text))\n except ValueError:\n color = INVALID_COLOR\n valid = False\n else:\n u_score = '_' in text\n if num >= 1 and not u_score:\n color = VALID_COLOR\n valid = True\n else:\n color = INVALID_COLOR\n valid = False\n finally:\n self.shape_management_value_entry.setStyleSheet(f\"QLineEdit {{ background-color: {color} }}\")\n self.shape_management_value_entry.valid = valid\n ready_check(self)\n\n @pyqtSlot()\n def validateOutputDir(self):\n dir_ = self.output_folder_line_edit.text().strip()\n if dir_ and os.path.exists(os.path.normpath(dir_)):# \\\n #or not dir_ and self.output_freq_entry.valid:\n color = VALID_COLOR\n valid = True\n elif not dir_:\n color = WARNING_COLOR\n valid = True\n else:\n color = INVALID_COLOR\n valid = False\n self.output_folder_line_edit.setStyleSheet(f\"QLineEdit {{ background-color: {color} }}\")\n self.output_folder_line_edit.valid = valid\n ready_check(self)\n\n @pyqtSlot()\n def validateProgressDir(self):\n dir_ = self.progress_folder_line_edit.text().strip()\n if dir_ and os.path.exists(os.path.normpath(dir_)):# \\\n #or not dir_ and self.save_freq_entry.valid:\n color = VALID_COLOR\n valid = True\n else:\n color = INVALID_COLOR\n valid = False\n self.progress_folder_line_edit.setStyleSheet(f\"QLineEdit {{ background-color: {color} }}\")\n self.progress_folder_line_edit.valid = valid\n ready_check(self)\n\n @pyqtSlot()\n def validateOutputFreq(self):\n text = self.output_freq_entry.text().strip()\n try:\n num = round(float(text))\n except ValueError:\n color = INVALID_COLOR\n valid = False\n else:\n u_score = '_' in text\n not_output_dir = self.output_folder_line_edit.text().strip()\n if num >= 1 and not u_score \\\n or num == 0 and not not_output_dir \\\n or not text == '' and not not_output_dir:\n color = VALID_COLOR\n valid = True\n else:\n color = INVALID_COLOR\n valid = False\n finally:\n 
self.output_freq_entry.setStyleSheet(f\"QLineEdit {{ background-color: {color} }}\")\n self.output_freq_entry.valid = valid\n ready_check(self)\n\n @pyqtSlot()\n def validateSaveFreq(self):\n text = self.save_freq_entry.text().strip()\n try:\n num = round(float(text))\n except ValueError:\n color = INVALID_COLOR\n valid = False\n else:\n u_score = '_' in text\n if num >= 1 or num == 0:\n color = VALID_COLOR\n valid = True\n else:\n color = INVALID_COLOR\n valid = False\n finally:\n self.save_freq_entry.setStyleSheet(f\"QLineEdit {{ background-color: {color} }}\")\n self.save_freq_entry.valid = valid\n ready_check(self)\n\n @pyqtSlot()\n def validateWeight(self, sender):\n text = sender.text()\n try:\n num = float(text)\n except ValueError:\n color = INVALID_COLOR\n valid = False\n else:\n u_score = '_' in text\n if 0 <= num <= 1 and not u_score:\n color = VALID_COLOR\n valid = True\n elif num > 0 and not u_score:\n color = WARNING_COLOR\n valid = True\n else:\n color = INVALID_COLOR\n valid = False\n finally:\n sender.setStyleSheet(f\"QLineEdit {{ background-color: {color} }}\")\n sender.valid = valid\n ready_check(self)\n\n @pyqtSlot()\n def validateMutationRate(self, sender):\n text = sender.text()\n try:\n num = float(text)\n except ValueError:\n color = INVALID_COLOR\n valid = False\n else:\n u_score = '_' in text\n if 0 <= num <= 1 and not u_score:\n color = VALID_COLOR\n valid = True\n else:\n color = INVALID_COLOR\n valid = False\n finally:\n sender.setStyleSheet(f\"QLineEdit {{ background-color: {color} }}\")\n sender.valid = valid\n ready_check(self)\n\n @pyqtSlot()\n def validateBitLengths(self):\n if self.input_file_line_edit.valid:\n img = Image.open(os.path.normpath(self.input_file_line_edit.text()))\n x, y = img.size\n x, y = utils.bit_max(x), utils.bit_max(y)\n self.x_bits_slider.setMaximum(x)\n self.y_bits_slider.setMaximum(y)\n ready_check(self)\n \n @pyqtSlot()\n def run(self):\n kwargs = dict(\n img_path = self.input_file_line_edit.text(),\n output_dir = self.output_folder_line_edit.text(),\n progress_dir = self.progress_folder_line_edit.text(),\n output_freq = round(float(self.output_freq_entry.text().strip())),\n save_freq = round(float(self.save_freq_entry.text().strip())),\n bg_color = self.bg_color_dropdown.currentText(),\n w = [\n float(self.w0_entry.text()), \n float(self.w1_entry.text()),\n float(self.w2_entry.text()),\n ],\n m = [\n float(self.m_bit_flip_entry.text()),\n float(self.m_shape_swap_entry.text()),\n ],\n shape_type = self.shape_type_dropdown.currentText(),\n n_vert = self.n_vert_slider.value(),\n n_pop = self.n_pop_slider.value(),\n n_shape = self.init_shape_slider.value(),\n max_shapes = self.max_shape_slider.value(),\n x_bits = self.x_bits_slider.value(),\n y_bits = self.y_bits_slider.value(),\n c_bits = self.c_bits_slider.value(),\n oaat_mode = self.oaat_checkbox.isChecked(),\n rollback_mode = self.rollback_checkbox.isChecked(),\n shape_management_func = self.shape_management_method_dropdown.currentText(),\n shape_management_probability = round(float(self.shape_management_value_entry.text())),\n shape_management_delta = round(float(self.shape_management_value_entry.text())),\n shape_management_interval = round(float(self.shape_management_value_entry.text())), \n )\n\n global gpso_display_window\n gpso_display_window = GpsoDisplayWindow()\n gpso_display_window.init(**kwargs)\n navigate(self, gpso_display_window)\n\n\nclass ConfirmationPrompt(QWidget, Ui_ConfirmationPrompt):\n '''\n A generic window that can be used for multiple tasks. 
It prompts the user\n to make sure they want to do whatever they clicked on with \"Are you sure?\".\n Above this a custom message can be displayed by using the 'message' attribute.\n '''\n id_ = 'confirmation_prompt'\n\n def __init__(self, yes_action, owner, no_action=None, window_title=None, message=None, parent=None):\n super(ConfirmationPrompt, self).__init__(parent=parent)\n self.setupUi(self)\n self.setWindowIcon(ICON)\n if window_title is not None:\n self.setWindowTitle(window_title)\n\n self.__show = copy.copy(self.show)\n self.show = self._show\n\n self.owner = owner\n self.message.setText(message if message is not None else '')\n\n self.yes_button.clicked.connect(yes_action)\n self.no_button.clicked.connect(no_action if no_action is not None else self.close)\n\n def _show(self):\n position_next_window(self.owner, self)\n self.__show()\n\n\nclass GpsoDisplayWindow(QWidget, Ui_GpsoDisplayWindow):\n '''\n When running, the GPSO algorithm preogress is displayed in this window\n '''\n id_ = 'gpso_disply_window'\n save_pressed = pyqtSignal()\n pause_pressed = pyqtSignal()\n\n def __init__(self, parent=None):\n super(GpsoDisplayWindow, self).__init__(parent=parent)\n self.setupUi(self)\n self.setWindowIcon(ICON)\n #shape = QDesktopWidget().screenGeometry()\n #self.setGeometry(shape.width() // 2, shape.height() // 2, 400, 400)\n\n self.button_map = {\n 'Save state': self.save_pressed,\n 'Pause': self.pause_pressed,\n 'Unpause': self.pause_pressed,\n }\n\n def init(self, **kwargs):\n from algorithms.gpso import GPSO\n self.show()\n self.svg_widget.show()\n kwargs['parent_display'] = self\n\n self.alg = GPSO(**kwargs)\n self.alg_thread = utils.EventLoopThread()\n self.alg.moveToThread(self.alg_thread)\n\n self.message_timer = QTimer(self)\n self.message_timer.setInterval(2500)\n self.message_timer.timeout.connect(self.clear_message)\n\n current_size = self.svg_widget.geometry().width(), self.svg_widget.geometry().height()\n scaled_size = utils.fit_to_screen(current_size, self.alg.img_size)\n self.svg_widget.setFixedSize(*scaled_size)\n\n # get progress messages from worker:\n self.n_pop_indicator.setText(str(self.alg.n_pop))\n self.n_shape_indicator.setText(str(self.alg.n_shape))\n\n self.alg.display_signal.connect(self.update_display)\n self.alg.iter_indicators_signal.connect(self.update_iter_indicators)\n self.alg.n_shape_signal.connect(self.update_n_shapes)\n self.alg.fitness_indicator_signal.connect(self.update_fitness_indicator)\n self.alg.performance_metrics_signal.connect(self.MplDisplay.updateFigure)\n self.alg.status_signal.connect(self.display_message)\n self.bst_checkbox.toggled.connect(lambda: self.MplDisplay.toggleVisible('best'))\n self.wst_checkbox.toggled.connect(lambda: self.MplDisplay.toggleVisible('worst'))\n self.avg_checkbox.toggled.connect(lambda: self.MplDisplay.toggleVisible('avg'))\n self.std_checkbox.toggled.connect(lambda: self.MplDisplay.toggleVisible('std'))\n\n self.close_prompt = ConfirmationPrompt(lambda: navigate_to_video_maker_prompt(self), self, message='Quit')\n\n self.pause_button.clicked.connect(lambda: self.execute_action_button(self.pause_button))\n self.save_button.clicked.connect(lambda: self.execute_action_button(self.save_button))\n self.exit_button.clicked.connect(self.close_prompt.show)\n\n self.alg_thread.started.connect(self.alg.run)\n self.alg_thread.start()\n\n @pyqtSlot(int)\n def update_n_shapes(self, n_shape):\n self.n_shape_indicator.setText(str(n_shape))\n\n @pyqtSlot(object)\n def update_iter_indicators(self, iter_data):\n 
self.iter_indicator.setText(str(iter_data[0]))\n self.isi_indicator.setText(str(iter_data[1]))\n\n @pyqtSlot(float)\n def update_fitness_indicator(self, distance):\n fitness = 1. / distance\n self.distance_indicator.setText('%.3e' % distance)\n self.fitness_indicator.setText('%.3e' % fitness)\n\n @pyqtSlot(str)\n def update_display(self, svg_str):\n svg_bytes = bytearray(svg_str, encoding='utf-8')\n self.svg_widget.renderer().load(svg_bytes)\n self.svg_widget.show()\n\n @pyqtSlot(str)\n def display_message(self, message):\n self.message_timer.start()\n self.message_label.setText(message)\n\n @pyqtSlot()\n def clear_message(self):\n self.message_timer.start()\n self.message_label.setText('')\n\n @pyqtSlot()\n def execute_action_button(self, button):\n text = button.text()\n self.button_map[button.text()].emit()\n if text == 'Pause':\n button.setText('Unpause')\n elif text == 'Unpause':\n button.setText('Pause')\n\n\n###########################################################\n# Shared methods / functions #\n###########################################################\n\ndef disconnect(control):\n '''\n Disconnects a PyQt control from its socket\n '''\n try:\n control.clicked.disconnect() \n except TypeError:\n pass\n\ndef navigate(live_, next_):\n '''\n navigate between two windows\n '''\n global prev_\n prev_ = live_\n position_next_window(prev_, next_)\n live_.close()\n next_.show()\n\ndef position_next_window(live_, next_):\n '''\n Sets the position of the next_ window to the position of live_ window\n whilst preserving the geometry of the next_ window\n '''\n w = next_.geometry().width()\n h = next_.geometry().height()\n x = round(live_.geometry().x() + live_.geometry().width() / 2 - w / 2)\n y = round(live_.geometry().y() + live_.geometry().height() / 2 - h / 2)\n next_.setGeometry(x, y, w, h)\n\ndef end_program():\n '''\n Ends the entire application\n '''\n app.quit()\n\ndef ready_check(self):\n '''\n Enables/disables a PyQt control when all vaidation checks return positive/negative\n '''\n for control in self.validated_controls:\n control.setEnabled(all(input_.valid for input_ in self.inputs.values()))\n\ndef center_window(self):\n frame_geometry = self.frameGeometry()\n screen = QApplication.desktop().screenNumber(QApplication.desktop().cursor().pos())\n centerPoint = QApplication.desktop().screenGeometry(screen).center()\n frame_geometry.moveCenter(centerPoint)\n self.move(frame_geometry.topLeft())\n\ndef get_file(self, line_edit, message, exts_str):\n '''\n Generic dialog for selecting a file\n '''\n options = QFileDialog.Options()\n options |= QFileDialog.DontUseNativeDialog\n fname, _ = QFileDialog.getOpenFileName(self, message, '', exts_str, options=options)\n line_edit.setText(fname)\n\ndef get_dir(self, line_edit, message='Select a directory'):\n '''\n Generic dialog for selecting a directory\n '''\n options = QFileDialog.Options() | QFileDialog.DontUseNativeDialog | QFileDialog.ShowDirsOnly\n folder = QFileDialog.getExistingDirectory(self, message, '', options=options)\n line_edit.setText(folder)\n\n@pyqtSlot()\ndef validate_file(self, valid_exts):\n path = self.input_file_line_edit.text()\n if os.path.exists(os.path.normpath(path)) and path.endswith(valid_exts):\n color = VALID_COLOR\n valid = True\n else:\n color = INVALID_COLOR\n valid = False\n self.input_file_line_edit.setStyleSheet(f\"QLineEdit {{ background-color: {color} }}\")\n self.input_file_line_edit.valid = valid\n ready_check(self)\n\n@pyqtSlot()\ndef validate_save_name(self, valid_exts):\n path = 
os.path.normpath(self.save_as_line_edit.text())\n dir_, fname = os.path.split(path)\n dir_exists = os.path.exists(dir_)\n fname_exists = os.path.exists(fname)\n if path.endswith(valid_exts) and dir_exists and not fname_exists:\n color = VALID_COLOR\n valid = True\n else:\n color = INVALID_COLOR\n valid = False\n self.save_as_line_edit.setStyleSheet(f\"QLineEdit {{ background-color: {color} }}\")\n self.save_as_line_edit.valid = valid\n ready_check(self)\n\ndef navigate_to_video_maker_prompt(self):\n self.pause_button.clicked.emit()\n global video_maker_prompt\n video_maker_prompt = VideoMakerPrompt(svg_dir=self.alg.output_dir)\n position_next_window(self, video_maker_prompt)\n self.alg.thread_enabled = False\n self.alg.display_signal.disconnect()\n self.alg_thread.exit()\n self.close_prompt.close()\n self.close()\n video_maker_prompt.show()\n\n\nif __name__ == '__main__':\n\n app = QApplication([])\n\n ICON = QIcon(ICON_PATH) # Needs to be run after QApplication()\n\n if 'Fusion' in QStyleFactory.keys():\n app.setStyle('Fusion')\n\n main_menu = MainMenu()\n gpso_setup_window = GpsoSetupWindow()\n image_editor = ImageEditor()\n load_window = LoadWindow()\n video_maker_setup = VideoMakerSetup()\n about_window = AboutWindow()\n\n prev_ = main_menu\n\n main_menu.show()\n\n sys.exit(app.exec_())"
] |
[
[
"matplotlib.pyplot.style.use"
]
] |
SCIInstitute/shapeworks
|
[
"cbd44fdeb83270179c2331f2ba8431cf7330a4ff"
] |
[
"Python/ShapeCohortGenPackage/ShapeCohortGen/CohortGenUtils.py"
] |
[
"import os\nimport numpy as np\nimport scipy\nimport shutil\nimport vtk\nfrom vtk.util.numpy_support import vtk_to_numpy\nimport shapely\nimport matplotlib.pyplot as plt\nimport shapeworks as sw\n\n'''\nMake folder\n'''\ndef make_dir(dir_path):\n if os.path.exists(dir_path):\n shutil.rmtree(dir_path)\n os.makedirs(dir_path)\n\n'''\nGet list of full paths for files in dir\n'''\ndef get_files(folder):\n file_list = []\n for file in os.listdir(folder):\n file_path = folder + file\n file_path = file_path.replace(\" \",\"\")\n file_list.append(file_path)\n file_list = sorted(file_list)\n return file_list\n\n'''\nGet files with specific extensions\n'''\ndef get_file_with_ext(file_list,extension):\n extList =[]\n for file in file_list:\n ext = file.split(\".\")[-1]\n if(ext==extension):\n extList.append(file)\n extList = sorted(extList)\n return extList\n\n'''\nTakes inname path and replaces dir with outdir and adds extension before file type\n'''\ndef rename(inname, outDir, extension_addition, extension_change=''):\n initPath = os.path.dirname(inname)\n outname = inname.replace(initPath, outDir)\n current_extension = \".\" + inname.split(\".\")[-1]\n if extension_addition != '':\n outname = outname.replace(current_extension, '.' + extension_addition + current_extension)\n if extension_change != '':\n outname = outname.replace(current_extension, extension_change)\n return outname\n\n'''\nGenerate segmentations from mesh list:\n - by default these will be the size of the region containing ALL meshes (with some padding)\n - if allow_on_boundary, a random subset of these will be exactly the size of the mesh\n - if randomize_size, meshes not in allow_on_boundary subset will be ALL mesh region with different padding\n - this is an example of using Mesh.toImage, not a useful tool\n'''\ndef generate_segmentations(meshList, out_dir, randomize_size=True, spacing=[1.0,1.0,1.0], allow_on_boundary=True):\n\n # get list of meshs to be converted\n segDir = out_dir + \"segmentations/\"\n make_dir(segDir)\n\n # get region that includes all of these meshes\n bball = sw.MeshUtils.boundingBox(meshList)\n\n # randomly select 20% meshes for boundary touching samples\n numMeshes = len(meshList)\n meshIndexArray = np.array(list(range(numMeshes)))\n subSampleSize = int(0.2*numMeshes)\n randomBoundarySamples = np.random.choice(meshIndexArray,subSampleSize,replace=False)\n\n # loop through meshes and turn to images\n segList = []\n meshIndex = 0\n for mesh_ in meshList:\n print(\"Generating seg \" + str(meshIndex + 1) + \" out of \" + str(len(meshList)))\n segFile = rename(mesh_, segDir, \"\", \".nrrd\")\n segList.append(segFile)\n\n # load .ply mesh and get its bounding box\n mesh = sw.Mesh(mesh_)\n bb = mesh.boundingBox()\n\n # if mesh isn't in the set for allow_on_boundary, add [random] padding\n if not (allow_on_boundary and (meshIndex in randomBoundarySamples)):\n bb = bball\n\n pad = 5\n if randomize_size: \n pad = np.random.randint(5, high=15, size=3)\n else:\n pad = np.array([5,5,5])\n bb.min -= pad\n bb.max += pad\n\n # sample the given region of Mesh to an image\n image = mesh.toImage(region=bb, spacing=spacing)\n\n # write the result to disk and move to the next mesh\n image.write(segFile, compressed=True)\n meshIndex += 1\n\n # return list of new image filenames\n return segList\n\n'''\nGenerate 2D segmentations from contour list:\n - by default these will be the size of the region containing ALL contours (with some padding)\n - if allow_on_boundary, a random subset of these will be exactly the size of the 
contour\n - if randomize_size, contours not in allow_on_boundary subset will be ALL contour region with different padding\n'''\ndef generate_2Dsegmentations(contour_list, out_dir, randomize_size=True, spacing=[1.0,1.0,1.0], allow_on_boundary=True):\n\n # get list of meshs to be converted\n segDir = out_dir + \"segmentations/\"\n make_dir(segDir)\n\n # get region that includes all of these contours\n bball = get_contours_bounding_box(contour_list)\n\n # randomly select 20% contours for boundary touching samples\n num_contours = len(contour_list)\n contourIndexArray = np.array(list(range(num_contours)))\n subSampleSize = int(0.2*num_contours)\n randomBoundarySamples = np.random.choice(contourIndexArray,subSampleSize,replace=False)\n\n # loop through meshes and turn to images\n segList = []\n contourIndex = 0\n for contour in contour_list:\n print(\"Generating seg \" + str(contourIndex + 1) + \" out of \" + str(len(contour_list)))\n segFile = rename(contour, segDir, \"\", \".png\")\n segList.append(segFile)\n\n reader = vtk.vtkXMLPolyDataReader()\n reader.SetFileName(contour)\n reader.Update()\n polydata = reader.GetOutput()\n points = polydata.GetPoints()\n array = points.GetData()\n point_coordinates = vtk_to_numpy(array)\n\n bb = [np.max(point_coordinates[:,0]), np.min(point_coordinates[:,0]), np.max(point_coordinates[:,1]), np.min(point_coordinates[:,1])]\n if not (allow_on_boundary and (contourIndex in randomBoundarySamples)):\n bb = bball\n padx = 5\n pady = 5\n if randomize_size: \n padx = np.random.randint(5, high=15)\n pady = np.random.randint(5, high=15)\n bb[0] += padx\n bb[1] -= padx\n bb[2] += pady\n bb[3] -= pady\n\n poly = list()\n for ii in range(point_coordinates.shape[0]):\n poly.append((point_coordinates[ii,0], point_coordinates[ii,1]))\n polygon = shapely.geometry.Polygon(poly)\n\n X = np.arange(bb[1], bb[0])\n Y = np.arange(bb[3], bb[2])\n mask = np.zeros((Y.shape[0],X.shape[0]))\n for ii in range(X.shape[0]):\n for jj in range(Y.shape[0]):\n point = shapely.geometry.Point(X[ii], Y[jj])\n if polygon.contains(point):\n mask[jj,ii] = 1\n\n plt.imsave(segFile, mask, cmap=\"gray\")\n contourIndex += 1\n\n # return list of new image filenames\n return segList\n\n'''\nReturns smallest bounding box that contains all contours\n'''\ndef get_contours_bounding_box(contour_list, pad=5):\n max_x = 0\n max_y = 0\n min_x = np.inf \n min_y = np.inf\n for contour in contour_list:\n reader = vtk.vtkXMLPolyDataReader()\n reader.SetFileName(contour)\n reader.Update()\n polydata = reader.GetOutput()\n points = polydata.GetPoints()\n array = points.GetData()\n point_coordinates = vtk_to_numpy(array)\n if max_x < np.max(point_coordinates[:,0]):\n max_x = np.max(point_coordinates[:,0])\n if min_x > np.min(point_coordinates[:,0]):\n min_x = np.min(point_coordinates[:,0])\n if max_y < np.max(point_coordinates[:,1]):\n max_y = np.max(point_coordinates[:,1])\n if min_y > np.min(point_coordinates[:,1]):\n min_y = np.min(point_coordinates[:,1])\n return [max_x, min_x, max_y, min_y]\n\n'''\nGenerates image by blurring and adding noise to segmentation\n'''\ndef generate_images(segs, outDir, blur_factor, foreground_mean, foreground_var, background_mean, background_var):\n imgDir = outDir + 'images/'\n make_dir(imgDir)\n index = 1\n for seg in segs:\n print(\"Generating image \" + str(index) + \" out of \" + str(len(segs)))\n name = seg.replace('segmentations/','images/').replace('_seg.nrrd', '_blur' + str(blur_factor) + '.nrrd')\n img = sw.Image(seg)\n origin = img.origin()\n img_array = 
blur(img.toArray(), blur_factor)\n        img_array = apply_noise(img_array, foreground_mean, foreground_var, background_mean, background_var)\n        img_array = np.float32(img_array)\n        img = sw.Image(np.float32(img_array)).setOrigin(origin)\n        img.write(name,compressed=True)\n        index += 1\n    return get_files(imgDir)\n\n'''\nGenerates image by blurring and adding noise to segmentation\n'''\ndef generate_2Dimages(segs, outDir, blur_factor, foreground_mean, foreground_var, background_mean, background_var):\n    imgDir = outDir + 'images/'\n    make_dir(imgDir)\n    index = 1\n    for seg in segs:\n        print(\"Generating image \" + str(index) + \" out of \" + str(len(segs)))\n        name = seg.replace('segmentations/','images/').replace('_seg.png', '_blur' + str(blur_factor) + '.png')\n        img_array = plt.imread(seg, format=\"png\")\n        img_array = img_array/np.max(img_array)\n        img_array = blur(img_array, blur_factor)\n        img_array = apply_noise(img_array, foreground_mean, foreground_var, background_mean, background_var)\n        img_array = img_array.astype(np.uint8)\n        plt.imsave(name, img_array, cmap=\"gray\")\n        index += 1\n    return get_files(imgDir)\n\n'''\nget_image helper\n'''\ndef blur(img, size):\n    blur = scipy.ndimage.filters.gaussian_filter(img, size)\n    return blur\n\n'''\nget_image helper\n'''\ndef apply_noise(img, foreground_mean, foreground_var, background_mean, background_var):\n    background_indices = np.where(img < 0.5)\n    foreground_indices = np.where(img > 0.5)\n    img = img*(foreground_mean-background_mean)\n    img = img + np.ones(img.shape)*background_mean\n    foreground_noise = np.random.normal(0, foreground_var**0.5, img.shape)\n    foreground_noise[background_indices] = 0\n    background_noise = np.random.normal(0, background_var**0.5, img.shape)\n    background_noise[foreground_indices] = 0\n    noisy_img = img + foreground_noise + background_noise\n    return noisy_img\n\ndef compute_line_indices(n, is_closed=True):\n    \"\"\"\n    Given a number of points, return indices for lines (as np.ndarray) between successive pairs of points.\n    n: number of points\n    is_closed: whether or not the last vertex is to be connected to the first vertex\n    \"\"\"\n    lines = np.zeros((n if is_closed else n-1, 2), dtype=int)\n    for i in range(lines.shape[0]):\n        lines[i] = [i, (i+1)%n]\n\n    return lines\n\ndef save_contour_as_vtp(points, lines, filename):\n    \"\"\"\n    Generates a .vtp file for the given contour to use in ShapeWorks optimizer\n    points: Nx3 np.ndarray of points in the contour\n    lines: Mx2 np.ndarray of lines in the contour\n    filename: output .vtp filename\n    \"\"\"\n    vtk_pts = vtk.vtkPoints()\n    n = points.shape[0]\n    for j in range(n):\n        x, y, z = points[j]\n        vtk_pts.InsertNextPoint((x,y,z))\n\n    vtk_lines = vtk.vtkCellArray()\n    m = lines.shape[0]\n    for j in range(m):\n        vtk_line = vtk.vtkLine()\n        vtk_line.GetPointIds().SetId(0, lines[j][0])\n        vtk_line.GetPointIds().SetId(1, lines[j][1])\n        vtk_lines.InsertNextCell(vtk_line)\n\n    polydata = vtk.vtkPolyData()\n    polydata.SetPoints(vtk_pts)\n    polydata.SetLines(vtk_lines)\n\n    writer = vtk.vtkXMLPolyDataWriter()\n    writer.SetFileName(filename)\n    writer.SetInputData(polydata)\n    writer.Write()\n"
] |
[
[
"matplotlib.pyplot.imsave",
"numpy.random.choice",
"numpy.min",
"numpy.arange",
"matplotlib.pyplot.imread",
"numpy.ones",
"numpy.max",
"numpy.random.normal",
"scipy.ndimage.filters.gaussian_filter",
"numpy.float32",
"numpy.array",
"numpy.where",
"numpy.zeros",
"numpy.random.randint"
]
] |
s183983/HodgeNet
|
[
"09a84d5bf4c1cc5b01d4583a685a257d12ceb5ae"
] |
[
"hodgenet.py"
] |
[
"import scipy\nimport scipy.sparse.linalg\nimport torch\nimport torch.nn as nn\n\nfrom hodgeautograd import HodgeEigensystem\n\n\nclass HodgeNetModel(nn.Module):\n \"\"\"Main HodgeNet model.\n\n The model inputs a batch of meshes and outputs features per vertex or \n pooled to faces or the entire mesh.\n \"\"\"\n def __init__(self, num_edge_features, num_triangle_features,\n num_output_features=32, num_eigenvectors=64,\n num_extra_eigenvectors=16, mesh_feature=False, min_star=1e-2,\n resample_to_triangles=False, num_bdry_edge_features=None,\n num_vector_dimensions=1):\n super(HodgeNetModel, self).__init__()\n\n self.num_triangle_features = num_triangle_features\n self.hodgefunc = HodgeEigensystem.apply\n self.num_eigenvectors = num_eigenvectors\n self.num_extra_eigenvectors = num_extra_eigenvectors\n self.num_output_features = num_output_features\n self.min_star = min_star\n self.resample_to_triangles = resample_to_triangles\n self.mesh_feature = mesh_feature\n self.num_vector_dimensions = num_vector_dimensions\n\n self.to_star1 = nn.Sequential(\n nn.Linear(num_edge_features, 32),\n nn.BatchNorm1d(32),\n nn.LeakyReLU(),\n nn.Linear(32, 32),\n nn.BatchNorm1d(32),\n nn.LeakyReLU(),\n nn.Linear(32, 32),\n nn.BatchNorm1d(32),\n nn.LeakyReLU(),\n nn.Linear(32, 32),\n nn.BatchNorm1d(32),\n nn.LeakyReLU(),\n nn.Linear(32, self.num_vector_dimensions**2)\n )\n\n if num_bdry_edge_features is not None:\n self.to_star1_bdry = nn.Sequential(\n nn.Linear(num_bdry_edge_features, 32),\n nn.BatchNorm1d(32),\n nn.LeakyReLU(),\n nn.Linear(32, 32),\n nn.BatchNorm1d(32),\n nn.LeakyReLU(),\n nn.Linear(32, 32),\n nn.BatchNorm1d(32),\n nn.LeakyReLU(),\n nn.Linear(32, 32),\n nn.BatchNorm1d(32),\n nn.LeakyReLU(),\n nn.Linear(32, self.num_vector_dimensions**2)\n )\n else:\n self.to_star1_bdry = None\n\n self.to_star0_tri = nn.Sequential(\n nn.Linear(num_triangle_features, 32),\n nn.BatchNorm1d(32),\n nn.LeakyReLU(),\n nn.Linear(32, 32),\n nn.BatchNorm1d(32),\n nn.LeakyReLU(),\n nn.Linear(32, 32),\n nn.BatchNorm1d(32),\n nn.LeakyReLU(),\n nn.Linear(32, 32),\n nn.BatchNorm1d(32),\n nn.LeakyReLU(),\n nn.Linear(32, self.num_vector_dimensions *\n self.num_vector_dimensions)\n )\n\n self.eigenvalue_to_matrix = nn.Sequential(\n nn.Linear(1, num_output_features),\n nn.BatchNorm1d(num_output_features),\n nn.LeakyReLU(),\n nn.Linear(num_output_features, num_output_features),\n nn.BatchNorm1d(num_output_features),\n nn.LeakyReLU(),\n nn.Linear(num_output_features, num_output_features),\n nn.BatchNorm1d(num_output_features),\n nn.LeakyReLU(),\n nn.Linear(num_output_features, num_output_features),\n nn.BatchNorm1d(num_output_features),\n nn.LeakyReLU(),\n nn.Linear(num_output_features, num_output_features)\n )\n\n def gather_star0(self, mesh, star0_tri):\n \"\"\"Compute star0 matrix per vertex by gathering from triangles.\"\"\"\n star0 = torch.zeros(mesh['vertices'].shape[0],\n star0_tri.shape[1]).to(star0_tri)\n star0.index_add_(0, mesh['triangles'][:, 0], star0_tri)\n star0.index_add_(0, mesh['triangles'][:, 1], star0_tri)\n star0.index_add_(0, mesh['triangles'][:, 2], star0_tri)\n\n star0 = star0.view(-1, self.num_vector_dimensions,\n self.num_vector_dimensions)\n\n # square the tensor to be semidefinite\n star0 = torch.einsum('ijk,ilk->ijl', star0, star0)\n\n # add min star down the diagonal\n star0 += torch.eye(self.num_vector_dimensions)[None].to(star0) * \\\n self.min_star\n\n return star0\n\n def compute_mesh_eigenfunctions(self, mesh, star0, star1, bdry=False):\n \"\"\"Compute eigenvectors and eigenvalues of the learned 
operator.\"\"\"\n nb = len(mesh)\n\n inputs = []\n for m, s0, s1 in zip(mesh, star0, star1):\n d = m['int_d01']\n if bdry:\n d = scipy.sparse.vstack([d, m['bdry_d01']])\n inputs.extend([s0, s1, d])\n\n eigenvalues, eigenvectors = [], []\n outputs = self.hodgefunc(nb, self.num_eigenvectors,\n self.num_extra_eigenvectors, *inputs)\n for i in range(nb):\n eigenvalues.append(outputs[2*i])\n eigenvectors.append(outputs[2*i+1])\n\n return eigenvalues, eigenvectors\n\n def forward(self, batch):\n nb = len(batch)\n\n all_star0_tri = self.to_star0_tri(\n torch.cat([mesh['triangle_features'] for mesh in batch], dim=0))\n star0_tri_split = torch.split(\n all_star0_tri, [mesh['triangles'].shape[0] for mesh in batch],\n dim=0)\n star0_split = [self.gather_star0(mesh, star0_tri)\n for mesh, star0_tri in zip(batch, star0_tri_split)]\n\n all_star1 = self.to_star1(torch.cat([mesh['int_edge_features']\n for mesh in batch], dim=0))\n all_star1 = all_star1.view(-1, self.num_vector_dimensions,\n self.num_vector_dimensions)\n all_star1 = torch.einsum('ijk,ilk->ijl', all_star1, all_star1)\n all_star1 += torch.eye(\n self.num_vector_dimensions)[None].to(all_star1) * \\\n self.min_star\n star1_split = list(torch.split(all_star1, [mesh['int_d01'].shape[0]\n for mesh in batch], dim=0))\n\n if self.to_star1_bdry is not None:\n all_star1_bdry = self.to_star1_bdry(\n torch.cat([mesh['bdry_edge_features'] for mesh in batch],\n dim=0))\n all_star1_bdry = all_star1_bdry.view(\n -1, self.num_vector_dimensions, self.num_vector_dimensions)\n all_star1_bdry = torch.einsum(\n 'ijk,ilk->ijl', all_star1_bdry, all_star1_bdry)\n all_star1_bdry += torch.eye(\n self.num_vector_dimensions)[None].to(all_star1_bdry) * \\\n self.min_star\n star1_bdry_split = torch.split(\n all_star1_bdry,\n [mesh['bdry_d01'].shape[0] for mesh in batch], dim=0)\n\n for i in range(nb):\n star1_split[i] = torch.cat(\n [star1_split[i], star1_bdry_split[i]], dim=0)\n\n eigenvalues, eigenvectors = self.compute_mesh_eigenfunctions(\n batch, star0_split, star1_split,\n bdry=self.to_star1_bdry is not None)\n\n # glue the eigenvalues back together and run through the nonlinearity\n all_processed_eigenvalues = self.eigenvalue_to_matrix(\n torch.stack(eigenvalues).view(-1, 1)).view(\n nb, -1, self.num_output_features)\n\n # post-multiply the set of eigenvectors by the learned matrix that's a\n # function of eigenvalues (similar to HKS, WKS)\n outer_products = [torch.einsum(\n 'ijk,ijl->ijkl', eigenvectors[i], eigenvectors[i])\n for i in range(nb)] # take outer product of vectors\n\n result = [torch.einsum(\n 'ijkp,jl->ilkp', outer_products[i], all_processed_eigenvalues[i])\n for i in range(nb)] # multiply by learned matrix\n\n result = [result[i].flatten(start_dim=1) for i in range(nb)]\n\n if self.resample_to_triangles:\n result = [result[i][batch[i]['triangles']].max(\n 1)[0] for i in range(nb)]\n\n if self.mesh_feature:\n result = [f.max(0, keepdim=True)[0] for f in result]\n\n return torch.cat(result, dim=0)\n"
] |
[
[
"torch.nn.BatchNorm1d",
"torch.cat",
"torch.zeros",
"torch.einsum",
"torch.eye",
"torch.nn.Linear",
"torch.nn.LeakyReLU",
"scipy.sparse.vstack",
"torch.split",
"torch.stack"
]
] |
toastisme/dials
|
[
"6bc8ababc33bfe334513677f8adb65c0e90003f3"
] |
[
"command_line/background.py"
] |
[
"# LIBTBX_SET_DISPATCHER_NAME dials.background\n# LIBTBX_PRE_DISPATCHER_INCLUDE_SH export PHENIX_GUI_ENVIRONMENT=1\n\n\nimport math\n\nimport iotbx.phil\nfrom libtbx.phil import parse\nfrom scitbx import matrix\n\nimport dials.util.masking\nfrom dials.algorithms.spot_finding.factory import SpotFinderFactory\nfrom dials.algorithms.spot_finding.factory import phil_scope as spot_phil\nfrom dials.array_family import flex\nfrom dials.util import Sorry, show_mail_handle_errors\nfrom dials.util.options import OptionParser, flatten_experiments\n\nhelp_message = \"\"\"\n\nExamples::\n\n dials.background image_*.cbf\n\n dials.background imported.expt\n\"\"\"\n\nphil_scope = iotbx.phil.parse(\n \"\"\"\\\nn_bins = 100\n .type = int\nimages = None\n .type = ints\n .help = \"Images on which to perform the analysis (otherwise use all images)\"\ncorrected = False\n .type = bool\n .help = \"Use corrected data (i.e after applying pedestal and gain) in analysis\"\n\nmasking {\n include scope dials.util.masking.phil_scope\n}\n\noutput {\n plot = None\n .type = path\n .help = \"Save background plot to file\"\n size_inches = None\n .type = floats(value_min=0, size=2)\n}\n\n\"\"\",\n process_includes=True,\n)\n\n\n@show_mail_handle_errors()\ndef run(args=None):\n usage = \"dials.background [options] image_*.cbf\"\n\n parser = OptionParser(\n usage=usage,\n phil=phil_scope,\n read_experiments=True,\n read_experiments_from_images=True,\n epilog=help_message,\n )\n\n params, options = parser.parse_args(args, show_diff_phil=True)\n\n # Ensure we have either a data block or an experiment list\n experiments = flatten_experiments(params.input.experiments)\n imagesets = experiments.imagesets()\n\n if params.output.plot:\n import matplotlib\n\n matplotlib.use(\"agg\")\n\n import matplotlib.ticker as mticker\n from matplotlib import pyplot\n\n fig = pyplot.figure(figsize=params.output.size_inches)\n ax = fig.add_subplot(111)\n\n for i_imgset, imageset in enumerate(imagesets):\n first, last = imageset.get_sequence().get_image_range()\n images = range(first, last + 1)\n\n if params.images:\n if min(params.images) < first or max(params.images) > last:\n raise Sorry(\"image outside of scan range\")\n images = params.images\n\n d_spacings = []\n intensities = []\n sigmas = []\n\n for indx in images:\n print(f\"For imageset {i_imgset} image {indx}:\")\n d, I, sig = background(\n imageset,\n indx - first, # indices passed to imageset.get_raw_data start from zero\n n_bins=params.n_bins,\n corrected=params.corrected,\n mask_params=params.masking,\n )\n\n print(f\"{'d':>8} {'I':>8} {'sig':>8}\")\n for j in range(len(I)):\n print(f\"{d[j]:8.3f} {I[j]:8.3f} {sig[j]:8.3f}\")\n\n d_spacings.append(d)\n intensities.append(I)\n sigmas.append(sig)\n\n if params.output.plot:\n ax.set_xlabel(r\"resolution ($\\AA$)\")\n ax.set_ylabel(r\"$\\langle I_b \\rangle$\")\n for indx, d, I, sig in zip(images, d_spacings, intensities, sigmas):\n filenames = imageset.reader().paths()\n if len(imagesets) > 1:\n label = (\n f\"{filenames[indx - first]}\"\n if len(filenames) > 1\n else f\"{filenames[0]} image {indx}\"\n )\n else:\n label = f\"image {indx}\" if len(images) > 1 else \"\"\n ds2 = 1 / flex.pow2(d)\n ax.plot(ds2, I, label=label)\n xticks = ax.get_xticks().tolist()\n ax.xaxis.set_major_locator(mticker.FixedLocator(xticks))\n x_tick_labs = [\n \"\" if e <= 0.0 else f\"{math.sqrt(1.0 / e):.2f}\" for e in xticks\n ]\n ax.set_xticklabels(x_tick_labs)\n\n if params.output.plot:\n try:\n if len(imagesets) > 1 or len(images) > 1:\n # Plot a legend if 
there are fewer lines than the number of colours\n # in the colour cycle\n if len(ax.lines) <= len(\n pyplot.rcParams[\"axes.prop_cycle\"].by_key()[\"color\"]\n ):\n pyplot.gca().legend()\n pyplot.savefig(params.output.plot)\n except ValueError:\n raise Sorry(f\"Unable to save plot to {params.output.plot}\")\n\n\ndef background(imageset, indx, n_bins, corrected=False, mask_params=None):\n if mask_params is None:\n # Default mask params for trusted range\n mask_params = phil_scope.fetch(parse(\"\")).extract().masking\n\n detector = imageset.get_detector()\n beam = imageset.get_beam()\n\n # Only working with single panel detector for now\n assert len(detector) == 1\n panel = detector[0]\n imageset_mask = imageset.get_mask(indx)[0]\n mask = dials.util.masking.generate_mask(imageset, mask_params)[0]\n mask = imageset_mask & mask\n\n n = matrix.col(panel.get_normal()).normalize()\n b = matrix.col(beam.get_s0()).normalize()\n wavelength = beam.get_wavelength()\n\n if math.fabs(b.dot(n)) < 0.95:\n raise Sorry(\"Detector not perpendicular to beam\")\n\n # Use corrected data to determine signal and background regions\n corrected_data = imageset.get_corrected_data(indx)\n assert len(corrected_data) == 1\n corrected_data = corrected_data[0].as_double()\n\n # Use choice of raw or corrected data to evaluate the background values\n if corrected:\n data = corrected_data\n else:\n data = imageset.get_raw_data(indx)[0].as_double()\n\n spot_params = spot_phil.fetch(source=parse(\"\")).extract()\n threshold_function = SpotFinderFactory.configure_threshold(spot_params)\n peak_pixels = threshold_function.compute_threshold(corrected_data, mask)\n signal = data.select(peak_pixels.iselection())\n background_pixels = mask & ~peak_pixels\n background = data.select(background_pixels.iselection())\n\n # print some summary information\n print(f\"Mean background: {flex.sum(background) / background.size():.3f}\")\n if len(signal) > 0:\n print(\n f\"Max/total signal pixels: {flex.max(signal):.0f} / {flex.sum(signal):.0f}\"\n )\n else:\n print(\"No signal pixels on this image\")\n print(\n \"Peak/background/masked pixels: %d / %d / %d\"\n % (peak_pixels.count(True), background.size(), mask.count(False))\n )\n\n # compute histogram of two-theta values, then same weighted\n # by pixel values, finally divide latter by former to get\n # the radial profile out, need to set the number of bins\n # sensibly; inspired by method in PyFAI\n\n two_theta_array = panel.get_two_theta_array(beam.get_s0())\n two_theta_array = two_theta_array.as_1d().select(background_pixels.iselection())\n\n # Use flex.weighted_histogram\n h0 = flex.weighted_histogram(two_theta_array, n_slots=n_bins)\n h1 = flex.weighted_histogram(two_theta_array, background, n_slots=n_bins)\n h2 = flex.weighted_histogram(\n two_theta_array, background * background, n_slots=n_bins\n )\n\n d0 = h0.slots()\n d1 = h1.slots()\n d2 = h2.slots()\n\n I = d1 / d0\n I2 = d2 / d0\n sig = flex.sqrt(I2 - flex.pow2(I))\n\n tt = h0.slot_centers()\n d_spacings = wavelength / (2.0 * flex.sin(0.5 * tt))\n\n return d_spacings, I, sig\n\n\nif __name__ == \"__main__\":\n run()\n"
] |
[
[
"matplotlib.pyplot.gca",
"matplotlib.use",
"matplotlib.pyplot.savefig",
"matplotlib.ticker.FixedLocator",
"matplotlib.pyplot.figure"
]
] |
TarrySingh/scipy
|
[
"0c42ad1b50c1d6c738c964e2aa60351e34ac2812"
] |
[
"scipy/stats/tests/test_morestats.py"
] |
[
"# Author: Travis Oliphant, 2002\n#\n# Further enhancements and tests added by numerous SciPy developers.\n#\nfrom __future__ import division, print_function, absolute_import\n\nimport warnings\n\nimport numpy as np\nfrom numpy.random import RandomState\nfrom numpy.testing import (assert_array_equal,\n assert_almost_equal, assert_array_less, assert_array_almost_equal,\n assert_, assert_allclose, assert_equal, assert_warns)\nimport pytest\nfrom pytest import raises as assert_raises\nfrom scipy._lib._numpy_compat import suppress_warnings\n\nfrom scipy import stats\nfrom .common_tests import check_named_results\n\n# Matplotlib is not a scipy dependency but is optionally used in probplot, so\n# check if it's available\ntry:\n import matplotlib.pyplot as plt\n have_matplotlib = True\nexcept:\n have_matplotlib = False\n\n\ng1 = [1.006, 0.996, 0.998, 1.000, 0.992, 0.993, 1.002, 0.999, 0.994, 1.000]\ng2 = [0.998, 1.006, 1.000, 1.002, 0.997, 0.998, 0.996, 1.000, 1.006, 0.988]\ng3 = [0.991, 0.987, 0.997, 0.999, 0.995, 0.994, 1.000, 0.999, 0.996, 0.996]\ng4 = [1.005, 1.002, 0.994, 1.000, 0.995, 0.994, 0.998, 0.996, 1.002, 0.996]\ng5 = [0.998, 0.998, 0.982, 0.990, 1.002, 0.984, 0.996, 0.993, 0.980, 0.996]\ng6 = [1.009, 1.013, 1.009, 0.997, 0.988, 1.002, 0.995, 0.998, 0.981, 0.996]\ng7 = [0.990, 1.004, 0.996, 1.001, 0.998, 1.000, 1.018, 1.010, 0.996, 1.002]\ng8 = [0.998, 1.000, 1.006, 1.000, 1.002, 0.996, 0.998, 0.996, 1.002, 1.006]\ng9 = [1.002, 0.998, 0.996, 0.995, 0.996, 1.004, 1.004, 0.998, 0.999, 0.991]\ng10 = [0.991, 0.995, 0.984, 0.994, 0.997, 0.997, 0.991, 0.998, 1.004, 0.997]\n\n\nclass TestBayes_mvs(object):\n def test_basic(self):\n # Expected values in this test simply taken from the function. For\n # some checks regarding correctness of implementation, see review in\n # gh-674\n data = [6, 9, 12, 7, 8, 8, 13]\n mean, var, std = stats.bayes_mvs(data)\n assert_almost_equal(mean.statistic, 9.0)\n assert_allclose(mean.minmax, (7.1036502226125329, 10.896349777387467),\n rtol=1e-14)\n\n assert_almost_equal(var.statistic, 10.0)\n assert_allclose(var.minmax, (3.1767242068607087, 24.45910381334018),\n rtol=1e-09)\n\n assert_almost_equal(std.statistic, 2.9724954732045084, decimal=14)\n assert_allclose(std.minmax, (1.7823367265645145, 4.9456146050146312),\n rtol=1e-14)\n\n def test_empty_input(self):\n assert_raises(ValueError, stats.bayes_mvs, [])\n\n def test_result_attributes(self):\n x = np.arange(15)\n attributes = ('statistic', 'minmax')\n res = stats.bayes_mvs(x)\n\n for i in res:\n check_named_results(i, attributes)\n\n\nclass TestMvsdist(object):\n def test_basic(self):\n data = [6, 9, 12, 7, 8, 8, 13]\n mean, var, std = stats.mvsdist(data)\n assert_almost_equal(mean.mean(), 9.0)\n assert_allclose(mean.interval(0.9), (7.1036502226125329,\n 10.896349777387467), rtol=1e-14)\n\n assert_almost_equal(var.mean(), 10.0)\n assert_allclose(var.interval(0.9), (3.1767242068607087,\n 24.45910381334018), rtol=1e-09)\n\n assert_almost_equal(std.mean(), 2.9724954732045084, decimal=14)\n assert_allclose(std.interval(0.9), (1.7823367265645145,\n 4.9456146050146312), rtol=1e-14)\n\n def test_empty_input(self):\n assert_raises(ValueError, stats.mvsdist, [])\n\n def test_bad_arg(self):\n # Raise ValueError if fewer than two data points are given.\n data = [1]\n assert_raises(ValueError, stats.mvsdist, data)\n\n def test_warns(self):\n # regression test for gh-5270\n # make sure there are no spurious divide-by-zero warnings\n with warnings.catch_warnings():\n warnings.simplefilter('error', RuntimeWarning)\n 
[x.mean() for x in stats.mvsdist([1, 2, 3])]\n [x.mean() for x in stats.mvsdist([1, 2, 3, 4, 5])]\n\n\nclass TestShapiro(object):\n def test_basic(self):\n x1 = [0.11, 7.87, 4.61, 10.14, 7.95, 3.14, 0.46,\n 4.43, 0.21, 4.75, 0.71, 1.52, 3.24,\n 0.93, 0.42, 4.97, 9.53, 4.55, 0.47, 6.66]\n w, pw = stats.shapiro(x1)\n assert_almost_equal(w, 0.90047299861907959, 6)\n assert_almost_equal(pw, 0.042089745402336121, 6)\n x2 = [1.36, 1.14, 2.92, 2.55, 1.46, 1.06, 5.27, -1.11,\n 3.48, 1.10, 0.88, -0.51, 1.46, 0.52, 6.20, 1.69,\n 0.08, 3.67, 2.81, 3.49]\n w, pw = stats.shapiro(x2)\n assert_almost_equal(w, 0.9590270, 6)\n assert_almost_equal(pw, 0.52460, 3)\n\n # Verified against R\n np.random.seed(12345678)\n x3 = stats.norm.rvs(loc=5, scale=3, size=100)\n w, pw = stats.shapiro(x3)\n assert_almost_equal(w, 0.9772805571556091, decimal=6)\n assert_almost_equal(pw, 0.08144091814756393, decimal=3)\n\n # Extracted from original paper\n x4 = [0.139, 0.157, 0.175, 0.256, 0.344, 0.413, 0.503, 0.577, 0.614,\n 0.655, 0.954, 1.392, 1.557, 1.648, 1.690, 1.994, 2.174, 2.206,\n 3.245, 3.510, 3.571, 4.354, 4.980, 6.084, 8.351]\n W_expected = 0.83467\n p_expected = 0.000914\n w, pw = stats.shapiro(x4)\n assert_almost_equal(w, W_expected, decimal=4)\n assert_almost_equal(pw, p_expected, decimal=5)\n\n def test_2d(self):\n x1 = [[0.11, 7.87, 4.61, 10.14, 7.95, 3.14, 0.46,\n 4.43, 0.21, 4.75], [0.71, 1.52, 3.24,\n 0.93, 0.42, 4.97, 9.53, 4.55, 0.47, 6.66]]\n w, pw = stats.shapiro(x1)\n assert_almost_equal(w, 0.90047299861907959, 6)\n assert_almost_equal(pw, 0.042089745402336121, 6)\n x2 = [[1.36, 1.14, 2.92, 2.55, 1.46, 1.06, 5.27, -1.11,\n 3.48, 1.10], [0.88, -0.51, 1.46, 0.52, 6.20, 1.69,\n 0.08, 3.67, 2.81, 3.49]]\n w, pw = stats.shapiro(x2)\n assert_almost_equal(w, 0.9590270, 6)\n assert_almost_equal(pw, 0.52460, 3)\n\n def test_empty_input(self):\n assert_raises(ValueError, stats.shapiro, [])\n assert_raises(ValueError, stats.shapiro, [[], [], []])\n\n def test_not_enough_values(self):\n assert_raises(ValueError, stats.shapiro, [1, 2])\n assert_raises(ValueError, stats.shapiro, [[], [2]])\n\n def test_bad_arg(self):\n # Length of x is less than 3.\n x = [1]\n assert_raises(ValueError, stats.shapiro, x)\n\n def test_nan_input(self):\n x = np.arange(10.)\n x[9] = np.nan\n\n w, pw = stats.shapiro(x)\n assert_equal(w, np.nan)\n assert_almost_equal(pw, 1.0)\n\n\nclass TestAnderson(object):\n def test_normal(self):\n rs = RandomState(1234567890)\n x1 = rs.standard_exponential(size=50)\n x2 = rs.standard_normal(size=50)\n A, crit, sig = stats.anderson(x1)\n assert_array_less(crit[:-1], A)\n A, crit, sig = stats.anderson(x2)\n assert_array_less(A, crit[-2:])\n\n v = np.ones(10)\n v[0] = 0\n A, crit, sig = stats.anderson(v)\n # The expected statistic 3.208057 was computed independently of scipy.\n # For example, in R:\n # > library(nortest)\n # > v <- rep(1, 10)\n # > v[1] <- 0\n # > result <- ad.test(v)\n # > result$statistic\n # A\n # 3.208057\n assert_allclose(A, 3.208057)\n\n def test_expon(self):\n rs = RandomState(1234567890)\n x1 = rs.standard_exponential(size=50)\n x2 = rs.standard_normal(size=50)\n A, crit, sig = stats.anderson(x1, 'expon')\n assert_array_less(A, crit[-2:])\n olderr = np.seterr(all='ignore')\n try:\n A, crit, sig = stats.anderson(x2, 'expon')\n finally:\n np.seterr(**olderr)\n assert_(A > crit[-1])\n\n def test_gumbel(self):\n # Regression test for gh-6306. 
Before that issue was fixed,\n # this case would return a2=inf.\n v = np.ones(100)\n v[0] = 0.0\n a2, crit, sig = stats.anderson(v, 'gumbel')\n # A brief reimplementation of the calculation of the statistic.\n n = len(v)\n xbar, s = stats.gumbel_l.fit(v)\n logcdf = stats.gumbel_l.logcdf(v, xbar, s)\n logsf = stats.gumbel_l.logsf(v, xbar, s)\n i = np.arange(1, n+1)\n expected_a2 = -n - np.mean((2*i - 1) * (logcdf + logsf[::-1]))\n\n assert_allclose(a2, expected_a2)\n\n def test_bad_arg(self):\n assert_raises(ValueError, stats.anderson, [1], dist='plate_of_shrimp')\n\n def test_result_attributes(self):\n rs = RandomState(1234567890)\n x = rs.standard_exponential(size=50)\n res = stats.anderson(x)\n attributes = ('statistic', 'critical_values', 'significance_level')\n check_named_results(res, attributes)\n\n def test_gumbel_l(self):\n # gh-2592, gh-6337\n # Adds support to 'gumbel_r' and 'gumbel_l' as valid inputs for dist.\n rs = RandomState(1234567890)\n x = rs.gumbel(size=100)\n A1, crit1, sig1 = stats.anderson(x, 'gumbel')\n A2, crit2, sig2 = stats.anderson(x, 'gumbel_l')\n\n assert_allclose(A2, A1)\n\n def test_gumbel_r(self):\n # gh-2592, gh-6337\n # Adds support to 'gumbel_r' and 'gumbel_l' as valid inputs for dist.\n rs = RandomState(1234567890)\n x1 = rs.gumbel(size=100)\n x2 = np.ones(100)\n A1, crit1, sig1 = stats.anderson(x1, 'gumbel_r')\n A2, crit2, sig2 = stats.anderson(x2, 'gumbel_r')\n\n assert_array_less(A1, crit1[-2:])\n assert_(A2 > crit2[-1])\n\n\nclass TestAndersonKSamp(object):\n def test_example1a(self):\n # Example data from Scholz & Stephens (1987), originally\n # published in Lehmann (1995, Nonparametrics, Statistical\n # Methods Based on Ranks, p. 309)\n # Pass a mixture of lists and arrays\n t1 = [38.7, 41.5, 43.8, 44.5, 45.5, 46.0, 47.7, 58.0]\n t2 = np.array([39.2, 39.3, 39.7, 41.4, 41.8, 42.9, 43.3, 45.8])\n t3 = np.array([34.0, 35.0, 39.0, 40.0, 43.0, 43.0, 44.0, 45.0])\n t4 = np.array([34.0, 34.8, 34.8, 35.4, 37.2, 37.8, 41.2, 42.8])\n assert_warns(UserWarning, stats.anderson_ksamp, (t1, t2, t3, t4),\n midrank=False)\n with suppress_warnings() as sup:\n sup.filter(UserWarning, message='approximate p-value')\n Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4), midrank=False)\n\n assert_almost_equal(Tk, 4.449, 3)\n assert_array_almost_equal([0.4985, 1.3237, 1.9158, 2.4930, 3.2459],\n tm, 4)\n assert_almost_equal(p, 0.0021, 4)\n\n def test_example1b(self):\n # Example data from Scholz & Stephens (1987), originally\n # published in Lehmann (1995, Nonparametrics, Statistical\n # Methods Based on Ranks, p. 
309)\n # Pass arrays\n t1 = np.array([38.7, 41.5, 43.8, 44.5, 45.5, 46.0, 47.7, 58.0])\n t2 = np.array([39.2, 39.3, 39.7, 41.4, 41.8, 42.9, 43.3, 45.8])\n t3 = np.array([34.0, 35.0, 39.0, 40.0, 43.0, 43.0, 44.0, 45.0])\n t4 = np.array([34.0, 34.8, 34.8, 35.4, 37.2, 37.8, 41.2, 42.8])\n with suppress_warnings() as sup:\n sup.filter(UserWarning, message='approximate p-value')\n Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4), midrank=True)\n\n assert_almost_equal(Tk, 4.480, 3)\n assert_array_almost_equal([0.4985, 1.3237, 1.9158, 2.4930, 3.2459],\n tm, 4)\n assert_almost_equal(p, 0.0020, 4)\n\n def test_example2a(self):\n # Example data taken from an earlier technical report of\n # Scholz and Stephens\n # Pass lists instead of arrays\n t1 = [194, 15, 41, 29, 33, 181]\n t2 = [413, 14, 58, 37, 100, 65, 9, 169, 447, 184, 36, 201, 118]\n t3 = [34, 31, 18, 18, 67, 57, 62, 7, 22, 34]\n t4 = [90, 10, 60, 186, 61, 49, 14, 24, 56, 20, 79, 84, 44, 59, 29,\n 118, 25, 156, 310, 76, 26, 44, 23, 62]\n t5 = [130, 208, 70, 101, 208]\n t6 = [74, 57, 48, 29, 502, 12, 70, 21, 29, 386, 59, 27]\n t7 = [55, 320, 56, 104, 220, 239, 47, 246, 176, 182, 33]\n t8 = [23, 261, 87, 7, 120, 14, 62, 47, 225, 71, 246, 21, 42, 20, 5,\n 12, 120, 11, 3, 14, 71, 11, 14, 11, 16, 90, 1, 16, 52, 95]\n t9 = [97, 51, 11, 4, 141, 18, 142, 68, 77, 80, 1, 16, 106, 206, 82,\n 54, 31, 216, 46, 111, 39, 63, 18, 191, 18, 163, 24]\n t10 = [50, 44, 102, 72, 22, 39, 3, 15, 197, 188, 79, 88, 46, 5, 5, 36,\n 22, 139, 210, 97, 30, 23, 13, 14]\n t11 = [359, 9, 12, 270, 603, 3, 104, 2, 438]\n t12 = [50, 254, 5, 283, 35, 12]\n t13 = [487, 18, 100, 7, 98, 5, 85, 91, 43, 230, 3, 130]\n t14 = [102, 209, 14, 57, 54, 32, 67, 59, 134, 152, 27, 14, 230, 66,\n 61, 34]\n with suppress_warnings() as sup:\n sup.filter(UserWarning, message='approximate p-value')\n Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4, t5, t6, t7, t8,\n t9, t10, t11, t12, t13, t14),\n midrank=False)\n\n assert_almost_equal(Tk, 3.288, 3)\n assert_array_almost_equal([0.5990, 1.3269, 1.8052, 2.2486, 2.8009],\n tm, 4)\n assert_almost_equal(p, 0.0041, 4)\n\n def test_example2b(self):\n # Example data taken from an earlier technical report of\n # Scholz and Stephens\n t1 = [194, 15, 41, 29, 33, 181]\n t2 = [413, 14, 58, 37, 100, 65, 9, 169, 447, 184, 36, 201, 118]\n t3 = [34, 31, 18, 18, 67, 57, 62, 7, 22, 34]\n t4 = [90, 10, 60, 186, 61, 49, 14, 24, 56, 20, 79, 84, 44, 59, 29,\n 118, 25, 156, 310, 76, 26, 44, 23, 62]\n t5 = [130, 208, 70, 101, 208]\n t6 = [74, 57, 48, 29, 502, 12, 70, 21, 29, 386, 59, 27]\n t7 = [55, 320, 56, 104, 220, 239, 47, 246, 176, 182, 33]\n t8 = [23, 261, 87, 7, 120, 14, 62, 47, 225, 71, 246, 21, 42, 20, 5,\n 12, 120, 11, 3, 14, 71, 11, 14, 11, 16, 90, 1, 16, 52, 95]\n t9 = [97, 51, 11, 4, 141, 18, 142, 68, 77, 80, 1, 16, 106, 206, 82,\n 54, 31, 216, 46, 111, 39, 63, 18, 191, 18, 163, 24]\n t10 = [50, 44, 102, 72, 22, 39, 3, 15, 197, 188, 79, 88, 46, 5, 5, 36,\n 22, 139, 210, 97, 30, 23, 13, 14]\n t11 = [359, 9, 12, 270, 603, 3, 104, 2, 438]\n t12 = [50, 254, 5, 283, 35, 12]\n t13 = [487, 18, 100, 7, 98, 5, 85, 91, 43, 230, 3, 130]\n t14 = [102, 209, 14, 57, 54, 32, 67, 59, 134, 152, 27, 14, 230, 66,\n 61, 34]\n with suppress_warnings() as sup:\n sup.filter(UserWarning, message='approximate p-value')\n Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4, t5, t6, t7, t8,\n t9, t10, t11, t12, t13, t14),\n midrank=True)\n\n assert_almost_equal(Tk, 3.294, 3)\n assert_array_almost_equal([0.5990, 1.3269, 1.8052, 2.2486, 2.8009],\n tm, 4)\n assert_almost_equal(p, 0.0041, 
4)\n\n def test_not_enough_samples(self):\n assert_raises(ValueError, stats.anderson_ksamp, np.ones(5))\n\n def test_no_distinct_observations(self):\n assert_raises(ValueError, stats.anderson_ksamp,\n (np.ones(5), np.ones(5)))\n\n def test_empty_sample(self):\n assert_raises(ValueError, stats.anderson_ksamp, (np.ones(5), []))\n\n def test_result_attributes(self):\n # Example data from Scholz & Stephens (1987), originally\n # published in Lehmann (1995, Nonparametrics, Statistical\n # Methods Based on Ranks, p. 309)\n # Pass a mixture of lists and arrays\n t1 = [38.7, 41.5, 43.8, 44.5, 45.5, 46.0, 47.7, 58.0]\n t2 = np.array([39.2, 39.3, 39.7, 41.4, 41.8, 42.9, 43.3, 45.8])\n t3 = np.array([34.0, 35.0, 39.0, 40.0, 43.0, 43.0, 44.0, 45.0])\n t4 = np.array([34.0, 34.8, 34.8, 35.4, 37.2, 37.8, 41.2, 42.8])\n\n with suppress_warnings() as sup:\n sup.filter(UserWarning, message='approximate p-value')\n res = stats.anderson_ksamp((t1, t2, t3, t4), midrank=False)\n\n attributes = ('statistic', 'critical_values', 'significance_level')\n check_named_results(res, attributes)\n\n def test_overflow(self):\n # when significance_level approximation overflows, should still return\n with suppress_warnings() as sup:\n sup.filter(UserWarning, message='approximate p-value')\n res = stats.anderson_ksamp([[-20, -10] * 100, [-10, 40, 12] * 100])\n assert_almost_equal(res[0], 272.796, 3)\n\n\nclass TestAnsari(object):\n\n def test_small(self):\n x = [1, 2, 3, 3, 4]\n y = [3, 2, 6, 1, 6, 1, 4, 1]\n with suppress_warnings() as sup:\n sup.filter(UserWarning, \"Ties preclude use of exact statistic.\")\n W, pval = stats.ansari(x, y)\n assert_almost_equal(W, 23.5, 11)\n assert_almost_equal(pval, 0.13499256881897437, 11)\n\n def test_approx(self):\n ramsay = np.array((111, 107, 100, 99, 102, 106, 109, 108, 104, 99,\n 101, 96, 97, 102, 107, 113, 116, 113, 110, 98))\n parekh = np.array((107, 108, 106, 98, 105, 103, 110, 105, 104,\n 100, 96, 108, 103, 104, 114, 114, 113, 108,\n 106, 99))\n\n with suppress_warnings() as sup:\n sup.filter(UserWarning, \"Ties preclude use of exact statistic.\")\n W, pval = stats.ansari(ramsay, parekh)\n\n assert_almost_equal(W, 185.5, 11)\n assert_almost_equal(pval, 0.18145819972867083, 11)\n\n def test_exact(self):\n W, pval = stats.ansari([1, 2, 3, 4], [15, 5, 20, 8, 10, 12])\n assert_almost_equal(W, 10.0, 11)\n assert_almost_equal(pval, 0.533333333333333333, 7)\n\n def test_bad_arg(self):\n assert_raises(ValueError, stats.ansari, [], [1])\n assert_raises(ValueError, stats.ansari, [1], [])\n\n def test_result_attributes(self):\n x = [1, 2, 3, 3, 4]\n y = [3, 2, 6, 1, 6, 1, 4, 1]\n with suppress_warnings() as sup:\n sup.filter(UserWarning, \"Ties preclude use of exact statistic.\")\n res = stats.ansari(x, y)\n attributes = ('statistic', 'pvalue')\n check_named_results(res, attributes)\n\n\nclass TestBartlett(object):\n\n def test_data(self):\n args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]\n T, pval = stats.bartlett(*args)\n assert_almost_equal(T, 20.78587342806484, 7)\n assert_almost_equal(pval, 0.0136358632781, 7)\n\n def test_bad_arg(self):\n # Too few args raises ValueError.\n assert_raises(ValueError, stats.bartlett, [1])\n\n def test_result_attributes(self):\n args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]\n res = stats.bartlett(*args)\n attributes = ('statistic', 'pvalue')\n check_named_results(res, attributes)\n\n def test_empty_arg(self):\n args = (g1, g2, g3, g4, g5, g6, g7, g8, g9, g10, [])\n assert_equal((np.nan, np.nan), stats.bartlett(*args))\n\n\nclass 
TestLevene(object):\n\n def test_data(self):\n args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]\n W, pval = stats.levene(*args)\n assert_almost_equal(W, 1.7059176930008939, 7)\n assert_almost_equal(pval, 0.0990829755522, 7)\n\n def test_trimmed1(self):\n # Test that center='trimmed' gives the same result as center='mean'\n # when proportiontocut=0.\n W1, pval1 = stats.levene(g1, g2, g3, center='mean')\n W2, pval2 = stats.levene(g1, g2, g3, center='trimmed',\n proportiontocut=0.0)\n assert_almost_equal(W1, W2)\n assert_almost_equal(pval1, pval2)\n\n def test_trimmed2(self):\n x = [1.2, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 100.0]\n y = [0.0, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 200.0]\n np.random.seed(1234)\n x2 = np.random.permutation(x)\n\n # Use center='trimmed'\n W0, pval0 = stats.levene(x, y, center='trimmed',\n proportiontocut=0.125)\n W1, pval1 = stats.levene(x2, y, center='trimmed',\n proportiontocut=0.125)\n # Trim the data here, and use center='mean'\n W2, pval2 = stats.levene(x[1:-1], y[1:-1], center='mean')\n # Result should be the same.\n assert_almost_equal(W0, W2)\n assert_almost_equal(W1, W2)\n assert_almost_equal(pval1, pval2)\n\n def test_equal_mean_median(self):\n x = np.linspace(-1, 1, 21)\n np.random.seed(1234)\n x2 = np.random.permutation(x)\n y = x**3\n W1, pval1 = stats.levene(x, y, center='mean')\n W2, pval2 = stats.levene(x2, y, center='median')\n assert_almost_equal(W1, W2)\n assert_almost_equal(pval1, pval2)\n\n def test_bad_keyword(self):\n x = np.linspace(-1, 1, 21)\n assert_raises(TypeError, stats.levene, x, x, portiontocut=0.1)\n\n def test_bad_center_value(self):\n x = np.linspace(-1, 1, 21)\n assert_raises(ValueError, stats.levene, x, x, center='trim')\n\n def test_too_few_args(self):\n assert_raises(ValueError, stats.levene, [1])\n\n def test_result_attributes(self):\n args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]\n res = stats.levene(*args)\n attributes = ('statistic', 'pvalue')\n check_named_results(res, attributes)\n\n\nclass TestBinomP(object):\n\n def test_data(self):\n pval = stats.binom_test(100, 250)\n assert_almost_equal(pval, 0.0018833009350757682, 11)\n pval = stats.binom_test(201, 405)\n assert_almost_equal(pval, 0.92085205962670713, 11)\n pval = stats.binom_test([682, 243], p=3.0/4)\n assert_almost_equal(pval, 0.38249155957481695, 11)\n\n def test_bad_len_x(self):\n # Length of x must be 1 or 2.\n assert_raises(ValueError, stats.binom_test, [1, 2, 3])\n\n def test_bad_n(self):\n # len(x) is 1, but n is invalid.\n # Missing n\n assert_raises(ValueError, stats.binom_test, [100])\n # n less than x[0]\n assert_raises(ValueError, stats.binom_test, [100], n=50)\n\n def test_bad_p(self):\n assert_raises(ValueError, stats.binom_test, [50, 50], p=2.0)\n\n def test_alternatives(self):\n res = stats.binom_test(51, 235, p=1./6, alternative='less')\n assert_almost_equal(res, 0.982022657605858)\n\n res = stats.binom_test(51, 235, p=1./6, alternative='greater')\n assert_almost_equal(res, 0.02654424571169085)\n\n res = stats.binom_test(51, 235, p=1./6, alternative='two-sided')\n assert_almost_equal(res, 0.0437479701823997)\n\n\nclass TestFligner(object):\n\n def test_data(self):\n # numbers from R: fligner.test in package stats\n x1 = np.arange(5)\n assert_array_almost_equal(stats.fligner(x1, x1**2),\n (3.2282229927203536, 0.072379187848207877),\n 11)\n\n def test_trimmed1(self):\n # Test that center='trimmed' gives the same result as center='mean'\n # when proportiontocut=0.\n Xsq1, pval1 = stats.fligner(g1, g2, g3, center='mean')\n Xsq2, pval2 = stats.fligner(g1, g2, 
g3, center='trimmed',\n proportiontocut=0.0)\n assert_almost_equal(Xsq1, Xsq2)\n assert_almost_equal(pval1, pval2)\n\n def test_trimmed2(self):\n x = [1.2, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 100.0]\n y = [0.0, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 200.0]\n # Use center='trimmed'\n Xsq1, pval1 = stats.fligner(x, y, center='trimmed',\n proportiontocut=0.125)\n # Trim the data here, and use center='mean'\n Xsq2, pval2 = stats.fligner(x[1:-1], y[1:-1], center='mean')\n # Result should be the same.\n assert_almost_equal(Xsq1, Xsq2)\n assert_almost_equal(pval1, pval2)\n\n # The following test looks reasonable at first, but fligner() uses the\n # function stats.rankdata(), and in one of the cases in this test,\n # there are ties, while in the other (because of normal rounding\n # errors) there are not. This difference leads to differences in the\n # third significant digit of W.\n #\n #def test_equal_mean_median(self):\n # x = np.linspace(-1,1,21)\n # y = x**3\n # W1, pval1 = stats.fligner(x, y, center='mean')\n # W2, pval2 = stats.fligner(x, y, center='median')\n # assert_almost_equal(W1, W2)\n # assert_almost_equal(pval1, pval2)\n\n def test_bad_keyword(self):\n x = np.linspace(-1, 1, 21)\n assert_raises(TypeError, stats.fligner, x, x, portiontocut=0.1)\n\n def test_bad_center_value(self):\n x = np.linspace(-1, 1, 21)\n assert_raises(ValueError, stats.fligner, x, x, center='trim')\n\n def test_bad_num_args(self):\n # Too few args raises ValueError.\n assert_raises(ValueError, stats.fligner, [1])\n\n def test_empty_arg(self):\n x = np.arange(5)\n assert_equal((np.nan, np.nan), stats.fligner(x, x**2, []))\n\n\nclass TestMood(object):\n def test_mood(self):\n # numbers from R: mood.test in package stats\n x1 = np.arange(5)\n assert_array_almost_equal(stats.mood(x1, x1**2),\n (-1.3830857299399906, 0.16663858066771478),\n 11)\n\n def test_mood_order_of_args(self):\n # z should change sign when the order of arguments changes, pvalue\n # should not change\n np.random.seed(1234)\n x1 = np.random.randn(10, 1)\n x2 = np.random.randn(15, 1)\n z1, p1 = stats.mood(x1, x2)\n z2, p2 = stats.mood(x2, x1)\n assert_array_almost_equal([z1, p1], [-z2, p2])\n\n def test_mood_with_axis_none(self):\n # Test with axis = None, compare with results from R\n x1 = [-0.626453810742332, 0.183643324222082, -0.835628612410047,\n 1.59528080213779, 0.329507771815361, -0.820468384118015,\n 0.487429052428485, 0.738324705129217, 0.575781351653492,\n -0.305388387156356, 1.51178116845085, 0.389843236411431,\n -0.621240580541804, -2.2146998871775, 1.12493091814311,\n -0.0449336090152309, -0.0161902630989461, 0.943836210685299,\n 0.821221195098089, 0.593901321217509]\n\n x2 = [-0.896914546624981, 0.184849184646742, 1.58784533120882,\n -1.13037567424629, -0.0802517565509893, 0.132420284381094,\n 0.707954729271733, -0.23969802417184, 1.98447393665293,\n -0.138787012119665, 0.417650750792556, 0.981752777463662,\n -0.392695355503813, -1.03966897694891, 1.78222896030858,\n -2.31106908460517, 0.878604580921265, 0.035806718015226,\n 1.01282869212708, 0.432265154539617, 2.09081920524915,\n -1.19992581964387, 1.58963820029007, 1.95465164222325,\n 0.00493777682814261, -2.45170638784613, 0.477237302613617,\n -0.596558168631403, 0.792203270299649, 0.289636710177348]\n\n x1 = np.array(x1)\n x2 = np.array(x2)\n x1.shape = (10, 2)\n x2.shape = (15, 2)\n assert_array_almost_equal(stats.mood(x1, x2, axis=None),\n [-1.31716607555, 0.18778296257])\n\n def test_mood_2d(self):\n # Test if the results of mood test in 2-D case are consistent with the\n # R result for 
the same inputs. Numbers from R mood.test().\n ny = 5\n np.random.seed(1234)\n x1 = np.random.randn(10, ny)\n x2 = np.random.randn(15, ny)\n z_vectest, pval_vectest = stats.mood(x1, x2)\n\n for j in range(ny):\n assert_array_almost_equal([z_vectest[j], pval_vectest[j]],\n stats.mood(x1[:, j], x2[:, j]))\n\n # inverse order of dimensions\n x1 = x1.transpose()\n x2 = x2.transpose()\n z_vectest, pval_vectest = stats.mood(x1, x2, axis=1)\n\n for i in range(ny):\n # check axis handling is self consistent\n assert_array_almost_equal([z_vectest[i], pval_vectest[i]],\n stats.mood(x1[i, :], x2[i, :]))\n\n def test_mood_3d(self):\n shape = (10, 5, 6)\n np.random.seed(1234)\n x1 = np.random.randn(*shape)\n x2 = np.random.randn(*shape)\n\n for axis in range(3):\n z_vectest, pval_vectest = stats.mood(x1, x2, axis=axis)\n # Tests that result for 3-D arrays is equal to that for the\n # same calculation on a set of 1-D arrays taken from the\n # 3-D array\n axes_idx = ([1, 2], [0, 2], [0, 1]) # the two axes != axis\n for i in range(shape[axes_idx[axis][0]]):\n for j in range(shape[axes_idx[axis][1]]):\n if axis == 0:\n slice1 = x1[:, i, j]\n slice2 = x2[:, i, j]\n elif axis == 1:\n slice1 = x1[i, :, j]\n slice2 = x2[i, :, j]\n else:\n slice1 = x1[i, j, :]\n slice2 = x2[i, j, :]\n\n assert_array_almost_equal([z_vectest[i, j],\n pval_vectest[i, j]],\n stats.mood(slice1, slice2))\n\n def test_mood_bad_arg(self):\n # Raise ValueError when the sum of the lengths of the args is\n # less than 3\n assert_raises(ValueError, stats.mood, [1], [])\n\n\nclass TestProbplot(object):\n\n def test_basic(self):\n np.random.seed(12345)\n x = stats.norm.rvs(size=20)\n osm, osr = stats.probplot(x, fit=False)\n osm_expected = [-1.8241636, -1.38768012, -1.11829229, -0.91222575,\n -0.73908135, -0.5857176, -0.44506467, -0.31273668,\n -0.18568928, -0.06158146, 0.06158146, 0.18568928,\n 0.31273668, 0.44506467, 0.5857176, 0.73908135,\n 0.91222575, 1.11829229, 1.38768012, 1.8241636]\n assert_allclose(osr, np.sort(x))\n assert_allclose(osm, osm_expected)\n\n res, res_fit = stats.probplot(x, fit=True)\n res_fit_expected = [1.05361841, 0.31297795, 0.98741609]\n assert_allclose(res_fit, res_fit_expected)\n\n def test_sparams_keyword(self):\n np.random.seed(123456)\n x = stats.norm.rvs(size=100)\n # Check that None, () and 0 (loc=0, for normal distribution) all work\n # and give the same results\n osm1, osr1 = stats.probplot(x, sparams=None, fit=False)\n osm2, osr2 = stats.probplot(x, sparams=0, fit=False)\n osm3, osr3 = stats.probplot(x, sparams=(), fit=False)\n assert_allclose(osm1, osm2)\n assert_allclose(osm1, osm3)\n assert_allclose(osr1, osr2)\n assert_allclose(osr1, osr3)\n # Check giving (loc, scale) params for normal distribution\n osm, osr = stats.probplot(x, sparams=(), fit=False)\n\n def test_dist_keyword(self):\n np.random.seed(12345)\n x = stats.norm.rvs(size=20)\n osm1, osr1 = stats.probplot(x, fit=False, dist='t', sparams=(3,))\n osm2, osr2 = stats.probplot(x, fit=False, dist=stats.t, sparams=(3,))\n assert_allclose(osm1, osm2)\n assert_allclose(osr1, osr2)\n\n assert_raises(ValueError, stats.probplot, x, dist='wrong-dist-name')\n assert_raises(AttributeError, stats.probplot, x, dist=[])\n\n class custom_dist(object):\n \"\"\"Some class that looks just enough like a distribution.\"\"\"\n def ppf(self, q):\n return stats.norm.ppf(q, loc=2)\n\n osm1, osr1 = stats.probplot(x, sparams=(2,), fit=False)\n osm2, osr2 = stats.probplot(x, dist=custom_dist(), fit=False)\n assert_allclose(osm1, osm2)\n assert_allclose(osr1, osr2)\n\n 
@pytest.mark.skipif(not have_matplotlib, reason=\"no matplotlib\")\n def test_plot_kwarg(self):\n np.random.seed(7654321)\n fig = plt.figure()\n fig.add_subplot(111)\n x = stats.t.rvs(3, size=100)\n res1, fitres1 = stats.probplot(x, plot=plt)\n plt.close()\n res2, fitres2 = stats.probplot(x, plot=None)\n res3 = stats.probplot(x, fit=False, plot=plt)\n plt.close()\n res4 = stats.probplot(x, fit=False, plot=None)\n # Check that results are consistent between combinations of `fit` and\n # `plot` keywords.\n assert_(len(res1) == len(res2) == len(res3) == len(res4) == 2)\n assert_allclose(res1, res2)\n assert_allclose(res1, res3)\n assert_allclose(res1, res4)\n assert_allclose(fitres1, fitres2)\n\n # Check that a Matplotlib Axes object is accepted\n fig = plt.figure()\n ax = fig.add_subplot(111)\n stats.probplot(x, fit=False, plot=ax)\n plt.close()\n\n def test_probplot_bad_args(self):\n # Raise ValueError when given an invalid distribution.\n assert_raises(ValueError, stats.probplot, [1], dist=\"plate_of_shrimp\")\n\n def test_empty(self):\n assert_equal(stats.probplot([], fit=False),\n (np.array([]), np.array([])))\n assert_equal(stats.probplot([], fit=True),\n ((np.array([]), np.array([])),\n (np.nan, np.nan, 0.0)))\n\n def test_array_of_size_one(self):\n with np.errstate(invalid='ignore'):\n assert_equal(stats.probplot([1], fit=True),\n ((np.array([0.]), np.array([1])),\n (np.nan, np.nan, 0.0)))\n\n\ndef test_wilcoxon_bad_arg():\n # Raise ValueError when two args of different lengths are given or\n # zero_method is unknown.\n assert_raises(ValueError, stats.wilcoxon, [1], [1, 2])\n assert_raises(ValueError, stats.wilcoxon, [1, 2], [1, 2], \"dummy\")\n\n\ndef test_wilcoxon_arg_type():\n # Should be able to accept list as arguments.\n # Address issue 6070.\n arr = [1, 2, 3, 0, -1, 3, 1, 2, 1, 1, 2]\n\n _ = stats.wilcoxon(arr, zero_method=\"pratt\")\n _ = stats.wilcoxon(arr, zero_method=\"zsplit\")\n _ = stats.wilcoxon(arr, zero_method=\"wilcox\")\n\n\nclass TestKstat(object):\n def test_moments_normal_distribution(self):\n np.random.seed(32149)\n data = np.random.randn(12345)\n moments = []\n for n in [1, 2, 3, 4]:\n moments.append(stats.kstat(data, n))\n\n expected = [0.011315, 1.017931, 0.05811052, 0.0754134]\n assert_allclose(moments, expected, rtol=1e-4)\n\n # test equivalence with `stats.moment`\n m1 = stats.moment(data, moment=1)\n m2 = stats.moment(data, moment=2)\n m3 = stats.moment(data, moment=3)\n assert_allclose((m1, m2, m3), expected[:-1], atol=0.02, rtol=1e-2)\n\n def test_empty_input(self):\n assert_raises(ValueError, stats.kstat, [])\n\n def test_nan_input(self):\n data = np.arange(10.)\n data[6] = np.nan\n\n assert_equal(stats.kstat(data), np.nan)\n\n def test_kstat_bad_arg(self):\n # Raise ValueError if n > 4 or n < 1.\n data = np.arange(10)\n for n in [0, 4.001]:\n assert_raises(ValueError, stats.kstat, data, n=n)\n\n\nclass TestKstatVar(object):\n def test_empty_input(self):\n assert_raises(ValueError, stats.kstatvar, [])\n\n def test_nan_input(self):\n data = np.arange(10.)\n data[6] = np.nan\n\n assert_equal(stats.kstat(data), np.nan)\n\n def test_bad_arg(self):\n # Raise ValueError is n is not 1 or 2.\n data = [1]\n n = 10\n assert_raises(ValueError, stats.kstatvar, data, n=n)\n\n\nclass TestPpccPlot(object):\n def setup_method(self):\n np.random.seed(7654321)\n self.x = stats.loggamma.rvs(5, size=500) + 5\n\n def test_basic(self):\n N = 5\n svals, ppcc = stats.ppcc_plot(self.x, -10, 10, N=N)\n ppcc_expected = [0.21139644, 0.21384059, 0.98766719, 0.97980182,\n 
0.93519298]\n assert_allclose(svals, np.linspace(-10, 10, num=N))\n assert_allclose(ppcc, ppcc_expected)\n\n def test_dist(self):\n # Test that we can specify distributions both by name and as objects.\n svals1, ppcc1 = stats.ppcc_plot(self.x, -10, 10, dist='tukeylambda')\n svals2, ppcc2 = stats.ppcc_plot(self.x, -10, 10,\n dist=stats.tukeylambda)\n assert_allclose(svals1, svals2, rtol=1e-20)\n assert_allclose(ppcc1, ppcc2, rtol=1e-20)\n # Test that 'tukeylambda' is the default dist\n svals3, ppcc3 = stats.ppcc_plot(self.x, -10, 10)\n assert_allclose(svals1, svals3, rtol=1e-20)\n assert_allclose(ppcc1, ppcc3, rtol=1e-20)\n\n @pytest.mark.skipif(not have_matplotlib, reason=\"no matplotlib\")\n def test_plot_kwarg(self):\n # Check with the matplotlib.pyplot module\n fig = plt.figure()\n fig.add_subplot(111)\n stats.ppcc_plot(self.x, -20, 20, plot=plt)\n plt.close()\n\n # Check that a Matplotlib Axes object is accepted\n fig.add_subplot(111)\n ax = fig.add_subplot(111)\n stats.ppcc_plot(self.x, -20, 20, plot=ax)\n plt.close()\n\n def test_invalid_inputs(self):\n # `b` has to be larger than `a`\n assert_raises(ValueError, stats.ppcc_plot, self.x, 1, 0)\n\n # Raise ValueError when given an invalid distribution.\n assert_raises(ValueError, stats.ppcc_plot, [1, 2, 3], 0, 1,\n dist=\"plate_of_shrimp\")\n\n def test_empty(self):\n # For consistency with probplot return for one empty array,\n # ppcc contains all zeros and svals is the same as for normal array\n # input.\n svals, ppcc = stats.ppcc_plot([], 0, 1)\n assert_allclose(svals, np.linspace(0, 1, num=80))\n assert_allclose(ppcc, np.zeros(80, dtype=float))\n\n\nclass TestPpccMax(object):\n def test_ppcc_max_bad_arg(self):\n # Raise ValueError when given an invalid distribution.\n data = [1]\n assert_raises(ValueError, stats.ppcc_max, data, dist=\"plate_of_shrimp\")\n\n def test_ppcc_max_basic(self):\n np.random.seed(1234567)\n x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000) + 1e4\n # On Python 2.6 the result is accurate to 5 decimals. On Python >= 2.7\n # it is accurate up to 16 decimals\n assert_almost_equal(stats.ppcc_max(x), -0.71215366521264145, decimal=5)\n\n def test_dist(self):\n np.random.seed(1234567)\n x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000) + 1e4\n\n # Test that we can specify distributions both by name and as objects.\n max1 = stats.ppcc_max(x, dist='tukeylambda')\n max2 = stats.ppcc_max(x, dist=stats.tukeylambda)\n assert_almost_equal(max1, -0.71215366521264145, decimal=5)\n assert_almost_equal(max2, -0.71215366521264145, decimal=5)\n\n # Test that 'tukeylambda' is the default dist\n max3 = stats.ppcc_max(x)\n assert_almost_equal(max3, -0.71215366521264145, decimal=5)\n\n def test_brack(self):\n np.random.seed(1234567)\n x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000) + 1e4\n assert_raises(ValueError, stats.ppcc_max, x, brack=(0.0, 1.0, 0.5))\n\n # On Python 2.6 the result is accurate to 5 decimals. On Python >= 2.7\n # it is accurate up to 16 decimals\n assert_almost_equal(stats.ppcc_max(x, brack=(0, 1)),\n -0.71215366521264145, decimal=5)\n\n # On Python 2.6 the result is accurate to 5 decimals. On Python >= 2.7\n # it is accurate up to 16 decimals\n assert_almost_equal(stats.ppcc_max(x, brack=(-2, 2)),\n -0.71215366521264145, decimal=5)\n\n\nclass TestBoxcox_llf(object):\n\n def test_basic(self):\n np.random.seed(54321)\n x = stats.norm.rvs(size=10000, loc=10)\n lmbda = 1\n llf = stats.boxcox_llf(lmbda, x)\n llf_expected = -x.size / 2. 
* np.log(np.sum(x.std()**2))\n assert_allclose(llf, llf_expected)\n\n def test_array_like(self):\n np.random.seed(54321)\n x = stats.norm.rvs(size=100, loc=10)\n lmbda = 1\n llf = stats.boxcox_llf(lmbda, x)\n llf2 = stats.boxcox_llf(lmbda, list(x))\n assert_allclose(llf, llf2, rtol=1e-12)\n\n def test_2d_input(self):\n # Note: boxcox_llf() was already working with 2-D input (sort of), so\n # keep it like that. boxcox() doesn't work with 2-D input though, due\n # to brent() returning a scalar.\n np.random.seed(54321)\n x = stats.norm.rvs(size=100, loc=10)\n lmbda = 1\n llf = stats.boxcox_llf(lmbda, x)\n llf2 = stats.boxcox_llf(lmbda, np.vstack([x, x]).T)\n assert_allclose([llf, llf], llf2, rtol=1e-12)\n\n def test_empty(self):\n assert_(np.isnan(stats.boxcox_llf(1, [])))\n\n\nclass TestBoxcox(object):\n\n def test_fixed_lmbda(self):\n np.random.seed(12345)\n x = stats.loggamma.rvs(5, size=50) + 5\n xt = stats.boxcox(x, lmbda=1)\n assert_allclose(xt, x - 1)\n xt = stats.boxcox(x, lmbda=-1)\n assert_allclose(xt, 1 - 1/x)\n\n xt = stats.boxcox(x, lmbda=0)\n assert_allclose(xt, np.log(x))\n\n # Also test that array_like input works\n xt = stats.boxcox(list(x), lmbda=0)\n assert_allclose(xt, np.log(x))\n\n def test_lmbda_None(self):\n np.random.seed(1234567)\n # Start from normal rv's, do inverse transform to check that\n # optimization function gets close to the right answer.\n np.random.seed(1245)\n lmbda = 2.5\n x = stats.norm.rvs(loc=10, size=50000)\n x_inv = (x * lmbda + 1)**(-lmbda)\n xt, maxlog = stats.boxcox(x_inv)\n\n assert_almost_equal(maxlog, -1 / lmbda, decimal=2)\n\n def test_alpha(self):\n np.random.seed(1234)\n x = stats.loggamma.rvs(5, size=50) + 5\n\n # Some regular values for alpha, on a small sample size\n _, _, interval = stats.boxcox(x, alpha=0.75)\n assert_allclose(interval, [4.004485780226041, 5.138756355035744])\n _, _, interval = stats.boxcox(x, alpha=0.05)\n assert_allclose(interval, [1.2138178554857557, 8.209033272375663])\n\n # Try some extreme values, see we don't hit the N=500 limit\n x = stats.loggamma.rvs(7, size=500) + 15\n _, _, interval = stats.boxcox(x, alpha=0.001)\n assert_allclose(interval, [0.3988867, 11.40553131])\n _, _, interval = stats.boxcox(x, alpha=0.999)\n assert_allclose(interval, [5.83316246, 5.83735292])\n\n def test_boxcox_bad_arg(self):\n # Raise ValueError if any data value is negative.\n x = np.array([-1])\n assert_raises(ValueError, stats.boxcox, x)\n\n def test_empty(self):\n assert_(stats.boxcox([]).shape == (0,))\n\n\nclass TestBoxcoxNormmax(object):\n def setup_method(self):\n np.random.seed(12345)\n self.x = stats.loggamma.rvs(5, size=50) + 5\n\n def test_pearsonr(self):\n maxlog = stats.boxcox_normmax(self.x)\n assert_allclose(maxlog, 1.804465, rtol=1e-6)\n\n def test_mle(self):\n maxlog = stats.boxcox_normmax(self.x, method='mle')\n assert_allclose(maxlog, 1.758101, rtol=1e-6)\n\n # Check that boxcox() uses 'mle'\n _, maxlog_boxcox = stats.boxcox(self.x)\n assert_allclose(maxlog_boxcox, maxlog)\n\n def test_all(self):\n maxlog_all = stats.boxcox_normmax(self.x, method='all')\n assert_allclose(maxlog_all, [1.804465, 1.758101], rtol=1e-6)\n\n\nclass TestBoxcoxNormplot(object):\n def setup_method(self):\n np.random.seed(7654321)\n self.x = stats.loggamma.rvs(5, size=500) + 5\n\n def test_basic(self):\n N = 5\n lmbdas, ppcc = stats.boxcox_normplot(self.x, -10, 10, N=N)\n ppcc_expected = [0.57783375, 0.83610988, 0.97524311, 0.99756057,\n 0.95843297]\n assert_allclose(lmbdas, np.linspace(-10, 10, num=N))\n assert_allclose(ppcc, 
ppcc_expected)\n\n @pytest.mark.skipif(not have_matplotlib, reason=\"no matplotlib\")\n def test_plot_kwarg(self):\n # Check with the matplotlib.pyplot module\n fig = plt.figure()\n fig.add_subplot(111)\n stats.boxcox_normplot(self.x, -20, 20, plot=plt)\n plt.close()\n\n # Check that a Matplotlib Axes object is accepted\n fig.add_subplot(111)\n ax = fig.add_subplot(111)\n stats.boxcox_normplot(self.x, -20, 20, plot=ax)\n plt.close()\n\n def test_invalid_inputs(self):\n # `lb` has to be larger than `la`\n assert_raises(ValueError, stats.boxcox_normplot, self.x, 1, 0)\n # `x` can not contain negative values\n assert_raises(ValueError, stats.boxcox_normplot, [-1, 1], 0, 1)\n\n def test_empty(self):\n assert_(stats.boxcox_normplot([], 0, 1).size == 0)\n\n\nclass TestCircFuncs(object):\n def test_circfuncs(self):\n x = np.array([355, 5, 2, 359, 10, 350])\n M = stats.circmean(x, high=360)\n Mval = 0.167690146\n assert_allclose(M, Mval, rtol=1e-7)\n\n V = stats.circvar(x, high=360)\n Vval = 42.51955609\n assert_allclose(V, Vval, rtol=1e-7)\n\n S = stats.circstd(x, high=360)\n Sval = 6.520702116\n assert_allclose(S, Sval, rtol=1e-7)\n\n def test_circfuncs_small(self):\n x = np.array([20, 21, 22, 18, 19, 20.5, 19.2])\n M1 = x.mean()\n M2 = stats.circmean(x, high=360)\n assert_allclose(M2, M1, rtol=1e-5)\n\n V1 = x.var()\n V2 = stats.circvar(x, high=360)\n assert_allclose(V2, V1, rtol=1e-4)\n\n S1 = x.std()\n S2 = stats.circstd(x, high=360)\n assert_allclose(S2, S1, rtol=1e-4)\n\n def test_circmean_axis(self):\n x = np.array([[355, 5, 2, 359, 10, 350],\n [351, 7, 4, 352, 9, 349],\n [357, 9, 8, 358, 4, 356]])\n M1 = stats.circmean(x, high=360)\n M2 = stats.circmean(x.ravel(), high=360)\n assert_allclose(M1, M2, rtol=1e-14)\n\n M1 = stats.circmean(x, high=360, axis=1)\n M2 = [stats.circmean(x[i], high=360) for i in range(x.shape[0])]\n assert_allclose(M1, M2, rtol=1e-14)\n\n M1 = stats.circmean(x, high=360, axis=0)\n M2 = [stats.circmean(x[:, i], high=360) for i in range(x.shape[1])]\n assert_allclose(M1, M2, rtol=1e-14)\n\n def test_circvar_axis(self):\n x = np.array([[355, 5, 2, 359, 10, 350],\n [351, 7, 4, 352, 9, 349],\n [357, 9, 8, 358, 4, 356]])\n\n V1 = stats.circvar(x, high=360)\n V2 = stats.circvar(x.ravel(), high=360)\n assert_allclose(V1, V2, rtol=1e-11)\n\n V1 = stats.circvar(x, high=360, axis=1)\n V2 = [stats.circvar(x[i], high=360) for i in range(x.shape[0])]\n assert_allclose(V1, V2, rtol=1e-11)\n\n V1 = stats.circvar(x, high=360, axis=0)\n V2 = [stats.circvar(x[:, i], high=360) for i in range(x.shape[1])]\n assert_allclose(V1, V2, rtol=1e-11)\n\n def test_circstd_axis(self):\n x = np.array([[355, 5, 2, 359, 10, 350],\n [351, 7, 4, 352, 9, 349],\n [357, 9, 8, 358, 4, 356]])\n\n S1 = stats.circstd(x, high=360)\n S2 = stats.circstd(x.ravel(), high=360)\n assert_allclose(S1, S2, rtol=1e-11)\n\n S1 = stats.circstd(x, high=360, axis=1)\n S2 = [stats.circstd(x[i], high=360) for i in range(x.shape[0])]\n assert_allclose(S1, S2, rtol=1e-11)\n\n S1 = stats.circstd(x, high=360, axis=0)\n S2 = [stats.circstd(x[:, i], high=360) for i in range(x.shape[1])]\n assert_allclose(S1, S2, rtol=1e-11)\n\n def test_circfuncs_array_like(self):\n x = [355, 5, 2, 359, 10, 350]\n assert_allclose(stats.circmean(x, high=360), 0.167690146, rtol=1e-7)\n assert_allclose(stats.circvar(x, high=360), 42.51955609, rtol=1e-7)\n assert_allclose(stats.circstd(x, high=360), 6.520702116, rtol=1e-7)\n\n def test_empty(self):\n assert_(np.isnan(stats.circmean([])))\n assert_(np.isnan(stats.circstd([])))\n 
assert_(np.isnan(stats.circvar([])))\n\n def test_circmean_scalar(self):\n x = 1.\n M1 = x\n M2 = stats.circmean(x)\n assert_allclose(M2, M1, rtol=1e-5)\n\n def test_circmean_range(self):\n # regression test for gh-6420: circmean(..., high, low) must be\n # between `high` and `low`\n m = stats.circmean(np.arange(0, 2, 0.1), np.pi, -np.pi)\n assert_(m < np.pi)\n assert_(m > -np.pi)\n\n def test_circfuncs_unit8(self):\n # regression test for gh-7255: overflow when working with\n # numpy uint8 data type\n x = np.array([150, 10], dtype='uint8')\n assert_equal(stats.circmean(x, high=180), 170.0)\n assert_allclose(stats.circvar(x, high=180), 437.45871686, rtol=1e-7)\n assert_allclose(stats.circstd(x, high=180), 20.91551378, rtol=1e-7)\n\ndef test_accuracy_wilcoxon():\n freq = [1, 4, 16, 15, 8, 4, 5, 1, 2]\n nums = range(-4, 5)\n x = np.concatenate([[u] * v for u, v in zip(nums, freq)])\n y = np.zeros(x.size)\n\n T, p = stats.wilcoxon(x, y, \"pratt\")\n assert_allclose(T, 423)\n assert_allclose(p, 0.00197547303533107)\n\n T, p = stats.wilcoxon(x, y, \"zsplit\")\n assert_allclose(T, 441)\n assert_allclose(p, 0.0032145343172473055)\n\n T, p = stats.wilcoxon(x, y, \"wilcox\")\n assert_allclose(T, 327)\n assert_allclose(p, 0.00641346115861)\n\n # Test the 'correction' option, using values computed in R with:\n # > wilcox.test(x, y, paired=TRUE, exact=FALSE, correct={FALSE,TRUE})\n x = np.array([120, 114, 181, 188, 180, 146, 121, 191, 132, 113, 127, 112])\n y = np.array([133, 143, 119, 189, 112, 199, 198, 113, 115, 121, 142, 187])\n T, p = stats.wilcoxon(x, y, correction=False)\n assert_equal(T, 34)\n assert_allclose(p, 0.6948866, rtol=1e-6)\n T, p = stats.wilcoxon(x, y, correction=True)\n assert_equal(T, 34)\n assert_allclose(p, 0.7240817, rtol=1e-6)\n\n\ndef test_wilcoxon_result_attributes():\n x = np.array([120, 114, 181, 188, 180, 146, 121, 191, 132, 113, 127, 112])\n y = np.array([133, 143, 119, 189, 112, 199, 198, 113, 115, 121, 142, 187])\n res = stats.wilcoxon(x, y, correction=False)\n attributes = ('statistic', 'pvalue')\n check_named_results(res, attributes)\n\n\ndef test_wilcoxon_tie():\n # Regression test for gh-2391.\n # Corresponding R code is:\n # > result = wilcox.test(rep(0.1, 10), exact=FALSE, correct=FALSE)\n # > result$p.value\n # [1] 0.001565402\n # > result = wilcox.test(rep(0.1, 10), exact=FALSE, correct=TRUE)\n # > result$p.value\n # [1] 0.001904195\n stat, p = stats.wilcoxon([0.1] * 10)\n expected_p = 0.001565402\n assert_equal(stat, 0)\n assert_allclose(p, expected_p, rtol=1e-6)\n\n stat, p = stats.wilcoxon([0.1] * 10, correction=True)\n expected_p = 0.001904195\n assert_equal(stat, 0)\n assert_allclose(p, expected_p, rtol=1e-6)\n\n\nclass TestMedianTest(object):\n\n def test_bad_n_samples(self):\n # median_test requires at least two samples.\n assert_raises(ValueError, stats.median_test, [1, 2, 3])\n\n def test_empty_sample(self):\n # Each sample must contain at least one value.\n assert_raises(ValueError, stats.median_test, [], [1, 2, 3])\n\n def test_empty_when_ties_ignored(self):\n # The grand median is 1, and all values in the first argument are\n # equal to the grand median. 
With ties=\"ignore\", those values are\n # ignored, which results in the first sample being (in effect) empty.\n # This should raise a ValueError.\n assert_raises(ValueError, stats.median_test,\n [1, 1, 1, 1], [2, 0, 1], [2, 0], ties=\"ignore\")\n\n def test_empty_contingency_row(self):\n # The grand median is 1, and with the default ties=\"below\", all the\n # values in the samples are counted as being below the grand median.\n # This would result a row of zeros in the contingency table, which is\n # an error.\n assert_raises(ValueError, stats.median_test, [1, 1, 1], [1, 1, 1])\n\n # With ties=\"above\", all the values are counted as above the\n # grand median.\n assert_raises(ValueError, stats.median_test, [1, 1, 1], [1, 1, 1],\n ties=\"above\")\n\n def test_bad_ties(self):\n assert_raises(ValueError, stats.median_test, [1, 2, 3], [4, 5],\n ties=\"foo\")\n\n def test_bad_nan_policy(self):\n assert_raises(ValueError, stats.median_test, [1, 2, 3], [4, 5], nan_policy='foobar')\n\n def test_bad_keyword(self):\n assert_raises(TypeError, stats.median_test, [1, 2, 3], [4, 5],\n foo=\"foo\")\n\n def test_simple(self):\n x = [1, 2, 3]\n y = [1, 2, 3]\n stat, p, med, tbl = stats.median_test(x, y)\n\n # The median is floating point, but this equality test should be safe.\n assert_equal(med, 2.0)\n\n assert_array_equal(tbl, [[1, 1], [2, 2]])\n\n # The expected values of the contingency table equal the contingency\n # table, so the statistic should be 0 and the p-value should be 1.\n assert_equal(stat, 0)\n assert_equal(p, 1)\n\n def test_ties_options(self):\n # Test the contingency table calculation.\n x = [1, 2, 3, 4]\n y = [5, 6]\n z = [7, 8, 9]\n # grand median is 5.\n\n # Default 'ties' option is \"below\".\n stat, p, m, tbl = stats.median_test(x, y, z)\n assert_equal(m, 5)\n assert_equal(tbl, [[0, 1, 3], [4, 1, 0]])\n\n stat, p, m, tbl = stats.median_test(x, y, z, ties=\"ignore\")\n assert_equal(m, 5)\n assert_equal(tbl, [[0, 1, 3], [4, 0, 0]])\n\n stat, p, m, tbl = stats.median_test(x, y, z, ties=\"above\")\n assert_equal(m, 5)\n assert_equal(tbl, [[0, 2, 3], [4, 0, 0]])\n\n def test_nan_policy_options(self):\n x = [1, 2, np.nan]\n y = [4, 5, 6]\n mt1 = stats.median_test(x, y, nan_policy='propagate')\n s, p, m, t = stats.median_test(x, y, nan_policy='omit')\n\n assert_equal(mt1, (np.nan, np.nan, np.nan, None))\n assert_allclose(s, 0.31250000000000006)\n assert_allclose(p, 0.57615012203057869)\n assert_equal(m, 4.0)\n assert_equal(t, np.array([[0, 2],[2, 1]]))\n assert_raises(ValueError, stats.median_test, x, y, nan_policy='raise')\n\n def test_basic(self):\n # median_test calls chi2_contingency to compute the test statistic\n # and p-value. Make sure it hasn't screwed up the call...\n\n x = [1, 2, 3, 4, 5]\n y = [2, 4, 6, 8]\n\n stat, p, m, tbl = stats.median_test(x, y)\n assert_equal(m, 4)\n assert_equal(tbl, [[1, 2], [4, 2]])\n\n exp_stat, exp_p, dof, e = stats.chi2_contingency(tbl)\n assert_allclose(stat, exp_stat)\n assert_allclose(p, exp_p)\n\n stat, p, m, tbl = stats.median_test(x, y, lambda_=0)\n assert_equal(m, 4)\n assert_equal(tbl, [[1, 2], [4, 2]])\n\n exp_stat, exp_p, dof, e = stats.chi2_contingency(tbl, lambda_=0)\n assert_allclose(stat, exp_stat)\n assert_allclose(p, exp_p)\n\n stat, p, m, tbl = stats.median_test(x, y, correction=False)\n assert_equal(m, 4)\n assert_equal(tbl, [[1, 2], [4, 2]])\n\n exp_stat, exp_p, dof, e = stats.chi2_contingency(tbl, correction=False)\n assert_allclose(stat, exp_stat)\n assert_allclose(p, exp_p)\n\n"
] |
[
[
"scipy.stats.norm.ppf",
"scipy._lib._numpy_compat.suppress_warnings",
"numpy.linspace",
"scipy.stats.ppcc_max",
"scipy.stats.circstd",
"numpy.seterr",
"scipy.stats.kstat",
"scipy.stats.tukeylambda.rvs",
"numpy.random.randn",
"scipy.stats.gumbel_l.logsf",
"scipy.stats.median_test",
"numpy.mean",
"scipy.stats.levene",
"scipy.stats.boxcox_llf",
"scipy.stats.anderson_ksamp",
"scipy.stats.loggamma.rvs",
"scipy.stats.mood",
"numpy.testing.assert_equal",
"scipy.stats.chi2_contingency",
"numpy.arange",
"scipy.stats.bayes_mvs",
"scipy.stats.ppcc_plot",
"numpy.testing.assert_almost_equal",
"scipy.stats.boxcox_normplot",
"matplotlib.pyplot.close",
"scipy.stats.circmean",
"scipy.stats.t.rvs",
"numpy.zeros",
"numpy.testing.assert_array_almost_equal",
"matplotlib.pyplot.figure",
"numpy.log",
"scipy.stats.moment",
"scipy.stats.gumbel_l.logcdf",
"scipy.stats.anderson",
"scipy.stats.probplot",
"scipy.stats.wilcoxon",
"scipy.stats.norm.rvs",
"numpy.testing.assert_",
"numpy.testing.assert_allclose",
"numpy.errstate",
"numpy.array",
"numpy.random.RandomState",
"numpy.testing.assert_warns",
"scipy.stats.gumbel_l.fit",
"numpy.random.seed",
"scipy.stats.bartlett",
"scipy.stats.ansari",
"scipy.stats.binom_test",
"scipy.stats.boxcox",
"numpy.sort",
"numpy.ones",
"numpy.testing.assert_array_less",
"scipy.stats.boxcox_normmax",
"scipy.stats.fligner",
"numpy.random.permutation",
"scipy.stats.shapiro",
"numpy.testing.assert_array_equal",
"scipy.stats.mvsdist",
"scipy.stats.circvar",
"numpy.vstack"
]
] |
anajikadam17/prediction-of-segments
|
[
"eaf50b1bdc2cfafce121310ea588796fbcd10d3f"
] |
[
"code.py"
] |
[
"# --------------\n#import the packages\r\nimport pandas as pd\r\nimport numpy as np\r\nimport os\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\nimport xgboost as xgb\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.feature_selection import SelectFromModel\r\nfrom sklearn.feature_selection import RFE\r\nfrom sklearn.metrics import f1_score\r\nfrom sklearn.metrics import accuracy_score,classification_report,confusion_matrix\r\nfrom sklearn.preprocessing import OneHotEncoder\r\n# code starts here\r\nseg=pd.read_csv(path,encoding='latin1')\r\ncategorical=seg.select_dtypes(include='O')\r\nnumerical=seg.select_dtypes(include=np.number)\r\nprint(categorical.head())\r\nprint(numerical.head())\r\n# en=OneHotEncoder()\r\n# en.fit_transform(categorical)\r\n# categorical=pd.get_dummies(categorical)\r\ndf=pd.concat([numerical,pd.get_dummies(categorical)],axis=1)\r\n\n\n\n# --------------\n# code starts here\nfrom xgboost import XGBClassifier\nX=df.drop(columns=['customer id','segments'])\ny=df['segments']\nX_train, X_test, y_train, y_test=train_test_split(X,y,test_size=0.3,random_state=42,shuffle=True)\nclassifier=XGBClassifier(random_state=2)\nclassifier.fit(X_train,y_train)\ny_pred=classifier.predict(X_test)\nf1=f1_score(y_test,y_pred,average='macro')\nxgb_cr=classification_report(y_pred,y_test)\nprint(xgb_cr)\n# code ends here\n\n\n# --------------\nfrom sklearn.model_selection import GridSearchCV\nparameters={'learning_rate':[0.1,0.15,0.2,0.25,0.3],\n 'max_depth':range(1,3)}\n# code start here\ngrid_search=GridSearchCV(estimator=classifier,param_grid=parameters,n_jobs=-1,verbose=4)\ngrid_search.fit(X_train,y_train)\ngrid_predictions=grid_search.predict(X_test)\ngrid_f1=f1_score(y_test,grid_predictions,average='macro')\nreport=classification_report(grid_predictions,y_test)\nprint(report)\n# code ends here\n\n\n# --------------\nfrom sklearn.ensemble import RandomForestClassifier\n# code starts here\nmodel=RandomForestClassifier(random_state=2)\nmodel.fit(X_train,y_train)\ny_pred=model.predict(X_test)\nf1=f1_score(y_test,y_pred,average='macro')\nreport=classification_report(y_test,y_pred)\nprint(report)\n\n\n"
] |
[
[
"sklearn.model_selection.GridSearchCV",
"pandas.read_csv",
"sklearn.ensemble.RandomForestClassifier",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.f1_score",
"sklearn.metrics.classification_report",
"pandas.get_dummies"
]
] |
arunsundar022/Corona-tweet-sentiment-analysis
|
[
"2fa37b3f1046bdbd230ce6be7a5f07c73a7d82b2"
] |
[
"nlp.py"
] |
[
"import nltk\nfrom nltk.corpus import stopwords\nfrom tensorflow.keras.preprocessing.text import Tokenizer\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\n\n\ndef nlp_ready(string):\n sentence = nltk.word_tokenize(string)\n lemma = nltk.WordNetLemmatizer()\n lem_output = ' '.join([lemma.lemmatize(w) for w in sentence])\n tokenizer = Tokenizer(num_words=5000, lower=True)\n tokenizer.fit_on_texts(lem_output)\n wordIndex = len(tokenizer.word_index) + 1\n clean_text = tokenizer.texts_to_sequences(lem_output)\n clean_text = pad_sequences(clean_text, maxlen=30)\n return clean_text\n"
] |
[
[
"tensorflow.keras.preprocessing.text.Tokenizer",
"tensorflow.keras.preprocessing.sequence.pad_sequences"
]
] |
DemirTonchev/iambandit
|
[
"8b445ebb7d1c65b8c27381fb574e499c07bebff6"
] |
[
"policy.py"
] |
[
"import numpy as np\nfrom base import bernuolli, random_argmax, kl_bernulli\nfrom itertools import cycle\nfrom numpy.random import beta\nfrom functools import partial\nfrom numpy import linalg\n\ndef safe_min_1d(array):\n \"\"\"Useful for taking min arm index for some policies\n \"\"\"\n if len(array) > 0:\n return np.min(array)\n else:\n return None\n\nclass BasePolicy():\n\n def __init__(self, k_arms):\n \"\"\"\n k_arms (int)- number of arms\n \"\"\"\n assert k_arms > 0, 'Number of arms should be positive integer'\n self.k_arms = k_arms\n self.arms_data = [[] for _ in range(k_arms)]\n self.pulls = np.zeros(k_arms)\n self.estimated_means = np.zeros(k_arms)\n self._t = 1\n\n\n def pick_action(self):\n raise NotImplementedError(\"This method needs to be implemented \\\n for the simulation to work\")\n\n def observe_reward(self, arm_idx, reward):\n \"\"\"This method should comply to the inputs\n \"\"\"\n raise NotImplementedError('Not implemented in class {}'.format(self.__class__.__name__))\n\n def pick_arm(self):\n \"\"\"Again not sure which I like more\n \"\"\"\n return self.pick_action(self)\n\n def reset_state(self):\n raise NotImplementedError('Not implemented')\n\n def __repr__(self):\n return '{} with {} arms'.format(self.__class__.__name__, self.k_arms)\n\n def get_estimated_means(self):\n return self.estimated_means\n\nclass ConstantPick(BasePolicy):\n \"\"\"Pick always the same arm, for testing purposes\n \"\"\"\n\n def __init__(self, k_arms, pick_arm = 0):\n super().__init__(k_arms)\n self._arm_idxs = list(range(self.k_arms))\n self.pick_arm = pick_arm\n\n def pick_action(self):\n return self.pick_arm\n\n def observe_reward(self, arm_idx, reward):\n pass\n\nclass UniformPolicy(BasePolicy):\n \"\"\"Explores each arm K times then until end of game chooses the best arm from the data\n \"\"\"\n __name__ = 'UniformPolicy'\n\n def __init__(self, k_arms, n_try_each_arm=100, horizont = 1000):\n\n if n_try_each_arm > horizont/k_arms:\n print('not enough horizont to try all arms, continue anyway')\n super().__init__(k_arms)\n self.n_try_each_arm = n_try_each_arm\n self.horizont = horizont\n self.arms_data = [[] for _ in range(k_arms)]\n self.pulls = np.zeros(k_arms)\n self.estimated_means = np.zeros(k_arms)\n self._explore_phase = True\n self.best_arm = None\n\n def pick_action(self):\n if self._explore_phase:\n # return the index of arm that needs exploring\n arm_idx = safe_min_1d(np.where(self.pulls<self.n_try_each_arm)[0])\n if arm_idx is not None:\n return arm_idx\n else:\n # return the best estimated arms\n self._explore_phase = False\n self.best_arm = np.argmax(self.estimated_means)\n return self.best_arm\n else:\n return self.best_arm\n\n def observe_reward(self, arm_idx, reward):\n self.arms_data[arm_idx].append(reward)\n n = len(self.arms_data[arm_idx])\n self.estimated_means[arm_idx] = reward/n + (n-1)*self.estimated_means[arm_idx]/n\n self.pulls[arm_idx] += 1\n\n def get_estimated_means(self):\n return self.estimated_means\n\nclass EpsilonGreedy(BasePolicy):\n\n def __init__(self, k_arms, epsilon = lambda t: 1/t):\n\n super().__init__(k_arms)\n if not callable(epsilon):\n self.epsilon = lambda t: epsilon\n else:\n self.epsilon = epsilon\n self.arms_data = [[] for _ in range(k_arms)]\n self.pulls = np.zeros(k_arms)\n self.estimated_means = np.zeros(k_arms)\n self._explore_phase = True\n self._arm_idxs = range(self.k_arms)\n self.best_arm = None\n self._t = 1 # time keeping for epsilon if function of t\n\n def pick_action(self):\n if np.random.rand() < self.epsilon(self._t): # 
logic for choosing different arm\n arm_idx = np.random.choice(self._arm_idxs)\n return arm_idx\n else:\n return self.best_arm\n\n def observe_reward(self, arm_idx, reward):\n self.arms_data[arm_idx].append(reward)\n n = len(self.arms_data[arm_idx])\n self.estimated_means[arm_idx] = reward/n + (n-1)*self.estimated_means[arm_idx]/n\n self.pulls[arm_idx] += 1\n self.best_arm = random_argmax(self.estimated_means)\n self._t += 1\n\nclass RobbinsR1Rule(BasePolicy):\n \"\"\"The rules used in Robbins paper dubbed R1. Works only for Bernulli arms\n \"\"\"\n def __init__(self, k_arms):\n\n super().__init__(k_arms)\n\n self.pulls = np.zeros(k_arms)\n self.estimated_means = np.zeros(k_arms)\n self.arm_idx = np.random.randint(0, k_arms) # WLG select first best arm randomly\n self._arm_idxs = list(range(self.k_arms))\n\n def pick_action(self):\n return self.arm_idx\n\n def observe_reward(self, arm_idx, reward):\n self._calculate_stats(arm_idx, reward)\n if reward == 0:\n self._arm_idxs.remove(self.arm_idx)\n arm_idx = np.random.choice(self._arm_idxs)\n self._arm_idxs.append(self.arm_idx)\n self.arm_idx = arm_idx\n return arm_idx\n\n def _calculate_stats(self, arm_idx, reward):\n self.arms_data[arm_idx].append(reward) # maybe do this with numpy arrays\n n = len(self.arms_data[arm_idx])\n self.estimated_means[arm_idx] = reward/n + (n-1)*self.estimated_means[arm_idx]/n\n self.pulls[arm_idx] += 1 # lets leave this here for now\n\nclass SuccessiveElimination(BasePolicy):\n\n def __init__(self, k_arms, radius=lambda n, horizont: np.sqrt(2*np.log(horizont)/n), horizont= 1000):\n\n super().__init__(k_arms)\n if not callable(radius):\n self.radius = lambda x, t: radius\n else:\n self.radius = radius\n self.horizont = horizont\n self.arms_data = [[] for _ in range(k_arms)]\n self.pulls = np.zeros(k_arms)\n self.estimated_means = np.zeros(k_arms)\n self._active_arms = list(range(self.k_arms))\n self._active_arms_iter = cycle(self._active_arms) # build iterator\n self._deactivate = False\n self._UCBs = np.full(k_arms, np.inf)\n self._LCBs = np.zeros(k_arms)\n self._eliminated_arms = []\n\n def pick_action(self):\n arm_idx = next(self._active_arms_iter)\n while arm_idx in self._eliminated_arms:\n arm_idx = next(self._active_arms_iter)\n return arm_idx\n\n def observe_reward(self, arm_idx, reward):\n self._calculate_stats(arm_idx, reward)\n self._eliminated_arms = np.where(self._UCBs<self._LCBs.max())[0] # returns tuple\n\n\n def _calculate_stats(self, arm_idx, reward):\n self.arms_data[arm_idx].append(reward) # maybe do this with numpy arrays\n n = len(self.arms_data[arm_idx])\n self.estimated_means[arm_idx] = reward/n + (n-1)*self.estimated_means[arm_idx]/n\n self.pulls[arm_idx] += 1 # lets leave this here for now\n self._UCBs[arm_idx] = self.estimated_means[arm_idx] + self.radius(self.pulls[arm_idx],self.horizont)\n self._LCBs[arm_idx] = self.estimated_means[arm_idx] - self.radius(self.pulls[arm_idx],self.horizont)\n\nclass UCB1(BasePolicy):\n\n def __init__(self, k_arms, radius = lambda t, n: np.sqrt(2*np.log(t)/n), horizont = None, keep_history=False):\n\n super().__init__(k_arms)\n # create dummy lambda fun if constant radius\n if not callable(radius):\n self.radius = lambda x, t: radius\n else:\n self.radius = radius\n self.horizont = horizont\n self._UCBs = np.full(k_arms, np.inf)\n self.arms_data = [[] for _ in range(k_arms)]\n self._t = 1\n self._try_each_arm = cycle(range(self.k_arms)) # build iterator\n self.pulls = np.zeros(k_arms)\n # history keeping\n self.keep_history = keep_history\n self.UCB_history 
= []\n self.means_history = []\n self.pulls_history = []\n\n\n def pick_action(self):\n # try each arm once then compute UCBs\n if self._t > self.k_arms:\n self._UCBs = self.estimated_means + self.radius(self._t, self.pulls)\n arm_idx = random_argmax(self._UCBs)\n if self.keep_history:\n self.UCB_history.append(self._UCBs.copy())\n self.means_history.append(self.estimated_means.copy())\n self.pulls_history.append(self.pulls.copy())\n else:\n arm_idx = next(self._try_each_arm)\n return arm_idx\n\n def observe_reward(self, arm_idx, reward):\n self._calculate_stats(arm_idx, reward)\n\n def _calculate_stats(self, arm_idx, reward):\n self.arms_data[arm_idx].append(reward) # maybe do this with numpy arrays\n n = len(self.arms_data[arm_idx])\n self.estimated_means[arm_idx] = reward/n + (n-1)*self.estimated_means[arm_idx]/n\n self._t += 1\n self.pulls[arm_idx] += 1 # lets leave this here for now\n\nclass UCBTuned(UCB1):\n\n def __init__(self, k_arms):\n\n super().__init__(k_arms)\n self.sq_sums = np.zeros(k_arms)\n\n def pick_action(self):\n # try each arm once then compute UCBs\n if self._t > self.k_arms:\n self.Vs = self.sq_sums - self.estimated_means**2 + self.radius(self._t, self.pulls)\n self._UCBs = self.estimated_means + np.sqrt((np.log(self._t)/self.pulls)*self.Vs)\n arm_idx = random_argmax(self._UCBs)\n else:\n arm_idx = next(self._try_each_arm)\n return arm_idx\n\n def observe_reward(self, arm_idx, reward):\n self._calculate_stats(arm_idx, reward)\n\n def _calculate_stats(self, arm_idx, reward):\n self.arms_data[arm_idx].append(reward) # maybe do this with numpy arrays\n n = len(self.arms_data[arm_idx])\n self.estimated_means[arm_idx] = reward/n + (n-1)*self.estimated_means[arm_idx]/n\n self.sq_sums[arm_idx] = reward**2/n + (n-1)*self.sq_sums[arm_idx]/n\n self._t += 1\n self.pulls[arm_idx] += 1 # lets leave this here for now\n\nclass KLUCB(UCB1):\n \"\"\"Works only for bernulli currently\n \"\"\"\n def __init__(self, k_arms, kl_divergence = kl_bernulli):\n\n super().__init__(k_arms)\n self.kl_divergence = np.vectorize(kl_divergence)\n self._arm_idxs = range(self.k_arms)\n\n def pick_action(self):\n # try each arm once then compute UCBs\n if self._t > self.k_arms:\n self._UCBs = [self._bound_function(arm_idx) for arm_idx in self._arm_idxs]\n arm_idx = random_argmax(self._UCBs)\n else:\n arm_idx = next(self._try_each_arm)\n return arm_idx\n\n def observe_reward(self, arm_idx, reward):\n self._calculate_stats(arm_idx, reward)\n # create lambda function for KL divergence upper confidence set\n# self.bound_functions = lambda q: self.kl_divergence(self.estimated_means, q) - np.log(self._t)/self.pulls\n\n def _bound_function(self, arm_idx):\n # if the current estimated mean is 1 return the max bound in the case of bernulli\n kl = kl_bernulli(self.estimated_means[arm_idx], np.linspace(self.estimated_means[arm_idx],1,50,endpoint=False))\n return np.max(kl[np.where(kl<=np.log(self._t+1)/self.pulls[arm_idx])])\n\n def _calculate_stats(self, arm_idx, reward):\n self.arms_data[arm_idx].append(reward) # maybe do this with numpy arrays\n n = len(self.arms_data[arm_idx])\n self.estimated_means[arm_idx] = reward/n + (n-1)*self.estimated_means[arm_idx]/n\n self._t += 1\n self.pulls[arm_idx] += 1 # lets leave this here for now\n\nclass BetaBernoulliTS(BasePolicy):\n\n def __init__(self, k_arms, prior_data = None, keep_history=False):\n super().__init__(k_arms)\n self.arms_data = [[] for _ in range(k_arms)]\n if prior_data is None:\n self.prior_data = [np.array([1,1]) for _ in range(k_arms)] # start with 
uniform\n else:\n self.prior_data = prior_data\n self.estimated_means = np.array([prior[0]/np.sum(prior) for prior in self.prior_data])\n # history keeping\n self.keep_history = keep_history\n self.prior_data_history = []\n self.means_history = []\n self.pulls_history = []\n\n\n def _sample_from_arms(self, size = None):\n return np.array([beta(*prior_obs, size=size) for prior_obs in self.prior_data])\n\n def pick_action(self):\n if self.keep_history:\n self.prior_data_history.append(self.prior_data.copy())\n self.means_history.append(self.estimated_means.copy())\n self.pulls_history.append(self.pulls.copy())\n return random_argmax(self._sample_from_arms())\n\n def observe_reward(self, arm_idx, reward):\n self.prior_data[arm_idx] = self.prior_data[arm_idx] + np.array([reward, 1-reward], dtype=np.int16)\n self._calculate_stats(arm_idx, reward)\n\n def _calculate_stats(self, arm_idx, reward):\n self.arms_data[arm_idx].append(reward) # maybe do this with numpy arrays\n self.estimated_means[arm_idx] = self.prior_data[arm_idx][0]/np.sum(self.prior_data[arm_idx])\n self._t += 1\n self.pulls[arm_idx] += 1\n\nclass LinUCB(BasePolicy):\n \"\"\"LinUCB policy from A Contextual-Bandit Approach to Personalized News Article\n Recommendation (li et al).\n \"\"\"\n def __init__(self, k_arms, dimension):\n\n super().__init__(k_arms)\n self.dimension = dimension\n self.theta_hat = [np.zeros((1,dimension)).flatten() for _ in range(k_arms)]\n self.alpha = 1 + np.sqrt(np.log(2/0.05)/(2))\n # using the notation from the paper\n self.As = [np.identity(dimension) for _ in range(k_arms)]\n self.bs = [np.zeros((dimension, 1)).flatten() for _ in range(k_arms)]\n self._t = 1\n\n def _compute_theta(self, A, b):\n theta_hat = np.dot(linalg.inv(A), b).flatten()\n return theta_hat\n\n def _compute_ucb(self, theta_hat, context_vector, alpha, A):\n estimated_mean = np.dot(theta_hat, context_vector)\n deviation = alpha * np.sqrt(linalg.multi_dot([context_vector,\n linalg.inv(A),\n context_vector]))\n return estimated_mean + deviation\n\n def _update_A(self, A, context_vector):\n return A + np.outer(context_vector, context_vector)\n\n def _update_b(self, b, context_vector, reward):\n return b + reward * context_vector\n\n def pick_action(self, context):\n # context contains all context_vectors for all arms\n self.context = context\n self.ucbs = np.array([self._compute_ucb(self.theta_hat[i], context[i], self.alpha, self.As[i]) for\n i in range(self.k_arms)])\n return random_argmax(self.ucbs)\n\n def observe_reward(self, arm_idx, reward):\n self._calculate_stats(arm_idx, reward)\n self.As[arm_idx] = self._update_A(self.As[arm_idx], self.context[arm_idx])\n self.bs[arm_idx] = self._update_b(self.bs[arm_idx], self.context[arm_idx], reward)\n self.theta_hat[arm_idx] = self._compute_theta(self.As[arm_idx], self.bs[arm_idx])\n\n def _calculate_stats(self, arm_idx, reward):\n self.arms_data[arm_idx].append(reward)\n self._t += 1\n self.pulls[arm_idx] += 1\n\n\nclass LinEpsGreedy(BasePolicy):\n def __init__(self, k_arms, dimension):\n\n super().__init__(k_arms)\n self.dimension = dimension\n self.theta_hat = [np.zeros((1,dimension)).flatten() for _ in range(k_arms)]\n self.As = [np.identity(dimension) for _ in range(k_arms)]\n self.bs = [np.zeros((dimension, 1)).flatten() for _ in range(k_arms)]\n self._t = 1\n\n def _compute_theta(self, A, b):\n theta_hat = np.dot(linalg.inv(A), b).flatten()\n return theta_hat\n\n def _compute_mean(self, theta_hat, context_vector):\n estimated_mean = np.dot(theta_hat, context_vector)\n return 
estimated_mean\n\n def _update_A(self, A, context_vector):\n return A + np.outer(context_vector, context_vector)\n\n def _update_b(self, b, context_vector, reward):\n return b + reward * context_vector\n\n def pick_action(self, context):\n # context contains all context_vectors for all arms\n self.context = context\n self.means = np.array([self._compute_mean(self.theta_hat[i], context[i]) for\n i in range(self.k_arms)])\n return random_argmax(self.means)\n\n def observe_reward(self, arm_idx, reward):\n self._calculate_stats(arm_idx, reward)\n self.As[arm_idx] = self._update_A(self.As[arm_idx], self.context[arm_idx])\n self.bs[arm_idx] = self._update_b(self.bs[arm_idx], self.context[arm_idx], reward)\n self.theta_hat[arm_idx] = self._compute_theta(self.As[arm_idx], self.bs[arm_idx])\n\n def _calculate_stats(self, arm_idx, reward):\n self.arms_data[arm_idx].append(reward)\n self._t += 1\n self.pulls[arm_idx] += 1"
] |
[
[
"numpy.dot",
"numpy.log",
"numpy.random.beta",
"numpy.linspace",
"numpy.min",
"numpy.random.choice",
"numpy.linalg.inv",
"numpy.full",
"numpy.vectorize",
"numpy.identity",
"numpy.random.rand",
"numpy.argmax",
"numpy.where",
"numpy.outer",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.random.randint"
]
] |
tomo-2525/Python
|
[
"b605ef7cc5b691afd603d289433ee8c7ba64c774"
] |
[
"02_library/pandas/pandas2.py"
] |
[
"import pandas\nimport numpy\nimport matplotlib\nnumpy.random.seed(42)\n# Create a 10x5 array of random numbers\ndata = numpy.random.randn(10, 5)\nprint(data)\n\n# Build a DataFrame with data as its elements\ndf = pandas.DataFrame(data, columns=['A', 'B', 'C', 'D', 'E'])\nprint(df)\n# Get the first 3 rows\nprint(df.head(3))\n\n# Get the last 3 rows\nprint(df.tail(3))\nprint(df.index)\nprint(df.columns)\nprint(df.values)\nprint(df.describe())\nprint(df['C'])\n# Get rows 2 through 5\nprint(df[1:5])\n# Get all rows of columns A, B and E\nprint(df.loc[:, ['A', 'B', 'E']])\nprint(df.loc[1:4, ['A', 'B', 'E']])\nprint(df.iloc[3])\n\n# Rows 2-3 and columns 2-3\nprint(df.iloc[1:3, 1:3])\nmask = (df['A'] >= 0)\nprint(mask)\nprint(df[mask])\nprint(df[df['A'] >= 0])\nprint(df[df >= 0])\n\n# The mean() method computes the mean\nprint(df.mean())\n\n# Passing axis specifies the direction along which the mean is taken\nprint(df.mean(axis=1))\n\n\n# The sum() method computes the sum\nprint(df.sum())\n\n\n# Passing axis to sum() likewise specifies the direction along which the sum is taken\nprint(df.sum(axis=1))\n"
] |
[
[
"numpy.random.randn",
"numpy.random.seed",
"pandas.DataFrame"
]
] |